@@ -976,7 +976,7 @@ def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
         return result
 
 
-@register_cadence_pass(CadencePassAttribute(opt_level=1))
+@register_cadence_pass(CadencePassAttribute(opt_level=2))
 class ReplaceTrivialConvWithLinear(ExportPass):
     """
     In nn.Conv1d, the operand shapes are:
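For context on the pass being re-leveled here: a "trivial" convolution is one whose kernel spans the entire input, so it produces a single output position and is numerically identical to a linear op. A minimal sketch of that equivalence in stock PyTorch (illustrative shapes only, not the pass itself):

```python
import torch
import torch.nn.functional as F

N, C, O, K = 2, 3, 4, 5            # batch, in_channels, out_channels, kernel_size
x = torch.randn(N, C, K)           # input length equals the kernel size -> "trivial" conv
w = torch.randn(O, C, K)
b = torch.randn(O)

conv_out = F.conv1d(x, w, b)       # shape (N, O, 1): exactly one output position
lin_out = F.linear(x.reshape(N, C * K), w.reshape(O, C * K), b)

assert torch.allclose(conv_out.squeeze(-1), lin_out, atol=1e-5)
```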
@@ -1256,7 +1256,7 @@ def call_operator(
         return self.transpose_dims(new_op, meta, 0, dim)
 
 
-@register_cadence_pass(CadencePassAttribute(opt_level=1))
+@register_cadence_pass(CadencePassAttribute(opt_level=2))
 class ReplaceConvWithIm2RowAndLinear(ExportPass):
     """
     Replace convolution where groups=1 with im2row followed by a linear op.
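The docstring above describes the classic im2col/im2row trick: gather each receptive field into a row (or column) of a matrix so the convolution collapses into a single matmul. A hedged sketch using torch's `F.unfold` (the column-oriented im2col variant; the Cadence im2row op lays patches out as rows, but the equivalence is the same idea):

```python
import torch
import torch.nn.functional as F

N, C, H, W, O, K = 2, 3, 8, 8, 4, 3
x = torch.randn(N, C, H, W)
w = torch.randn(O, C, K, K)
b = torch.randn(O)

cols = F.unfold(x, kernel_size=K)               # (N, C*K*K, L), one column per patch
lin = w.view(O, -1) @ cols + b.view(1, O, 1)    # conv as a linear op over flattened patches
conv_ref = F.conv2d(x, w, b)

assert torch.allclose(lin.view_as(conv_ref), conv_ref, atol=1e-5)
```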
@@ -1449,7 +1449,7 @@ def call_operator(self, op, args, kwargs, meta):
         )
 
 
-@register_cadence_pass(CadencePassAttribute(opt_level=1))
+@register_cadence_pass(CadencePassAttribute(opt_level=2))
 class ReplaceTransposedConvWithLinearPass(ExportPass):
     """
     Replace transposed convolution where groups=1 with transposed_im2row
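Transposed convolution admits the same kind of rewrite: it is the adjoint of convolution, so (for stride 1, padding 0, groups=1) it equals a matmul on the flattened input followed by a col2im scatter-add. The transposed_im2row op presumably rearranges the data so a single linear suffices; the underlying numerical equivalence can still be sketched with stock torch ops (illustrative shapes, not the pass itself):

```python
import torch
import torch.nn.functional as F

N, Cin, Cout, H, W, K = 2, 4, 3, 6, 6, 3
y = torch.randn(N, Cin, H, W)
w = torch.randn(Cin, Cout, K, K)          # conv_transpose2d weight layout: (in, out, K, K)

ref = F.conv_transpose2d(y, w)            # stride=1, padding=0 -> (N, Cout, H+K-1, W+K-1)

# Matmul on the flattened input, then scatter the per-patch results back with F.fold.
cols = w.view(Cin, Cout * K * K).transpose(0, 1) @ y.view(N, Cin, H * W)
out = F.fold(cols, output_size=(H + K - 1, W + K - 1), kernel_size=K)

assert torch.allclose(out, ref, atol=1e-5)
```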
@@ -1686,7 +1686,7 @@ def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
         return result
 
 
-@register_cadence_pass(CadencePassAttribute(opt_level=1))
+@register_cadence_pass(CadencePassAttribute(opt_level=2))
 class ReplaceLinearWithFullyConnectedOpPass(ExportPass):
     """
     If the input of linear/quantized_linear op is a vector, replace it with
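The docstring here covers the rank-1 special case: when the activation fed to linear/quantized_linear is a plain vector, the op degenerates to a single fully-connected (one-row matmul) computation. A small sanity check of that equivalence in plain torch (not the Cadence fully_connected kernel itself):

```python
import torch
import torch.nn.functional as F

K, O = 8, 4
x = torch.randn(K)                 # 1-D (vector) input to linear
w = torch.randn(O, K)
b = torch.randn(O)

vec_out = F.linear(x, w, b)                        # shape (O,)
fc_out = F.linear(x.unsqueeze(0), w, b).squeeze(0) # same result as a single-row matmul

assert torch.allclose(vec_out, fc_out)
```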