Description
import torch


class PowModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x ** 0.5


model = PowModel()
print(model(torch.tensor(2)))  # eager PyTorch: tensor(1.4142)

prog = torch.onnx.export(PowModel(), (torch.tensor(2),), dynamo=True)
print(prog(torch.tensor(2)))  # exported ONNX program: (tensor(1),)
print(prog)
Here PyTorch does type promotion on the base, but ONNX does not: aten.pow.Tensor_Scalar with a float exponent promotes the int64 input to float32 (note pow_1: "f32[]" in the exported program below), while the ONNX Pow op's output takes the dtype of its base, so the exported graph computes in INT64 and sqrt(2) is truncated to 1. A standalone verification and a possible workaround follow the program dump. The three prints produce:
tensor(1.4142)
(tensor(1),)
ONNXProgram(
    model=
    <
        ir_version=10,
        opset_imports={'': 18},
        producer_name='pytorch',
        producer_version='2.8.0.dev20250414+cpu',
        domain=None,
        model_version=None,
    >
    graph(
        name=main_graph,
        inputs=(
            %"x"<INT64,[]>
        ),
        outputs=(
            %"pow_1"<INT64,[]>
        ),
    ) {
        0 |  # node_Constant_0
             %"val_0"<FLOAT,[]> ⬅️ ::Constant() {value=Tensor<FLOAT,[]>(array(0.5, dtype=float32), name=None)}
        1 |  # node_Pow_1
             %"pow_1"<INT64,[]> ⬅️ ::Pow(%"x", %"val_0")
        return %"pow_1"<INT64,[]>
    }
    ,
    exported_program=
    ExportedProgram:
        class GraphModule(torch.nn.Module):
            def forward(self, x: "i64[]"):
                # File: /home/justinchu/dev/onnxscript/test.py:7 in forward, code: return x ** 0.5
                pow_1: "f32[]" = torch.ops.aten.pow.Tensor_Scalar(x, 0.5); x = None
                return (pow_1,)

    Graph signature:
        # inputs
        x: USER_INPUT

        # outputs
        pow_1: USER_OUTPUT

    Range constraints: {}
)
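
To confirm this is ONNX Pow semantics rather than an exporter or runtime quirk, here is a minimal sketch that builds the same Pow(INT64 base, FLOAT exponent) pattern by hand and runs it (assumes onnx and onnxruntime are installed; the graph name and variable names are made up for illustration):

import numpy as np
import onnx
import onnx.helper as helper
import onnxruntime as ort

# Same pattern as the exported graph: int64 base, float scalar exponent.
pow_node = helper.make_node("Pow", ["x", "exp"], ["y"])
graph = helper.make_graph(
    [pow_node],
    "pow_promotion_check",  # hypothetical name for this sketch
    inputs=[helper.make_tensor_value_info("x", onnx.TensorProto.INT64, [])],
    outputs=[helper.make_tensor_value_info("y", onnx.TensorProto.INT64, [])],
    initializer=[helper.make_tensor("exp", onnx.TensorProto.FLOAT, [], [0.5])],
)
model_proto = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])
onnx.checker.check_model(model_proto)

sess = ort.InferenceSession(model_proto.SerializeToString())
# Pow's output dtype follows the base, so the float result is truncated:
print(sess.run(None, {"x": np.array(2, dtype=np.int64)}))  # expected: [array(1)]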
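
Until the exporter inserts the promotion itself (e.g. a Cast of the base to FLOAT before Pow), one user-side workaround is to write the promotion explicitly in the model. A sketch (PromotedPowModel is a hypothetical name):

class PromotedPowModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Cast the integer base to float first, mirroring eager-mode
        # promotion, so the exported graph computes Pow in FLOAT.
        return x.to(torch.float32) ** 0.5


prog = torch.onnx.export(PromotedPowModel(), (torch.tensor(2),), dynamo=True)
print(prog(torch.tensor(2)))  # expected: (tensor(1.4142),)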