Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

skip dummy inference and run_shape_analysis #3212

Merged
merged 30 commits into from
Oct 29, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
458a4d1
skip run_shape_analysis
lanluo-nvidia Oct 6, 2024
2f408f9
test
lanluo-nvidia Oct 6, 2024
1c5e86c
test
lanluo-nvidia Oct 6, 2024
ba487dc
test
lanluo-nvidia Oct 6, 2024
99d2274
Merge branch 'main' into lluo/save_remove_inputs
lanluo-nvidia Oct 6, 2024
2b43480
test
lanluo-nvidia Oct 6, 2024
b4e02e1
Merge branch 'main' into lluo/save_remove_inputs
lanluo-nvidia Oct 11, 2024
3d94f8b
test
lanluo-nvidia Oct 13, 2024
28ba6cc
Merge branch 'main' into lluo/save_remove_inputs
lanluo-nvidia Oct 15, 2024
b89cbe0
resolve comments
lanluo-nvidia Oct 15, 2024
2843d37
Merge branch 'main' into lluo/save_remove_inputs
lanluo-nvidia Oct 16, 2024
3eb48d7
test
lanluo-nvidia Oct 16, 2024
50eb0d8
replace dummy inference
lanluo-nvidia Oct 20, 2024
95ed602
test
lanluo-nvidia Oct 20, 2024
120f30d
test
lanluo-nvidia Oct 21, 2024
424cbf7
add run_test_with_dynamic_shape change
lanluo-nvidia Oct 21, 2024
2fc9cef
Merge branch 'main' into lluo/save_remove_inputs
lanluo-nvidia Oct 21, 2024
ef54cfc
split the PR, add dummy inference for converter test
lanluo-nvidia Oct 21, 2024
14f5d61
test
lanluo-nvidia Oct 22, 2024
7563959
test
lanluo-nvidia Oct 22, 2024
77355f0
test
lanluo-nvidia Oct 22, 2024
13361fd
add linear lowering meta val
lanluo-nvidia Oct 22, 2024
f0a9fef
add linear_lowering change
lanluo-nvidia Oct 23, 2024
cff64a4
test
lanluo-nvidia Oct 23, 2024
933abac
test
lanluo-nvidia Oct 23, 2024
8417684
resolve comments
lanluo-nvidia Oct 25, 2024
8676f88
test
lanluo-nvidia Oct 25, 2024
076f47a
resolve comments
lanluo-nvidia Oct 29, 2024
8250179
Merge branch 'main' into lluo/save_remove_inputs
lanluo-nvidia Oct 29, 2024
96e93e4
resolve comments
lanluo-nvidia Oct 29, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
add run_test_with_dynamic_shape change
  • Loading branch information
lanluo-nvidia committed Oct 21, 2024
commit 424cbf7d16a21b7ff9608a8c92d3407118eab12a
20 changes: 18 additions & 2 deletions tests/py/dynamo/conversion/harness.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import logging
import time
import unittest
from typing import Callable, List, Optional, Tuple
from typing import Any, Callable, List, Optional, Tuple

import torch
import torch_tensorrt
Expand All @@ -12,6 +12,7 @@
from torch_tensorrt import Input
from torch_tensorrt._enums import dtype
from torch_tensorrt.dynamo import _defaults
from torch_tensorrt.dynamo._defaults import default_device
from torch_tensorrt.dynamo._settings import CompilationSettings

# Use interpreter, input spec, and test case from fx_ts_compat to test Dynamo Converter Registry
Expand Down Expand Up @@ -223,10 +224,22 @@ def generate_graph(
use_dynamo_tracer: bool,
enable_passes: bool,
propagate_shapes: bool = False,
torch_export_dynamic_shapes: Optional[Any] = None,
):
mod = mod.eval()
if use_dynamo_tracer:
exported_program = torch_tensorrt.dynamo.trace(mod, tuple(original_inputs))
if torch_export_dynamic_shapes is not None:
device = default_device()
torch_export_inputs = get_torch_inputs(original_inputs, device)
exported_program = torch.export.export(
mod,
tuple(torch_export_inputs),
dynamic_shapes=torch_export_dynamic_shapes,
)
else:
exported_program = torch_tensorrt.dynamo.trace(
mod, tuple(original_inputs)
)
exported_program = pre_export_lowering(exported_program)
exported_program = exported_program.run_decompositions(
get_decompositions(False)
Expand Down Expand Up @@ -387,13 +400,16 @@ def run_test_with_dynamic_shape(
propagate_shapes=False,
check_dtype=True,
make_refittable=False,
# Optional: lets the caller supply custom torch.export dynamic_shapes rules for this test case
torch_export_dynamic_shapes: Optional[Any] = None,
):
mod = self.generate_graph(
mod,
input_specs,
use_dynamo_tracer=use_dynamo_tracer,
enable_passes=enable_passes,
propagate_shapes=propagate_shapes,
torch_export_dynamic_shapes=torch_export_dynamic_shapes,
)

# Previous instance of the interpreter auto-casted 64-bit inputs
Expand Down
33 changes: 32 additions & 1 deletion tests/py/dynamo/conversion/test_ge_aten.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import torch
import torch.nn as nn
from parameterized import parameterized
from torch.export import Dim
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt import Input

Expand Down Expand Up @@ -85,10 +86,40 @@ def forward(self, lhs_val, rhs_val):

@parameterized.expand(
    [
        ("2d_2d", (2, 3), (4, 3), (5, 3), (2, 3), (4, 3), (5, 3)),
        ("3d_2d", (2, 2, 2), (2, 3, 2), (2, 4, 2), (2, 1), (3, 1), (4, 1)),
    ]
)
def test_ge_dynamic_tensor_torch_export(self, *args):
    """Exercise aten.ge.Tensor with user-supplied torch.export dynamic_shapes."""

    # Minimal module wrapping the elementwise >= op under test.
    class ge(nn.Module):
        def forward(self, lhs_val, rhs_val):
            return torch.ops.aten.ge.Tensor(lhs_val, rhs_val)

    # args[1:4] are the (min, opt, max) shapes for lhs; args[4:7] for rhs.
    lhs_shapes = args[1:4]
    rhs_shapes = args[4:7]
    input_specs = [
        Input(min_shape=lo, opt_shape=mid, max_shape=hi)
        for lo, mid, hi in (lhs_shapes, rhs_shapes)
    ]

    # One shared symbolic dimension: dim 1 of lhs must equal dim 0 of rhs.
    shared_dim = Dim("dyn_dim", min=2, max=4)
    self.run_test_with_dynamic_shape(
        ge(),
        input_specs,
        torch_export_dynamic_shapes={
            "lhs_val": {1: shared_dim},
            "rhs_val": {0: shared_dim},
        },
    )

@parameterized.expand(
[
("2d_2d", (2, 3), (4, 3), (5, 3), (2, 3), (4, 3), (5, 3)),
]
)
def test_ge_dynamic_tensor(self, *args):
class ge(nn.Module):
def forward(self, lhs_val, rhs_val):
Expand Down
Loading