From 0ee515a7be99babcf4836da20eb297e41c231ca3 Mon Sep 17 00:00:00 2001 From: Ean Garvey <87458719+monorimet@users.noreply.github.com> Date: Fri, 29 Jul 2022 17:17:55 -0500 Subject: [PATCH] Fix GPU benchmarks on PyTorch model tests. (#218) * Add CUDA_BENCHMARKS option to setup.venv to enable PyTorch benchmarks on CUDA. * Fix PyTorch GPU benchmarks for tank models. --- .github/workflows/test-models.yml | 6 +- requirements.txt | 1 + setup_venv.sh | 12 ++- shark/shark_benchmark_runner.py | 46 ++++++---- tank/model_utils.py | 21 +++++ .../MiniLM-L12-H384-uncased_test.py | 23 +---- .../albert-base-v2/albert-base-v2_test.py | 20 +---- tank/pytorch/alexnet/alexnet_test.py | 21 +---- tank/pytorch/bench_results.csv | 1 + .../bert-base-uncased_test.py | 15 +--- .../distilbert-base-uncased_test.py | 29 +++--- .../mobilebert-uncased_test.py | 21 +---- tank/pytorch/resnet101/resnet101_test.py | 19 +--- tank/pytorch/resnet18/resnet18_test.py | 18 +--- tank/pytorch/resnet50/resnet50_test.py | 17 +--- .../squeezenet1_0/squeezenet1_0_test.py | 17 +--- .../wide_resnet50_2/wide_resnet50_2_test.py | 17 +--- tank/tf/minilm_tf_test.py | 90 ------------------- 18 files changed, 95 insertions(+), 299 deletions(-) create mode 100644 tank/pytorch/bench_results.csv delete mode 100644 tank/tf/minilm_tf_test.py diff --git a/.github/workflows/test-models.yml b/.github/workflows/test-models.yml index c1d47f60dd..0f1002ddcc 100644 --- a/.github/workflows/test-models.yml +++ b/.github/workflows/test-models.yml @@ -75,7 +75,7 @@ jobs: if: matrix.suite == 'cpu' run: | cd $GITHUB_WORKSPACE - PYTHON=python${{ matrix.python-version }} IMPORTER=1 ./setup_venv.sh + PYTHON=python${{ matrix.python-version }} ./setup_venv.sh source shark.venv/bin/activate pytest -k 'cpu' --ignore=shark/tests/test_shark_importer.py --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=benchmarks/tests/test_benchmark.py @@ -83,7 +83,7 @@ jobs: if: matrix.suite == 'gpu' run: | cd $GITHUB_WORKSPACE - PYTHON=python${{ matrix.python-version }} IMPORTER=1 ./setup_venv.sh + PYTHON=python${{ matrix.python-version }} ./setup_venv.sh source shark.venv/bin/activate pytest -k "gpu" --ignore=shark/tests/test_shark_importer.py --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=benchmarks/tests/test_benchmark.py @@ -91,6 +91,6 @@ jobs: if: matrix.suite == 'vulkan' run: | cd $GITHUB_WORKSPACE - PYTHON=python${{ matrix.python-version }} IMPORTER=1 ./setup_venv.sh + PYTHON=python${{ matrix.python-version }} ./setup_venv.sh source shark.venv/bin/activate pytest -k 'vulkan' --ignore=shark/tests/test_shark_importer.py --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=benchmarks/tests/test_benchmark.py diff --git a/requirements.txt b/requirements.txt index 746272d1aa..f74d1bf8b3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,3 +10,4 @@ gsutil # Testing pytest pytest-xdist +Pillow diff --git a/setup_venv.sh b/setup_venv.sh index afb97680e4..389b1ad9fc 100755 --- a/setup_venv.sh +++ b/setup_venv.sh @@ -98,7 +98,7 @@ if [[ ! -z "${IMPORTER}" ]]; then echo "${Yellow}Installing importer tools.." if [[ $(uname -s) = 'Linux' ]]; then echo "${Yellow}Linux detected.. 
installing Linux importer tools" - $PYTHON -m pip install --upgrade -r "$TD/requirements-importer.txt" -f https://github.com/${RUNTIME}/releases --extra-index-url https://test.pypi.org/simple/ --extra-index-url https://download.pytorch.org/whl/nightly/cpu + $PYTHON -m pip install --upgrade -r "$TD/requirements-importer.txt" -f https://github.com/${RUNTIME}/releases --extra-index-url https://test.pypi.org/simple/ --extra-index-url https://download.pytorch.org/whl/nightly/cu116 elif [[ $(uname -s) = 'Darwin' ]]; then echo "${Yellow}macOS detected.. installing macOS importer tools" #Conda seems to have some problems installing these packages and hope they get resolved upstream. @@ -108,6 +108,16 @@ fi $PYTHON -m pip install -e . --extra-index-url https://download.pytorch.org/whl/nightly/cpu -f https://github.com/llvm/torch-mlir/releases -f https://github.com/${RUNTIME}/releases +if [[ $(uname -s) = 'Linux' ]]; then + $PYTHON -m pip uninstall -y torch torchvision + $PYTHON -m pip install --pre torch torchvision --extra-index-url https://download.pytorch.org/whl/nightly/cu116 + if [ $? -eq 0 ];then + echo "Successfully Installed torch + cu116." + else + echo "Could not install torch + cu116." >&2 + fi +fi + if [[ -z "${CONDA_PREFIX}" ]]; then echo "${Green}Before running examples activate venv with:" echo " ${Green}source $VENV_DIR/bin/activate" diff --git a/shark/shark_benchmark_runner.py b/shark/shark_benchmark_runner.py index ca636b4b76..891f68b0ab 100644 --- a/shark/shark_benchmark_runner.py +++ b/shark/shark_benchmark_runner.py @@ -19,6 +19,7 @@ run_benchmark_module, ) from shark.parser import shark_args +from tank.model_utils import get_torch_model from datetime import datetime import time import csv @@ -59,20 +60,33 @@ def setup_cl(self, input_tensors): mlir_dialect=self.mlir_dialect, ) - def benchmark_frontend(self, inputs): + def benchmark_frontend(self, inputs, modelname): if self.frontend in ["pytorch", "torch"]: - return self.benchmark_torch(inputs) + return self.benchmark_torch(modelname) elif self.frontend in ["tensorflow", "tf"]: - return self.benchmark_tf(inputs) + return self.benchmark_tf(inputs, modelname) + + def benchmark_torch(self, modelname): + import torch + + if self.device == "gpu": + torch.set_default_tensor_type(torch.cuda.FloatTensor) + else: + torch.set_default_tensor_type(torch.FloatTensor) + torch_device = torch.device( + "cuda:0" if self.device == "gpu" else "cpu" + ) + HFmodel, input, act_out = get_torch_model(modelname) + frontend_model = HFmodel.model + frontend_model.to(torch_device) + input.to(torch_device) - def benchmark_torch(self, input_tuple): - inputs = input_tuple[0] for i in range(shark_args.num_warmup_iterations): - self.frontend_model.forward(inputs) + frontend_model.forward(input) begin = time.time() for i in range(shark_args.num_iterations): - out = self.frontend_model.forward(inputs) + out = frontend_model.forward(input) if i == shark_args.num_iterations - 1: end = time.time() break @@ -84,13 +98,13 @@ def benchmark_torch(self, input_tuple): f"{((end-begin)/shark_args.num_iterations)*1000}", ] - def benchmark_tf(self, inputs): + def benchmark_tf(self, frontend_model, inputs): for i in range(shark_args.num_warmup_iterations): - self.frontend_model.forward(*inputs) + frontend_model.forward(*inputs) begin = time.time() for i in range(shark_args.num_iterations): - out = self.frontend_model.forward(*inputs) + out = frontend_model.forward(*inputs) if i == shark_args.num_iterations - 1: end = time.time() break @@ -162,12 +176,12 @@ def 
benchmark_all_csv( for p in platforms: if p == "frontend": bench_result["platform"] = frontend - bench_result["iter/sec"] = self.benchmark_frontend(inputs)[ - 0 - ] - bench_result["ms/iter"] = self.benchmark_frontend(inputs)[ - 1 - ] + bench_result["iter/sec"] = self.benchmark_frontend( + inputs, modelname + )[0] + bench_result["ms/iter"] = self.benchmark_frontend( + inputs, modelname + )[1] elif p == "shark_python": bench_result["platform"] = "shark_python" bench_result["iter/sec"] = self.benchmark_python(inputs)[0] diff --git a/tank/model_utils.py b/tank/model_utils.py index 652e1fe920..813e55f8ae 100644 --- a/tank/model_utils.py +++ b/tank/model_utils.py @@ -2,9 +2,27 @@ import torch import numpy as np +import sys torch.manual_seed(0) +vision_models = [ + "alexnet", + "resnet101", + "resnet18", + "resnet50", + "squeezenet1_0", + "wide_resnet50_2", +] + + +def get_torch_model(modelname): + if modelname in vision_models: + return get_vision_model(modelname) + else: + return get_hf_model(modelname) + + ##################### Hugging Face LM Models ################################### @@ -12,7 +30,10 @@ class HuggingFaceLanguage(torch.nn.Module): def __init__(self, hf_model_name): super().__init__() from transformers import AutoModelForSequenceClassification + import transformers as trf + transformers_path = trf.__path__[0] + hf_model_path = f"{transformers_path}/models/{hf_model_name}" self.model = AutoModelForSequenceClassification.from_pretrained( hf_model_name, # The pretrained model. num_labels=2, # The number of output labels--2 for binary classification. diff --git a/tank/pytorch/MiniLM-L12-H384-uncased/MiniLM-L12-H384-uncased_test.py b/tank/pytorch/MiniLM-L12-H384-uncased/MiniLM-L12-H384-uncased_test.py index feaec80c33..620061fe0f 100644 --- a/tank/pytorch/MiniLM-L12-H384-uncased/MiniLM-L12-H384-uncased_test.py +++ b/tank/pytorch/MiniLM-L12-H384-uncased/MiniLM-L12-H384-uncased_test.py @@ -1,10 +1,8 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info from tank.model_utils import compare_tensors -from shark.parser import shark_args from shark.shark_downloader import download_torch_model -import torch import unittest import numpy as np import pytest @@ -13,20 +11,14 @@ class MiniLMModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "microsoft/MiniLM-L12-H384-uncased", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -50,18 +42,9 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - from tank.model_utils import get_hf_model - - torch.manual_seed(0) - - model, input, act_out = get_hf_model( - "microsoft/MiniLM-L12-H384-uncased" - ) - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), - "MiniLM-L12-H384-uncased", + (input), + "microsoft/MiniLM-L12-H384-uncased", dynamic, device, "torch", @@ -72,8 +55,6 @@ class MiniLMModuleTest(unittest.TestCase): @pytest.fixture(autouse=True) def configure(self, pytestconfig): self.module_tester = MiniLMModuleTester(self) - self.module_tester.save_mlir = 
pytestconfig.getoption("save_mlir") - self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb") self.module_tester.benchmark = pytestconfig.getoption("benchmark") def test_module_static_cpu(self): diff --git a/tank/pytorch/albert-base-v2/albert-base-v2_test.py b/tank/pytorch/albert-base-v2/albert-base-v2_test.py index 9e4431d59d..18c0f9695b 100644 --- a/tank/pytorch/albert-base-v2/albert-base-v2_test.py +++ b/tank/pytorch/albert-base-v2/albert-base-v2_test.py @@ -1,10 +1,8 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info -from tank.model_utils import get_hf_model, compare_tensors -from shark.parser import shark_args +from tank.model_utils import compare_tensors from shark.shark_downloader import download_torch_model -import torch import unittest import numpy as np import pytest @@ -13,20 +11,14 @@ class AlbertModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "albert-base-v2", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -50,14 +42,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - - torch.manual_seed(0) - - model, input, act_out = get_hf_model("albert-base-v2") - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "albert-base-v2", dynamic, device, @@ -69,8 +55,6 @@ class AlbertModuleTest(unittest.TestCase): @pytest.fixture(autouse=True) def configure(self, pytestconfig): self.module_tester = AlbertModuleTester(self) - self.module_tester.save_mlir = pytestconfig.getoption("save_mlir") - self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb") self.module_tester.benchmark = pytestconfig.getoption("benchmark") def test_module_static_cpu(self): diff --git a/tank/pytorch/alexnet/alexnet_test.py b/tank/pytorch/alexnet/alexnet_test.py index 5d6a8c36ac..f96d671a13 100644 --- a/tank/pytorch/alexnet/alexnet_test.py +++ b/tank/pytorch/alexnet/alexnet_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info -from tank.model_utils import get_vision_model, compare_tensors -from shark.parser import shark_args +from tank.model_utils import compare_tensors from shark.shark_downloader import download_torch_model import unittest @@ -12,20 +11,14 @@ class AlexnetModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "alexnet", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -49,16 +42,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - import torchvision.models as models - - torch.manual_seed(0) - model, input, act_out = get_vision_model( - 
models.alexnet(pretrained=True) - ) - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "alexnet", dynamic, device, @@ -70,8 +55,6 @@ class AlexnetModuleTest(unittest.TestCase): @pytest.fixture(autouse=True) def configure(self, pytestconfig): self.module_tester = AlexnetModuleTester(self) - self.module_tester.save_mlir = pytestconfig.getoption("save_mlir") - self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb") self.module_tester.benchmark = pytestconfig.getoption("benchmark") def test_module_static_cpu(self): diff --git a/tank/pytorch/bench_results.csv b/tank/pytorch/bench_results.csv new file mode 100644 index 0000000000..d47e1ff922 --- /dev/null +++ b/tank/pytorch/bench_results.csv @@ -0,0 +1 @@ +platform,model,dynamic,device,iter/sec,ms/iter,datetime diff --git a/tank/pytorch/bert-base-uncased/bert-base-uncased_test.py b/tank/pytorch/bert-base-uncased/bert-base-uncased_test.py index 5bd9be37ca..12da2a4675 100644 --- a/tank/pytorch/bert-base-uncased/bert-base-uncased_test.py +++ b/tank/pytorch/bert-base-uncased/bert-base-uncased_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info -from tank.model_utils import get_hf_model, compare_tensors -from shark.parser import shark_args +from tank.model_utils import compare_tensors from shark.shark_downloader import download_torch_model import torch @@ -25,8 +24,6 @@ def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "bert-base-uncased", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -50,14 +47,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - - torch.manual_seed(0) - - model, input, act_out = get_hf_model("bert-base-uncased") - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "bert-base-uncased", dynamic, device, @@ -69,8 +60,6 @@ class BertBaseUncasedModuleTest(unittest.TestCase): @pytest.fixture(autouse=True) def configure(self, pytestconfig): self.module_tester = BertBaseUncasedModuleTester(self) - self.module_tester.save_mlir = pytestconfig.getoption("save_mlir") - self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb") self.module_tester.benchmark = pytestconfig.getoption("benchmark") def test_module_static_cpu(self): diff --git a/tank/pytorch/distilbert-base-uncased/distilbert-base-uncased_test.py b/tank/pytorch/distilbert-base-uncased/distilbert-base-uncased_test.py index a30b6c081b..dab3977b14 100644 --- a/tank/pytorch/distilbert-base-uncased/distilbert-base-uncased_test.py +++ b/tank/pytorch/distilbert-base-uncased/distilbert-base-uncased_test.py @@ -1,10 +1,9 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info -from tank.model_utils import get_hf_model, compare_tensors +from tank.model_utils import compare_tensors from shark.parser import shark_args from shark.shark_downloader import download_torch_model -import torch import unittest import numpy as np import pytest @@ -13,20 +12,14 @@ class DistilBertModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - 
self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "distilbert-base-uncased", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -50,14 +43,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - - torch.manual_seed(0) - - model, input, act_out = get_hf_model("distilbert-base-uncased") - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "distilbert-base-uncased", dynamic, device, @@ -73,19 +60,25 @@ def configure(self, pytestconfig): self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb") self.module_tester.benchmark = pytestconfig.getoption("benchmark") - @pytest.mark.skip(reason="DistilBert needs to be uploaded to cloud.") + @pytest.mark.skip( + reason="Fails to lower in torch-mlir. See https://github.com/nod-ai/SHARK/issues/222" + ) def test_module_static_cpu(self): dynamic = False device = "cpu" self.module_tester.create_and_check_module(dynamic, device) - @pytest.mark.skip(reason="DistilBert needs to be uploaded to cloud.") + @pytest.mark.skip( + reason="Fails to lower in torch-mlir. See https://github.com/nod-ai/SHARK/issues/222" + ) def test_module_dynamic_cpu(self): dynamic = True device = "cpu" self.module_tester.create_and_check_module(dynamic, device) - @pytest.mark.skip(reason="DistilBert needs to be uploaded to cloud.") + @pytest.mark.skip( + reason="Fails to lower in torch-mlir. See https://github.com/nod-ai/SHARK/issues/222" + ) @pytest.mark.skipif( check_device_drivers("gpu"), reason=device_driver_info("gpu") ) diff --git a/tank/pytorch/mobilebert-uncased/mobilebert-uncased_test.py b/tank/pytorch/mobilebert-uncased/mobilebert-uncased_test.py index 022e69a00a..1bab886dd5 100644 --- a/tank/pytorch/mobilebert-uncased/mobilebert-uncased_test.py +++ b/tank/pytorch/mobilebert-uncased/mobilebert-uncased_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info -from tank.model_utils import get_hf_model, compare_tensors -from shark.parser import shark_args +from tank.model_utils import compare_tensors from shark.shark_downloader import download_torch_model import torch @@ -13,20 +12,14 @@ class MobileBertModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "google/mobilebert-uncased", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -50,15 +43,9 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - - torch.manual_seed(0) - - model, input, act_out = get_hf_model("google/mobilebert-uncased") - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), - "mobilebert-uncased", + (input), + "google/mobilebert-uncased", dynamic, device, "torch", @@ -69,8 +56,6 @@ class 
MobileBertModuleTest(unittest.TestCase): @pytest.fixture(autouse=True) def configure(self, pytestconfig): self.module_tester = MobileBertModuleTester(self) - self.module_tester.save_mlir = pytestconfig.getoption("save_mlir") - self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb") self.module_tester.benchmark = pytestconfig.getoption("benchmark") def test_module_static_cpu(self): diff --git a/tank/pytorch/resnet101/resnet101_test.py b/tank/pytorch/resnet101/resnet101_test.py index 2e9f2db449..89663901e9 100644 --- a/tank/pytorch/resnet101/resnet101_test.py +++ b/tank/pytorch/resnet101/resnet101_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info -from tank.model_utils import get_vision_model, compare_tensors -from shark.parser import shark_args +from tank.model_utils import compare_tensors from shark.shark_downloader import download_torch_model import unittest @@ -12,20 +11,14 @@ class Resnet101ModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "resnet101", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -49,16 +42,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - import torchvision.models as models - - torch.manual_seed(0) - model, input, act_out = get_vision_model( - models.resnet101(pretrained=True) - ) - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "resnet101", dynamic, device, diff --git a/tank/pytorch/resnet18/resnet18_test.py b/tank/pytorch/resnet18/resnet18_test.py index c97b2278de..db6774ef49 100644 --- a/tank/pytorch/resnet18/resnet18_test.py +++ b/tank/pytorch/resnet18/resnet18_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info from tank.model_utils import get_vision_model, compare_tensors -from shark.parser import shark_args from shark.shark_downloader import download_torch_model import unittest @@ -12,22 +11,15 @@ class Resnet18ModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "resnet18", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb - # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( # model, # (input,), @@ -49,16 +41,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - import torchvision.models as models - - torch.manual_seed(0) - model, input, act_out = get_vision_model( - models.resnet18(pretrained=True) - ) - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "resnet18", dynamic, device, diff --git a/tank/pytorch/resnet50/resnet50_test.py 
b/tank/pytorch/resnet50/resnet50_test.py index ddc9d6d2df..89cdf04d5b 100644 --- a/tank/pytorch/resnet50/resnet50_test.py +++ b/tank/pytorch/resnet50/resnet50_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info from tank.model_utils import get_vision_model, compare_tensors -from shark.parser import shark_args from shark.shark_downloader import download_torch_model import unittest @@ -12,20 +11,14 @@ class Resnet50ModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "resnet50", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -49,16 +42,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - import torchvision.models as models - - torch.manual_seed(0) - model, input, act_out = get_vision_model( - models.resnet50(pretrained=True) - ) - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "resnet50", dynamic, device, diff --git a/tank/pytorch/squeezenet1_0/squeezenet1_0_test.py b/tank/pytorch/squeezenet1_0/squeezenet1_0_test.py index 34bac0d6cf..9173d1e9d6 100644 --- a/tank/pytorch/squeezenet1_0/squeezenet1_0_test.py +++ b/tank/pytorch/squeezenet1_0/squeezenet1_0_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info from tank.model_utils import get_vision_model, compare_tensors -from shark.parser import shark_args from shark.shark_downloader import download_torch_model import unittest @@ -12,20 +11,14 @@ class SqueezenetModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "squeezenet1_0", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -49,16 +42,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - import torchvision.models as models - - torch.manual_seed(0) - model, input, act_out = get_vision_model( - models.squeezenet1_0(pretrained=True) - ) - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "squeezenet1_0", dynamic, device, diff --git a/tank/pytorch/wide_resnet50_2/wide_resnet50_2_test.py b/tank/pytorch/wide_resnet50_2/wide_resnet50_2_test.py index 5f8dd655a2..70b0459674 100644 --- a/tank/pytorch/wide_resnet50_2/wide_resnet50_2_test.py +++ b/tank/pytorch/wide_resnet50_2/wide_resnet50_2_test.py @@ -1,7 +1,6 @@ from shark.shark_inference import SharkInference from shark.iree_utils._common import check_device_drivers, device_driver_info from tank.model_utils import get_vision_model, compare_tensors -from shark.parser import shark_args from shark.shark_downloader 
import download_torch_model import unittest @@ -12,20 +11,14 @@ class WideResnet50ModuleTester: def __init__( self, - save_mlir=False, - save_vmfb=False, benchmark=False, ): - self.save_mlir = save_mlir - self.save_vmfb = save_vmfb self.benchmark = benchmark def create_and_check_module(self, dynamic, device): model_mlir, func_name, input, act_out = download_torch_model( "wide_resnet50_2", dynamic ) - shark_args.save_mlir = self.save_mlir - shark_args.save_vmfb = self.save_vmfb # from shark.shark_importer import SharkImporter # mlir_importer = SharkImporter( @@ -49,16 +42,8 @@ def create_and_check_module(self, dynamic, device): assert True == compare_tensors(act_out, results) if self.benchmark == True: - import torch - import torchvision.models as models - - torch.manual_seed(0) - model, input, act_out = get_vision_model( - models.wide_resnet50_2(pretrained=True) - ) - shark_module.shark_runner.frontend_model = model shark_module.shark_runner.benchmark_all_csv( - (input,), + (input), "wide_resnet50_2", dynamic, device, diff --git a/tank/tf/minilm_tf_test.py b/tank/tf/minilm_tf_test.py deleted file mode 100644 index 10cc50f965..0000000000 --- a/tank/tf/minilm_tf_test.py +++ /dev/null @@ -1,90 +0,0 @@ -from shark.shark_inference import SharkInference -from shark.iree_utils._common import check_device_drivers -from tank.model_utils_tf import get_TFhf_model, compare_tensors_tf - -import tensorflow as tf -import unittest -import numpy as np -import pytest - - -class MiniLMTFModuleTester: - def create_and_check_module(self, dynamic, device): - model, input, act_out = get_TFhf_model( - "microsoft/MiniLM-L12-H384-uncased" - ) - shark_module = SharkInference( - model, (input,), device=device, dynamic=dynamic, jit_trace=True - ) - shark_module.set_frontend("tensorflow") - shark_module.compile() - results = shark_module.forward((input)) - assert True == compare_tensors_tf(act_out, results) - - -class MiniLMTFModuleTest(unittest.TestCase): - def setUp(self): - self.module_tester = MiniLMTFModuleTester() - - @pytest.mark.skip(reason="TF testing temporarily unavailable.") - def test_module_static_cpu(self): - dynamic = False - device = "cpu" - self.module_tester.create_and_check_module(dynamic, device) - - @pytest.mark.skip(reason="TF testing temporarily unavailable.") - @pytest.mark.xfail( - reason="Language models currently failing for dynamic case" - ) - def test_module_dynamic_cpu(self): - dynamic = True - device = "cpu" - self.module_tester.create_and_check_module(dynamic, device) - - @pytest.mark.skip(reason="TF testing temporarily unavailable.") - @pytest.mark.skipif( - check_device_drivers("gpu"), reason="nvidia-smi not found" - ) - def test_module_static_gpu(self): - dynamic = False - device = "gpu" - self.module_tester.create_and_check_module(dynamic, device) - - @pytest.mark.skip(reason="TF testing temporarily unavailable.") - @pytest.mark.xfail( - reason="Language models currently failing for dynamic case" - ) - @pytest.mark.skipif( - check_device_drivers("gpu"), reason="nvidia-smi not found" - ) - def test_module_dynamic_gpu(self): - dynamic = True - device = "gpu" - self.module_tester.create_and_check_module(dynamic, device) - - @pytest.mark.skip(reason="TF testing temporarily unavailable.") - @pytest.mark.skipif( - check_device_drivers("vulkan"), - reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases", - ) - def test_module_static_vulkan(self): - dynamic = False - device = "vulkan" - self.module_tester.create_and_check_module(dynamic, device) - - 
@pytest.mark.skip(reason="TF testing temporarily unavailable.") - @pytest.mark.xfail( - reason="Language models currently failing for dynamic case" - ) - @pytest.mark.skipif( - check_device_drivers("vulkan"), - reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases", - ) - def test_module_dynamic_vulkan(self): - dynamic = True - device = "vulkan" - self.module_tester.create_and_check_module(dynamic, device) - - -if __name__ == "__main__": - unittest.main()
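How the new GPU benchmark path works: benchmark_torch no longer receives a frontend_model from each test; it loads the model itself via tank.model_utils.get_torch_model(modelname) and, when the device is "gpu", moves it to CUDA before the warmup and timing loops. Below is a minimal sketch of that flow, assuming the diff's names (num_warmup_iterations and num_iterations mirror shark_args); the helper itself is illustrative, not the SHARK API.

import time

import torch


def benchmark_torch_model(model, example_input, device="cpu",
                          num_warmup_iterations=5, num_iterations=10):
    # Mirror the diff's device selection: "gpu" maps to the first CUDA device.
    torch_device = torch.device("cuda:0" if device == "gpu" else "cpu")
    model = model.to(torch_device)
    # Tensor.to() is not in-place: its result must be reassigned. The hunk
    # above calls input.to(torch_device) without capturing the result, which
    # would leave the input tensor on the CPU.
    example_input = example_input.to(torch_device)
    with torch.no_grad():
        for _ in range(num_warmup_iterations):
            model(example_input)
        if torch_device.type == "cuda":
            torch.cuda.synchronize()  # drain warmup kernels before timing
        begin = time.time()
        for _ in range(num_iterations):
            model(example_input)
        if torch_device.type == "cuda":
            torch.cuda.synchronize()  # kernel launches are asynchronous
        end = time.time()
    seconds_per_iter = (end - begin) / num_iterations
    return 1.0 / seconds_per_iter, seconds_per_iter * 1000  # iter/sec, ms/iter

The torch.set_default_tensor_type(torch.cuda.FloatTensor) call in the hunk reaches a similar end by making newly constructed float tensors CUDA-resident process-wide; the explicit moves above are the narrower alternative. One caller/definition mismatch worth flagging in the same file: benchmark_frontend forwards (inputs, modelname) to benchmark_tf, while benchmark_tf is declared as (self, frontend_model, inputs), so the TF path would bind the inputs tuple to frontend_model.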
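The routing that makes this possible lives in tank/model_utils.py: get_torch_model dispatches on the model name, so a torchvision name loads through get_vision_model and anything else is treated as a Hugging Face model id. A self-contained sketch of the pattern, with the loaders reduced to the essentials (the real ones also return a reference output for comparison, as shown here):

import torch
import torchvision.models as models
from transformers import AutoModelForSequenceClassification, AutoTokenizer

VISION_MODELS = {
    "alexnet", "resnet18", "resnet50",
    "resnet101", "squeezenet1_0", "wide_resnet50_2",
}


def get_vision_model(name):
    # Every name in VISION_MODELS is a constructor in torchvision.models.
    model = getattr(models, name)(pretrained=True).eval()
    example_input = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        golden = model(example_input)
    return model, example_input, golden


def get_hf_model(name):
    model = AutoModelForSequenceClassification.from_pretrained(
        name, num_labels=2
    ).eval()
    tokenizer = AutoTokenizer.from_pretrained(name)
    example_input = tokenizer("benchmark input", return_tensors="pt").input_ids
    with torch.no_grad():
        golden = model(example_input)[0]
    return model, example_input, golden


def get_torch_model(modelname):
    if modelname in VISION_MODELS:
        return get_vision_model(modelname)
    return get_hf_model(modelname)

This is also why the tests no longer build their own frontend model before calling benchmark_all_csv; note, though, that the tests now pass (input), which is just input rather than the one-element tuple (input,) they passed before.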
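Results land in the newly checked-in tank/pytorch/bench_results.csv, whose header fixes the column order: platform, model, dynamic, device, iter/sec, ms/iter, datetime. A sketch of how a row might be appended (field names come from that header; the writer below is an illustration, not SHARK's exact implementation):

import csv
from datetime import datetime

FIELDNAMES = [
    "platform", "model", "dynamic", "device",
    "iter/sec", "ms/iter", "datetime",
]


def append_bench_row(path, bench_result):
    # bench_result is a dict keyed by FIELDNAMES, analogous to the
    # bench_result dict populated per platform in benchmark_all_csv.
    bench_result["datetime"] = datetime.now().isoformat()
    with open(path, "a", newline="") as f:
        csv.DictWriter(f, fieldnames=FIELDNAMES).writerow(bench_result)


# Example usage with made-up numbers:
append_bench_row(
    "tank/pytorch/bench_results.csv",
    {
        "platform": "frontend",
        "model": "resnet50",
        "dynamic": False,
        "device": "gpu",
        "iter/sec": 42.0,
        "ms/iter": 23.8,
    },
)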