AO migration: replace torch internal callsites (pytorch#94170)
Summary:

Do the following renames:
`torch.quantization` -> `torch.ao.quantization`
`torch.nn.quantized` -> `torch.ao.nn.quantized`
`torch.nn.quantizable` -> `torch.ao.nn.quantizable`
`torch.nn.qat` -> `torch.ao.nn.qat`
`torch.nn.intrinsic` -> `torch.ao.nn.intrinsic`

Then rename `torch.ao.nn.quantized._reference` -> `torch.ao.nn.quantized.reference` to clean up the aftermath of pytorch#84974.
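For downstream callers the change is purely an import-path update; the APIs themselves are unchanged. A minimal eager-mode sketch of a migrated callsite (illustrative example only, not code from this PR; assumes a build with the fbgemm quantization backend):

```python
import torch
import torch.nn as nn
import torch.ao.quantization as tq  # previously: import torch.quantization as tq
import torch.ao.nn.quantized        # previously: torch.nn.quantized


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = tq.QuantStub()
        self.fc = nn.Linear(4, 4)
        self.dequant = tq.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))


model = TinyModel().eval()
model.qconfig = tq.get_default_qconfig("fbgemm")
tq.prepare(model, inplace=True)
model(torch.randn(2, 4))  # calibration pass with observers attached
tq.convert(model, inplace=True)
# the converted Linear now lives under the torch.ao namespace
assert isinstance(model.fc, torch.ao.nn.quantized.Linear)
```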

Then, manually update `test/test_module_init.py` to fix hanging whitespace left by the replacement.

Run this script to do the replacements: https://gist.github.com/vkuzo/7f7afebf8c31b9ba48306223e68a1c82
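The gist's contents are not reproduced here; as a rough idea of what such a codemod looks like, the sketch below walks the tree and applies the renames with plain string replacement (hypothetical, not the actual script; run from the repository root):

```python
# Hypothetical rename codemod sketching what the linked gist does; not the actual script.
from pathlib import Path

# Order matters: the generic renames run first, then the _reference cleanup.
RENAMES = [
    ("torch.quantization", "torch.ao.quantization"),
    ("torch.nn.quantized", "torch.ao.nn.quantized"),
    ("torch.nn.quantizable", "torch.ao.nn.quantizable"),
    ("torch.nn.qat", "torch.ao.nn.qat"),
    ("torch.nn.intrinsic", "torch.ao.nn.intrinsic"),
    ("torch.ao.nn.quantized._reference", "torch.ao.nn.quantized.reference"),
]

for path in Path(".").rglob("*.py"):
    original = path.read_text()
    updated = original
    for old, new in RENAMES:
        updated = updated.replace(old, new)
    if updated != original:
        path.write_text(updated)
        print(f"rewrote {path}")
```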

This is for pytorch#81667

Test plan: CI
Pull Request resolved: pytorch#94170
Approved by: https://github.com/jerryzh168
vkuzo authored and pytorchmergebot committed Feb 7, 2023
1 parent a9f57db commit f15ab8a
Showing 61 changed files with 355 additions and 355 deletions.
18 changes: 9 additions & 9 deletions test/ao/sparsity/test_composability.py
@@ -188,7 +188,7 @@ def test_s_prep_before_fusion(self):
)

# check that final module is the expected quantized module and that the model runs
-self.assertTrue(isinstance(mod[5], torch.nn.intrinsic.quantized.LinearReLU))
+self.assertTrue(isinstance(mod[5], torch.ao.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))

# This tests whether performing fusion before sparse prepare causes and issues. The
@@ -230,7 +230,7 @@ def test_fusion_before_s_prep(self):
tq.convert(mod, inplace=True)

# check that final module is the expected quantized module and that the model runs
-self.assertTrue(isinstance(mod[5], torch.nn.intrinsic.quantized.LinearReLU))
+self.assertTrue(isinstance(mod[5], torch.ao.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))

# check that module was actually sparsified
@@ -375,7 +375,7 @@ def test_q_prep_fx_before_s_prep(self):
mod = convert_fx(mod)

# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.quantized.LinearReLU))
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.ao.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))

# check that module was actually sparsified
@@ -433,9 +433,9 @@ def test_q_prep_fx_s_prep_ref_conv(self):
mod = convert_to_reference_fx(mod)

# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.LinearReLU))
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.ao.nn.intrinsic.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))
self.assertTrue(isinstance(fqn_to_module(mod, "5.0"), torch.nn.quantized._reference.Linear))
self.assertTrue(isinstance(fqn_to_module(mod, "5.0"), torch.ao.nn.quantized.reference.Linear))

# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
@@ -479,7 +479,7 @@ def test_s_prep_before_q_prep_fx(self):
mod = convert_fx(mod)

# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.quantized.LinearReLU))
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.ao.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))

# check that module was actually sparsified
@@ -525,7 +525,7 @@ def test_s_prep_before_qat_prep_fx(self):
mod = convert_fx(mod)

# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.quantized.LinearReLU))
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.ao.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))

# check that module was actually sparsified
@@ -570,9 +570,9 @@ def test_s_prep_q_prep_fx_ref(self):
mod = convert_to_reference_fx(mod)

# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.LinearReLU))
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.ao.nn.intrinsic.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))
self.assertTrue(isinstance(fqn_to_module(mod, "5.0"), torch.nn.quantized._reference.Linear))
self.assertTrue(isinstance(fqn_to_module(mod, "5.0"), torch.ao.nn.quantized.reference.Linear))

# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
12 changes: 6 additions & 6 deletions test/ao/sparsity/test_data_sparsifier.py
@@ -533,8 +533,8 @@ def test_ptq_sparsify_first(self):
select_embeddings=select_embeddings,
**sparse_config)

-assert type(model.emb1) == torch.nn.quantized.modules.embedding_ops.Embedding
-assert type(model.embbag1) == torch.nn.quantized.modules.embedding_ops.EmbeddingBag
+assert type(model.emb1) == torch.ao.nn.quantized.modules.embedding_ops.Embedding
+assert type(model.embbag1) == torch.ao.nn.quantized.modules.embedding_ops.EmbeddingBag
assert type(model.emb_seq[0] == nn.Embedding)
assert type(model.emb_seq[1] == nn.EmbeddingBag)
assert type(model.linear1) == nn.Linear
@@ -568,10 +568,10 @@ def test_ptq_quantize_first(self):
sparse_config = {'sparsity_level': 0.8, 'sparse_block_shape': (1, 1)}
post_training_sparse_quantize(model, DataNormSparsifier, sparsify_first=False, **sparse_config)

-assert type(model.emb1) == torch.nn.quantized.modules.embedding_ops.Embedding
-assert type(model.embbag1) == torch.nn.quantized.modules.embedding_ops.EmbeddingBag
-assert type(model.emb_seq[0] == torch.nn.quantized.modules.embedding_ops.Embedding)
-assert type(model.emb_seq[1] == torch.nn.quantized.modules.embedding_ops.EmbeddingBag)
+assert type(model.emb1) == torch.ao.nn.quantized.modules.embedding_ops.Embedding
+assert type(model.embbag1) == torch.ao.nn.quantized.modules.embedding_ops.EmbeddingBag
+assert type(model.emb_seq[0] == torch.ao.nn.quantized.modules.embedding_ops.Embedding)
+assert type(model.emb_seq[1] == torch.ao.nn.quantized.modules.embedding_ops.EmbeddingBag)
assert type(model.linear1) == nn.Linear # not quantized
assert type(model.linear2) == nn.Linear # not quantized

34 changes: 17 additions & 17 deletions test/mobile/model_test/quantization_ops.py
@@ -5,14 +5,14 @@
class GeneralQuantModule(torch.nn.Module):
def __init__(self):
super(GeneralQuantModule, self).__init__()
-self.embedding = torch.nn.quantized.Embedding(
+self.embedding = torch.ao.nn.quantized.Embedding(
num_embeddings=10, embedding_dim=12
)
self.embedding_input = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
-self.func = torch.nn.quantized.QFunctional()
-self.conv1 = torch.nn.quantized.ConvTranspose1d(16, 33, 3, stride=2)
-self.conv2 = torch.nn.quantized.ConvTranspose2d(16, 33, 3, stride=2)
-self.conv3 = torch.nn.quantized.ConvTranspose3d(16, 33, 3, stride=2)
+self.func = torch.ao.nn.quantized.QFunctional()
+self.conv1 = torch.ao.nn.quantized.ConvTranspose1d(16, 33, 3, stride=2)
+self.conv2 = torch.ao.nn.quantized.ConvTranspose2d(16, 33, 3, stride=2)
+self.conv3 = torch.ao.nn.quantized.ConvTranspose3d(16, 33, 3, stride=2)

def forward(self):
a = torch.quantize_per_tensor(torch.tensor([3.0]), 1.0, 0, torch.qint32)
@@ -52,7 +52,7 @@ def __init__(self):
self.module = self.M()

def getModule(self):
-return torch.quantization.quantize_dynamic(self.module, dtype=torch.qint8)
+return torch.ao.quantization.quantize_dynamic(self.module, dtype=torch.qint8)

class M(torch.nn.Module):
def __init__(self):
@@ -117,15 +117,15 @@ def __init__(self):
def getModule(self):
model_fp32 = self.M()
model_fp32.eval()
-model_fp32.qconfig = torch.quantization.get_default_qconfig("qnnpack")
-model_fp32_prepared = torch.quantization.prepare(model_fp32)
-model_int8 = torch.quantization.convert(model_fp32_prepared)
+model_fp32.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
+model_fp32_prepared = torch.ao.quantization.prepare(model_fp32)
+model_int8 = torch.ao.quantization.convert(model_fp32_prepared)
return model_int8

class M(torch.nn.Module):
def __init__(self):
super(StaticQuantModule.M, self).__init__()
-self.quant = torch.quantization.QuantStub()
+self.quant = torch.ao.quantization.QuantStub()
self.input1d = torch.randn(4, 2, 2)
self.input2d = torch.randn((4, 2, 4, 4))
self.input3d = torch.randn(4, 2, 2, 4, 4)
@@ -144,7 +144,7 @@ def __init__(self):
nn.Conv3d(2, 2, 1), nn.BatchNorm3d(2), nn.InstanceNorm3d(1), nn.ReLU()
)
self.layer4 = nn.Sequential(nn.Linear(4, 3))
-self.dequant = torch.quantization.DeQuantStub()
+self.dequant = torch.ao.quantization.DeQuantStub()

def forward(self):
x = self.quant(self.input1d)
@@ -171,8 +171,8 @@ def __init__(self):
def getModule(self):
model_fp32 = self.M()
model_fp32.eval()
-model_fp32.qconfig = torch.quantization.get_default_qconfig("qnnpack")
-model_fp32_fused = torch.quantization.fuse_modules(
+model_fp32.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
+model_fp32_fused = torch.ao.quantization.fuse_modules(
model_fp32,
[
["conv1d", "relu1"],
@@ -181,14 +181,14 @@ def getModule(self):
["linear", "relu4"],
],
)
-model_fp32_prepared = torch.quantization.prepare(model_fp32_fused)
-model_int8 = torch.quantization.convert(model_fp32_prepared)
+model_fp32_prepared = torch.ao.quantization.prepare(model_fp32_fused)
+model_int8 = torch.ao.quantization.convert(model_fp32_prepared)
return model_int8

class M(torch.nn.Module):
def __init__(self):
super(FusedQuantModule.M, self).__init__()
-self.quant = torch.quantization.QuantStub()
+self.quant = torch.ao.quantization.QuantStub()
self.input1d = torch.randn(4, 2, 2)
self.input2d = torch.randn((4, 2, 4, 4))
self.input3d = torch.randn(4, 2, 2, 4, 4)
@@ -200,7 +200,7 @@ def __init__(self):
self.relu2 = nn.ReLU()
self.relu3 = nn.ReLU()
self.relu4 = nn.ReLU()
-self.dequant = torch.quantization.DeQuantStub()
+self.dequant = torch.ao.quantization.DeQuantStub()

def forward(self):
x = self.input1d
2 changes: 1 addition & 1 deletion test/onnx/test_models.py
@@ -13,7 +13,7 @@
from model_defs.srresnet import SRResNet
from model_defs.super_resolution import SuperResolutionNet
from pytorch_test_common import skipIfUnsupportedMinOpsetVersion, skipScriptTest
-from torch import quantization
+from torch.ao import quantization
from torch.autograd import Variable
from torch.onnx import OperatorExportTypes
from torch.testing._internal import common_utils
4 changes: 2 additions & 2 deletions test/onnx/test_pytorch_onnx_no_runtime.py
@@ -1102,11 +1102,11 @@ def test_onnx_aten_fallback_must_not_fallback(self):
class ONNXExportable(torch.nn.Module):
def __init__(self):
super(ONNXExportable, self).__init__()
-self.quant = torch.quantization.QuantStub()
+self.quant = torch.ao.quantization.QuantStub()
self.fc1 = torch.nn.Linear(12, 8)
self.fc2 = torch.nn.Linear(8, 4)
self.fc3 = torch.nn.Linear(4, 6)
-self.dequant = torch.quantization.DeQuantStub()
+self.dequant = torch.ao.quantization.DeQuantStub()

def forward(self, x):
x = self.quant(x)