Skip to content

Commit b162b7c

Browse files
vkuzojainapurva
authored andcommitted
add CI to disallow syntax errors and undefined vars in all Python files (#861)
Summary: Adds two codebase-wide checks for Python files: 1. syntax errors (E999) 2. undefined variables (F821) Both of these resulted in internal breakages recently, so would be good to just have CI block these from landing in OSS. Test Plan: Tested that the new rules pass locally: ``` ruff check --isolated --select E999,F821 ``` Reviewers: Subscribers: Tasks: Tags:
1 parent ced1f50 commit b162b7c

File tree

10 files changed

+19
-8
lines changed

10 files changed

+19
-8
lines changed

.github/workflows/ruff_linter.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@ jobs:
2525
- name: Analyzing the code with ruff
2626
run: |
2727
ruff check .
28+
- name: Check all Python files for syntax errors (E999) and undefined vars (F821)
29+
run: |
30+
ruff check --isolated --select E999,F821
2831
- name: Check well formatted code
2932
run: |
3033
ruff format --check

benchmarks/benchmark_gpu_sparsity.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,10 @@ def run_gpu_sparse_benchmark(m, k, n, args):
5656
elif args.eval_fn == "mm":
5757
dense_output = torch.mm(A, x.t())
5858
sparse_output = torch.mm(A_sparse, x.t())
59-
dense_time = benchmark_in_us(torch.mm, A, x.t())
60-
sparse_time = benchmark_in_us(torch.mm, A_sparse, x.t())
59+
# dense_time = benchmark_in_us(torch.mm, A, x.t())
60+
# sparse_time = benchmark_in_us(torch.mm, A_sparse, x.t())
61+
# TODO(future PR) fixme
62+
dense_time, sparse_time = 1.0, 1.0
6163
else:
6264
raise ValueError(f"Unknown eval_fn: {args.eval_fn}")
6365

benchmarks/float8/bench_matmul.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,8 @@ def run(
113113
scale_b = torch.tensor([1.0], device=device)
114114

115115
def do_matmul(A, B):
116+
nonlocal scale_a
117+
nonlocal scale_b
116118
return torch._scaled_mm(
117119
A, B, scale_a, scale_b, out_dtype=d3, use_fast_accum=fast_accum
118120
)

torchao/dtypes/affine_quantized_tensor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1237,7 +1237,7 @@ def _linear_bf16_act_uint4_weight_check(input_tensor, weight_tensor, bias):
12371237

12381238

12391239
def _linear_bf16_act_uint4_weight_impl(input_tensor, weight_tensor, bias):
1240-
assert weight_tensor.block_size[0] == 1, f"Requires groupwise quantization, got block_size: {block_size}"
1240+
assert weight_tensor.block_size[0] == 1, f"Requires groupwise quantization, got block_size: {weight_tensor.block_size}"
12411241
assert input_tensor.shape[-1] == weight_tensor.shape[1], (
12421242
f"need input_tensor shape: {input_tensor.shape} final"
12431243
f"dim to match weight_tensor shape: {weight_tensor.shape} second dim "

torchao/quantization/prototype/mixed_precision/scripts/BO_acc_modelsize.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
import torch.multiprocessing as mp
1818
from ax.modelbridge.cross_validation import cross_validate
1919
from utils import write_history_to_csv, cal_wikitext_ppl, cal_model_size, load_model, quantize_by_fqn_to_config, load_parameters_from_json, load_initial_samples
20+
from BO_acc_throughput import define_parameter_list
2021

2122
# return evaluation results to complete BO trials
2223
def eval(model, tokenizer, num_PPL_eval_samples, fqn_to_config):

torchao/quantization/prototype/mixed_precision/scripts/BO_acc_throughput.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@
4545
_load_model,
4646
)
4747

48-
from utils import write_history_to_csv, cal_wikitext_ppl, load_model, quantize_by_fqn_to_config, load_parameters_from_json
48+
from utils import write_history_to_csv, cal_wikitext_ppl, load_model, quantize_by_fqn_to_config, load_parameters_from_json, load_initial_samples
4949

5050
default_device = 'cuda' if torch.cuda.is_available() else 'cpu'
5151

@@ -380,6 +380,8 @@ def run_sequential_BO(device, checkpoint_path, repo_id, num_PPL_eval_samples, nu
380380
parameters_list = load_parameters_from_json(args.parameters_list)
381381

382382
# sample initial points
383+
# TODO(future PR): fix me
384+
initial_samples = []
383385
initial_points_set = load_initial_samples(initial_samples)
384386
num_BO_initial_samples = len(initial_points_set)
385387

torchao/quantization/subclass.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,7 @@ class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase):
231231
@staticmethod
232232
def __new__(cls, int_data, q_scales, transposed, shape, dtype=None, **kwargs):
233233
if dtype is None:
234-
dtype = qscales.dtype
234+
dtype = q_scales.dtype
235235
kwargs["dtype"] = dtype
236236
return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]
237237

torchao/sparsity/prototype/superblock/evaluate.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717
from torchao.sparsity import sparsify_, semi_sparse_weight
1818
from torchao.sparsity.prototype.superblock.supermask import apply_supermask
19-
from torchao.sparsity.prototype.superblock.utils import apply_sparsity, verify_sparsity, mlp_only_with_args
19+
from torchao.sparsity.prototype.superblock.utils import apply_sparsity, verify_sparsity, mlp_only_with_args, simulate_sparsity, accelerate_with_sparsity
2020
from torchao.sparsity.prototype.superblock.train import evaluate, _get_cache_path, load_data
2121
from torchao.sparsity.prototype.sparsifier.weight_norm_sparsifier import WeightNormSparsifier
2222

@@ -56,7 +56,7 @@ def main(args):
5656
model.to(device).bfloat16()
5757

5858
if sparsifier_or_none is not None:
59-
sparsifier.squash_mask()
59+
sparsifier_or_none.squash_mask()
6060
accelerate_with_sparsity(model, args)
6161

6262
criterion = nn.CrossEntropyLoss(label_smoothing=args.label_smoothing)

torchao/sparsity/prototype/superblock/utils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
import torch
1313
import torch.distributed as dist
1414

15+
from torchao.quantization import quantize_, int8_dynamic_activation_int8_semi_sparse_weight
1516
from torchao.sparsity import sparsify_, semi_sparse_weight
1617
from torchao.sparsity.prototype.superblock.supermask import SupermaskLinear, apply_supermask
1718
from torchao.sparsity.prototype.superblock.blocksparse import block_sparse_weight

tutorials/developer_api_guide/my_dtype_tensor_subclass.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
LayoutType,
2121
PlainLayoutType,
2222
)
23-
from torchao.utils import TorchAOBaseTensor
23+
from torchao.utils import TorchAOBaseTensor, _register_layout_cls, _get_layout_tensor_constructor
2424

2525
aten = torch.ops.aten
2626

0 commit comments

Comments
 (0)