From f96d96a7fcaa5bb06829d2c7de1992d6ab6e9235 Mon Sep 17 00:00:00 2001 From: Michael Andreas Dagitses Date: Thu, 9 Jun 2022 13:15:50 -0700 Subject: [PATCH] turn on -Werror=type-limits in our Bazel CPU build Summary: We also fix any existing issues. Test Plan: Built locally, rely on CI to confirm. Reviewers: malfet Subscribers: Tasks: Tags: Pull Request resolved: https://github.com/pytorch/pytorch/pull/79139 Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD --- .bazelrc | 21 +++++++++++++++++++ aten/src/ATen/native/cpu/BinaryOpsKernel.cpp | 5 +++-- aten/src/ATen/native/cpu/UnaryOpsKernel.cpp | 5 +++-- caffe2/core/prof_dag_counters.cc | 6 +++--- test/cpp/jit/test_lite_interpreter.cpp | 3 +-- test/cpp/jit/test_lite_trainer.cpp | 3 --- torch/csrc/distributed/rpc/utils.cpp | 2 +- torch/csrc/jit/codegen/cuda/compute_at.cpp | 2 +- .../jit/codegen/cuda/scheduler/mma_utils.cpp | 2 +- .../csrc/jit/codegen/cuda/transform_view.cpp | 6 +++--- torch/csrc/jit/mobile/function.cpp | 4 ++-- torch/csrc/jit/serialization/pickler.cpp | 20 ++++++++++-------- .../jit/tensorexpr/loopnest_randomization.cpp | 6 ------ 13 files changed, 50 insertions(+), 35 deletions(-) diff --git a/.bazelrc b/.bazelrc index 5e847c9270ebc..60d6fda7a7006 100644 --- a/.bazelrc +++ b/.bazelrc @@ -30,3 +30,24 @@ build:cpu-only --platform_suffix=-cpu-only build --copt=-isystem --copt=bazel-out/k8-fastbuild-cpu-only/bin # rules_cuda configuration build:cpu-only --@rules_cuda//cuda:enable_cuda=False + +# Set additional warnings to error level. +# +# Implementation notes: +# * we use file extensions to determine if we are using the C++ +# compiler or the cuda compiler +# * we use ^// at the start of the regex to only permit matching +# PyTorch files. This excludes external repos. +# +# Note that because this is logically a command-line flag, it is +# considered the final word on which warnings are enabled. This has the +# unfortunate consequence of preventing us from disabling an error at +# the target level because those flags will come before these flags in +# the action invocation. Instead we provide per-file exceptions after +# this. +# +# On the bright side, this means we don't have to apply the exceptions +# more broadly to an entire target. 
+build \ + --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \ + --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits diff --git a/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp b/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp index d35b68361782c..2c9ac5ac15b66 100644 --- a/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp +++ b/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include namespace at { @@ -133,7 +134,7 @@ void div_floor_kernel(TensorIteratorBase& iter) { AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_floor_cpu", [&]() { cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t { TORCH_CHECK(b != 0, "ZeroDivisionError"); - if ((a < 0) != (b < 0)) { + if (c10::is_negative(a) != c10::is_negative(b)) { // Subtracts one from the results of truncation division if the // divisor and dividend have different sign(bit)s and the remainder of // the division is nonzero @@ -198,7 +199,7 @@ void remainder_kernel(TensorIteratorBase& iter) { cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t { TORCH_CHECK(b != 0, "ZeroDivisionError"); scalar_t r = a % b; - if ((r != 0) && ((r < 0) != (b < 0))) { + if ((r != 0) && (c10::is_negative(r) != c10::is_negative(b))) { r += b; } return r; diff --git a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp index 0de37a3781d74..ba81ffbcfd4cf 100644 --- a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp +++ b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #if AT_MKL_ENABLED() @@ -284,7 +285,7 @@ void sign_kernel(TensorIteratorBase& iter){ cpu_kernel_vec( iter, - [=](scalar_t a) -> scalar_t { return (0 < a) - (a < 0); }, + [=](scalar_t a) -> scalar_t { return (0 < a) - c10::is_negative(a); }, [=](Vectorized self_vec){ // Comparison operators returns bitmask. @@ -301,7 +302,7 @@ static void signbit_kernel(TensorIteratorBase& iter){ // NOTE: signbit does not always support integral arguments. 
if (at::isIntegralType(iter.input_dtype(), /*includeBool=*/false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.input_dtype(), "signbit_cpu", [&]() { - cpu_kernel(iter, [](scalar_t a) -> bool { return a < 0; }); }); + cpu_kernel(iter, [](scalar_t a) -> bool { return c10::is_negative(a); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, ScalarType::Half, iter.input_dtype(), "signbit_cpu", [&]() { using opmath_t = at::opmath_type; diff --git a/caffe2/core/prof_dag_counters.cc b/caffe2/core/prof_dag_counters.cc index a2ba15b868efb..2dfeb9fb587ec 100644 --- a/caffe2/core/prof_dag_counters.cc +++ b/caffe2/core/prof_dag_counters.cc @@ -51,7 +51,7 @@ void ProfDAGCounters::AddPerOpStartTime(size_t op_id) { return; } - CAFFE_ENFORCE(op_id >= 0 && op_id < op_start_times_run_.size()); + CAFFE_ENFORCE(op_id < op_start_times_run_.size()); op_start_times_run_[op_id] = timer_.MilliSeconds(); } @@ -60,7 +60,7 @@ void ProfDAGCounters::AddPerOpEndTime(size_t op_id) { return; } - CAFFE_ENFORCE(op_id >= 0 && op_id < op_end_times_run_.size()); + CAFFE_ENFORCE(op_id < op_end_times_run_.size()); op_end_times_run_[op_id] = timer_.MilliSeconds(); } @@ -69,7 +69,7 @@ void ProfDAGCounters::AddPerOpAsyncEndTime(size_t op_id) { return; } - CAFFE_ENFORCE(op_id >= 0 && op_id < op_async_end_times_run_.size()); + CAFFE_ENFORCE(op_id < op_async_end_times_run_.size()); op_async_end_times_run_[op_id] = timer_.MilliSeconds(); } diff --git a/test/cpp/jit/test_lite_interpreter.cpp b/test/cpp/jit/test_lite_interpreter.cpp index d01c611bbaeca..cf291e5ebb03e 100644 --- a/test/cpp/jit/test_lite_interpreter.cpp +++ b/test/cpp/jit/test_lite_interpreter.cpp @@ -563,8 +563,7 @@ TEST(LiteInterpreterTest, GetContainTypes) { std::stringstream ss; m._save_for_mobile(ss, {}, true); - auto contained_types = _get_mobile_model_contained_types(ss); - AT_ASSERT(contained_types.size() >= 0); + _get_mobile_model_contained_types(ss); } namespace { diff --git a/test/cpp/jit/test_lite_trainer.cpp b/test/cpp/jit/test_lite_trainer.cpp index ede1c3a8355b4..b89dff8f71185 100644 --- a/test/cpp/jit/test_lite_trainer.cpp +++ b/test/cpp/jit/test_lite_trainer.cpp @@ -384,19 +384,16 @@ TEST(LiteTrainerTest, RandomSamplerReturnsIndicesInCorrectRange) { std::vector indices = sampler.next(3).value(); for (auto i : indices) { - AT_ASSERT(i >= 0); AT_ASSERT(i < 10); } indices = sampler.next(5).value(); for (auto i : indices) { - AT_ASSERT(i >= 0); AT_ASSERT(i < 10); } indices = sampler.next(2).value(); for (auto i : indices) { - AT_ASSERT(i >= 0); AT_ASSERT(i < 10); } diff --git a/torch/csrc/distributed/rpc/utils.cpp b/torch/csrc/distributed/rpc/utils.cpp index 571bb235184fe..12e3f2edf7558 100644 --- a/torch/csrc/distributed/rpc/utils.cpp +++ b/torch/csrc/distributed/rpc/utils.cpp @@ -492,8 +492,8 @@ std::vector readWrappedPayload( // Read the additional payload remove it from the payload. 
// NOLINTNEXTLINE(cppcoreguidelines-init-variables) int64_t additionalPayloadSize; + TORCH_INTERNAL_ASSERT(payload.size() >= sizeof(int64_t)); size_t indexToRead = payload.size() - sizeof(int64_t); - TORCH_INTERNAL_ASSERT(indexToRead >= 0); torch::utils::THP_decodeInt64Buffer( &additionalPayloadSize, reinterpret_cast(payload.data()) + indexToRead, diff --git a/torch/csrc/jit/codegen/cuda/compute_at.cpp b/torch/csrc/jit/codegen/cuda/compute_at.cpp index 77fc513638296..5603d5e54d577 100644 --- a/torch/csrc/jit/codegen/cuda/compute_at.cpp +++ b/torch/csrc/jit/codegen/cuda/compute_at.cpp @@ -905,7 +905,7 @@ ComputeAt::ComputeAt( " producer: ", producer_); TORCH_INTERNAL_ASSERT( - reference_position_ >= 0 && reference_position_ <= reference_->nDims(), + reference_position_ <= reference_->nDims(), "Invalid computeAt axis, received ", reference_position_, " but should be > -", diff --git a/torch/csrc/jit/codegen/cuda/scheduler/mma_utils.cpp b/torch/csrc/jit/codegen/cuda/scheduler/mma_utils.cpp index 6cb8be2610110..4eb8ff69e0484 100644 --- a/torch/csrc/jit/codegen/cuda/scheduler/mma_utils.cpp +++ b/torch/csrc/jit/codegen/cuda/scheduler/mma_utils.cpp @@ -186,7 +186,7 @@ void checkDimSize( ExpressionEvaluator const_eval(tv->fusion()); for (auto axis_index : c10::irange(axis.size())) { TORCH_INTERNAL_ASSERT( - ((axis[axis_index] + tv->nDims()) >= 0) && + ((axis[axis_index] + static_cast(tv->nDims())) >= 0) && (axis[axis_index] < (int)tv->nDims()), "CheckDimSize: axis position out of bound ", axis[axis_index], diff --git a/torch/csrc/jit/codegen/cuda/transform_view.cpp b/torch/csrc/jit/codegen/cuda/transform_view.cpp index 49e525d936d6b..290875c9c6d90 100644 --- a/torch/csrc/jit/codegen/cuda/transform_view.cpp +++ b/torch/csrc/jit/codegen/cuda/transform_view.cpp @@ -115,7 +115,7 @@ class MergeTransform final : public ViewTransform { const std::vector& new_root_domain, std::vector& rfactor_domain) override { TORCH_INTERNAL_ASSERT( - index_ >= 0 && (index_ + 1) < new_root_domain.size(), + (index_ + 1) < new_root_domain.size(), "Index: \t", index_, "\t Domain Size:\t", @@ -174,7 +174,7 @@ class SplitTransform final : public ViewTransform { const std::vector& new_root_domain, std::vector& rfactor_domain) override { TORCH_INTERNAL_ASSERT( - index_ >= 0 && index_ < new_root_domain.size(), + index_ < new_root_domain.size(), "Index: \t", index_, "\t Domain Size:\t", @@ -237,7 +237,7 @@ class KeepTransform final : public ViewTransform { const std::vector& new_root_domain, std::vector& rfactor_domain) override { TORCH_INTERNAL_ASSERT( - index_ >= 0 && index_ < new_root_domain.size(), + index_ < new_root_domain.size(), "Index: \t", index_, "\t Domain Size:\t", diff --git a/torch/csrc/jit/mobile/function.cpp b/torch/csrc/jit/mobile/function.cpp index c963fcb85828c..a69e409746a6f 100644 --- a/torch/csrc/jit/mobile/function.cpp +++ b/torch/csrc/jit/mobile/function.cpp @@ -211,13 +211,13 @@ c10::optional> makeOperatorFunction( out_args.push_back(stack.back()); stack.pop_back(); } - size_t start_index = num_specified_args.value() - out_args.size(); TORCH_CHECK( - start_index >= 0, + num_specified_args.value() >= out_args.size(), "The number of output arguments is: ", out_args.size(), ", which is more then the number of specified arguments: ", num_specified_args.value()); + size_t start_index = num_specified_args.value() - out_args.size(); for (size_t i = start_index; i < (args.size() - out_args.size()); ++i) { TORCH_CHECK( args[i].default_value().has_value(), diff --git a/torch/csrc/jit/serialization/pickler.cpp 
b/torch/csrc/jit/serialization/pickler.cpp index ac365142411e1..650dd638bcaa1 100644 --- a/torch/csrc/jit/serialization/pickler.cpp +++ b/torch/csrc/jit/serialization/pickler.cpp @@ -8,6 +8,7 @@ #include #include #include +#include namespace torch { namespace jit { @@ -595,18 +596,19 @@ void Pickler::pushDict(const IValue& ivalue) { push(PickleOpCode::EMPTY_DICT); - if (dict.size() >= 0) { - push(PickleOpCode::MARK); - - // Sort the dict for deterministic keys - for (const auto& entry : dict) { - pushIValue(entry.key()); - pushIValue(entry.value()); - } + static_assert( + std::is_unsigned::value, + "Expected size to be non-negative."); + push(PickleOpCode::MARK); - push(PickleOpCode::SETITEMS); + // Sort the dict for deterministic keys + for (const auto& entry : dict) { + pushIValue(entry.key()); + pushIValue(entry.value()); } + push(PickleOpCode::SETITEMS); + endTypeTag(ivalue); } diff --git a/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp b/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp index b7745eac06825..c603d5ead3d8c 100644 --- a/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp +++ b/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp @@ -574,9 +574,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) { case COMPRESS_BUFFER: { auto buffers = NodeFinder::find(l.root_stmt()); - if (buffers.size() < 0) { - break; - } int buffer_n = std::rand() % (int)buffers.size(); auto buffer = buffers[buffer_n]; @@ -589,9 +586,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) { case COMPRESS_ALL_BUFFERS: { auto buffers = BufFinder::find(l.root_stmt()); - if (buffers.size() < 0) { - break; - } message = "compressAllBuffers(l.root_stmt());\n"; randomization_helper::printHistory(n_transform, message);
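For context on the warning itself: -Wtype-limits fires on comparisons that are tautological for the operand's type, which is exactly the pattern the hunks above delete (op_id >= 0 on a size_t, buffers.size() < 0 on a container size). A minimal reproducer, assuming GCC and a hypothetical file name that is not part of this patch:

    // type_limits_demo.cpp -- illustration only, not from the patch.
    // Compile with: g++ -c -Werror=type-limits type_limits_demo.cpp
    #include <cstddef>

    bool in_range(std::size_t op_id, std::size_t size) {
      // GCC rejects the first comparison with a diagnostic along the
      // lines of: "comparison of unsigned expression >= 0 is always
      // true [-Werror=type-limits]"
      return op_id >= 0 && op_id < size;
    }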
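The ATen kernel hunks route sign tests through c10::is_negative so that integral dispatch over unsigned types (e.g. uint8_t under AT_DISPATCH_INTEGRAL_TYPES) no longer instantiates an a < 0 comparison. A minimal sketch of the idea behind that helper, assuming C++17; the real implementation ships in c10 and may differ:

    #include <type_traits>

    // For unsigned T the answer is a compile-time false; the other
    // branch is discarded at instantiation time, so no tautological
    // "x < 0" comparison ever reaches the compiler's warning machinery.
    template <typename T>
    constexpr bool is_negative(T x) {
      if constexpr (std::is_unsigned_v<T>) {
        return false;
      } else {
        return x < T(0);
      }
    }

With such a helper, (a < 0) != (b < 0) in div_floor_kernel becomes c10::is_negative(a) != c10::is_negative(b): identical behavior for signed scalar_t, warning-free for unsigned scalar_t.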
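Several hunks (torch/csrc/distributed/rpc/utils.cpp, torch/csrc/jit/mobile/function.cpp) also reorder unsigned arithmetic so the range check happens before the subtraction instead of after it: asserting indexToRead >= 0 after a size_t subtraction can never fail, because the subtraction wraps around rather than going negative. A sketch of the corrected ordering, with an illustrative function name that is not from the patch:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Validate before subtracting. If payload.size() < sizeof(int64_t),
    // the pre-patch order computed a wrapped-around (huge) index and
    // then ran a tautological ">= 0" check that could never fire.
    std::size_t wrappedPayloadOffset(const std::vector<char>& payload) {
      assert(payload.size() >= sizeof(std::int64_t));  // check first
      return payload.size() - sizeof(std::int64_t);    // now safe
    }

The pickler.cpp hunk makes the same point declaratively: rather than keeping an always-true if (dict.size() >= 0) branch, it static_asserts that the size type is unsigned and unconditionally emits the MARK/SETITEMS opcodes.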