turn on -Werror=type-limits in our Bazel CPU build
Summary:
We also fix the existing violations that the new warning flags.

Test Plan: Built locally, rely on CI to confirm.

Reviewers: malfet

Subscribers:

Tasks:

Tags:

Pull Request resolved: pytorch#79139

Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD
Michael Andreas Dagitses authored and pytorchmergebot committed Jun 10, 2022
1 parent 28c5417 commit f96d96a
Showing 13 changed files with 50 additions and 35 deletions.
21 changes: 21 additions & 0 deletions .bazelrc
@@ -30,3 +30,24 @@ build:cpu-only --platform_suffix=-cpu-only
build --copt=-isystem --copt=bazel-out/k8-fastbuild-cpu-only/bin
# rules_cuda configuration
build:cpu-only --@rules_cuda//cuda:enable_cuda=False

# Set additional warnings to error level.
#
# Implementation notes:
# * we use file extensions to determine if we are using the C++
# compiler or the cuda compiler
# * we use ^// at the start of the regex to only permit matching
# PyTorch files. This excludes external repos.
#
# Note that because this is logically a command-line flag, it is
# considered the final word on which warnings are enabled. This has the
# unfortunate consequence of preventing us from disabling an error at
# the target level because those flags will come before these flags in
# the action invocation. Instead we provide per-file exceptions after
# this.
#
# On the bright side, this means we don't have to apply the
# exceptions more broadly to an entire target.
build \
--per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
--per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits
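
For context, a minimal sketch (not part of this commit; hypothetical function name) of the kind of code -Wtype-limits flags: a comparison whose outcome is already decided by the types involved, such as testing an unsigned value against zero.

// Hypothetical example; under -Werror=type-limits this fails to compile.
#include <cstddef>

bool always_true(size_t op_id) {
  // GCC: comparison of unsigned expression >= 0 is always true [-Wtype-limits]
  return op_id >= 0;
}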
5 changes: 3 additions & 2 deletions aten/src/ATen/native/cpu/BinaryOpsKernel.cpp
@@ -12,6 +12,7 @@
#include <ATen/native/cpu/Loops.h>
#include <ATen/native/Math.h>
#include <c10/macros/Macros.h>
+ #include <c10/util/TypeSafeSignMath.h>
#include <c10/util/copysign.h>

namespace at {
@@ -133,7 +134,7 @@ void div_floor_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_floor_cpu", [&]() {
cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t {
TORCH_CHECK(b != 0, "ZeroDivisionError");
- if ((a < 0) != (b < 0)) {
+ if (c10::is_negative(a) != c10::is_negative(b)) {
// Subtracts one from the results of truncation division if the
// divisor and dividend have different sign(bit)s and the remainder of
// the division is nonzero
@@ -198,7 +199,7 @@ void remainder_kernel(TensorIteratorBase& iter) {
cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t {
TORCH_CHECK(b != 0, "ZeroDivisionError");
scalar_t r = a % b;
- if ((r != 0) && ((r < 0) != (b < 0))) {
+ if ((r != 0) && (c10::is_negative(r) != c10::is_negative(b))) {
r += b;
}
return r;
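
The changes above route sign checks through c10::is_negative from c10/util/TypeSafeSignMath.h, which matters because AT_DISPATCH_INTEGRAL_TYPES also instantiates these lambdas for unsigned types such as uint8_t, where a plain a < 0 would trip -Werror=type-limits. A rough sketch of how such a helper can avoid the warning (an illustration only; the real c10::is_negative may be implemented differently):

// Sketch of a type-safe sign check (C++17); not the actual c10 implementation.
#include <type_traits>

template <typename T>
constexpr bool is_negative_sketch(T x) {
  if constexpr (std::is_unsigned<T>::value) {
    (void)x;       // unsigned values are never negative,
    return false;  // and no `x < 0` comparison is ever instantiated
  } else {
    return x < T(0);
  }
}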
5 changes: 3 additions & 2 deletions aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
@@ -20,6 +20,7 @@
#include <c10/util/math_compat.h>
#include <c10/util/MathConstants.h>
#include <c10/core/Scalar.h>
+ #include <c10/util/TypeSafeSignMath.h>
#include <c10/util/irange.h>

#if AT_MKL_ENABLED()
@@ -284,7 +285,7 @@ void sign_kernel(TensorIteratorBase& iter){

cpu_kernel_vec(
iter,
- [=](scalar_t a) -> scalar_t { return (0 < a) - (a < 0); },
+ [=](scalar_t a) -> scalar_t { return (0 < a) - c10::is_negative(a); },
[=](Vectorized<scalar_t> self_vec){

// Comparison operators return a bitmask.
@@ -301,7 +302,7 @@ static void signbit_kernel(TensorIteratorBase& iter){
// NOTE: signbit does not always support integral arguments.
if (at::isIntegralType(iter.input_dtype(), /*includeBool=*/false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.input_dtype(), "signbit_cpu", [&]() {
- cpu_kernel(iter, [](scalar_t a) -> bool { return a < 0; }); });
+ cpu_kernel(iter, [](scalar_t a) -> bool { return c10::is_negative(a); }); });
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, ScalarType::Half, iter.input_dtype(), "signbit_cpu", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
6 changes: 3 additions & 3 deletions caffe2/core/prof_dag_counters.cc
@@ -51,7 +51,7 @@ void ProfDAGCounters::AddPerOpStartTime(size_t op_id) {
return;
}

- CAFFE_ENFORCE(op_id >= 0 && op_id < op_start_times_run_.size());
+ CAFFE_ENFORCE(op_id < op_start_times_run_.size());
op_start_times_run_[op_id] = timer_.MilliSeconds();
}

@@ -60,7 +60,7 @@ void ProfDAGCounters::AddPerOpEndTime(size_t op_id) {
return;
}

- CAFFE_ENFORCE(op_id >= 0 && op_id < op_end_times_run_.size());
+ CAFFE_ENFORCE(op_id < op_end_times_run_.size());
op_end_times_run_[op_id] = timer_.MilliSeconds();
}

@@ -69,7 +69,7 @@ void ProfDAGCounters::AddPerOpAsyncEndTime(size_t op_id) {
return;
}

- CAFFE_ENFORCE(op_id >= 0 && op_id < op_async_end_times_run_.size());
+ CAFFE_ENFORCE(op_id < op_async_end_times_run_.size());
op_async_end_times_run_[op_id] = timer_.MilliSeconds();
}

3 changes: 1 addition & 2 deletions test/cpp/jit/test_lite_interpreter.cpp
@@ -563,8 +563,7 @@ TEST(LiteInterpreterTest, GetContainTypes) {
std::stringstream ss;
m._save_for_mobile(ss, {}, true);

- auto contained_types = _get_mobile_model_contained_types(ss);
- AT_ASSERT(contained_types.size() >= 0);
+ _get_mobile_model_contained_types(ss);
}

namespace {
3 changes: 0 additions & 3 deletions test/cpp/jit/test_lite_trainer.cpp
@@ -384,19 +384,16 @@ TEST(LiteTrainerTest, RandomSamplerReturnsIndicesInCorrectRange) {

std::vector<size_t> indices = sampler.next(3).value();
for (auto i : indices) {
- AT_ASSERT(i >= 0);
AT_ASSERT(i < 10);
}

indices = sampler.next(5).value();
for (auto i : indices) {
- AT_ASSERT(i >= 0);
AT_ASSERT(i < 10);
}

indices = sampler.next(2).value();
for (auto i : indices) {
- AT_ASSERT(i >= 0);
AT_ASSERT(i < 10);
}

2 changes: 1 addition & 1 deletion torch/csrc/distributed/rpc/utils.cpp
@@ -492,8 +492,8 @@ std::vector<at::IValue> readWrappedPayload(
// Read the additional payload and remove it from the payload.
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t additionalPayloadSize;
+ TORCH_INTERNAL_ASSERT(payload.size() >= sizeof(int64_t));
size_t indexToRead = payload.size() - sizeof(int64_t);
- TORCH_INTERNAL_ASSERT(indexToRead >= 0);
torch::utils::THP_decodeInt64Buffer(
&additionalPayloadSize,
reinterpret_cast<uint8_t*>(payload.data()) + indexToRead,
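
The reordering above reflects that indexToRead is a size_t: asserting indexToRead >= 0 is always true (exactly what -Wtype-limits reports), and the subtraction would wrap rather than go negative if the payload were too small. A standalone sketch of the pitfall and the fix (hypothetical helpers, not from the commit):

// Hypothetical illustration of why the bounds check must precede the
// unsigned subtraction.
#include <cassert>
#include <cstddef>
#include <cstdint>

size_t index_unchecked(size_t payload_size) {
  // If payload_size < sizeof(int64_t), this wraps to a huge value instead
  // of going negative, so a later `index >= 0` check can never fail.
  return payload_size - sizeof(int64_t);
}

size_t index_checked(size_t payload_size) {
  assert(payload_size >= sizeof(int64_t));  // validate before subtracting
  return payload_size - sizeof(int64_t);
}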
2 changes: 1 addition & 1 deletion torch/csrc/jit/codegen/cuda/compute_at.cpp
@@ -905,7 +905,7 @@ ComputeAt::ComputeAt(
" producer: ",
producer_);
TORCH_INTERNAL_ASSERT(
- reference_position_ >= 0 && reference_position_ <= reference_->nDims(),
+ reference_position_ <= reference_->nDims(),
"Invalid computeAt axis, received ",
reference_position_,
" but should be > -",
2 changes: 1 addition & 1 deletion torch/csrc/jit/codegen/cuda/scheduler/mma_utils.cpp
@@ -186,7 +186,7 @@ void checkDimSize(
ExpressionEvaluator const_eval(tv->fusion());
for (auto axis_index : c10::irange(axis.size())) {
TORCH_INTERNAL_ASSERT(
- ((axis[axis_index] + tv->nDims()) >= 0) &&
+ ((axis[axis_index] + static_cast<int>(tv->nDims())) >= 0) &&
(axis[axis_index] < (int)tv->nDims()),
"CheckDimSize: axis position out of bound ",
axis[axis_index],
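
The static_cast above matters because tv->nDims() appears to return an unsigned value (the existing (int) cast on the following line suggests as much): adding it to a signed axis index promotes the sum to unsigned, so the >= 0 half of the check is vacuously true. A standalone sketch of the same pattern (hypothetical function, not from the commit):

// Hypothetical illustration of keeping mixed signed/unsigned bounds checks
// in signed arithmetic.
#include <cstddef>

bool axis_in_range(int axis, size_t ndims) {
  // The pre-fix pattern computed (axis + ndims) as unsigned, making the
  // `>= 0` comparison always true and tripping -Werror=type-limits.
  return (axis + static_cast<int>(ndims)) >= 0 &&
         axis < static_cast<int>(ndims);
}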
6 changes: 3 additions & 3 deletions torch/csrc/jit/codegen/cuda/transform_view.cpp
@@ -115,7 +115,7 @@ class MergeTransform final : public ViewTransform {
const std::vector<IterDomain*>& new_root_domain,
std::vector<IterDomain*>& rfactor_domain) override {
TORCH_INTERNAL_ASSERT(
- index_ >= 0 && (index_ + 1) < new_root_domain.size(),
+ (index_ + 1) < new_root_domain.size(),
"Index: \t",
index_,
"\t Domain Size:\t",
@@ -174,7 +174,7 @@ class SplitTransform final : public ViewTransform {
const std::vector<IterDomain*>& new_root_domain,
std::vector<IterDomain*>& rfactor_domain) override {
TORCH_INTERNAL_ASSERT(
- index_ >= 0 && index_ < new_root_domain.size(),
+ index_ < new_root_domain.size(),
"Index: \t",
index_,
"\t Domain Size:\t",
@@ -237,7 +237,7 @@ class KeepTransform final : public ViewTransform {
const std::vector<IterDomain*>& new_root_domain,
std::vector<IterDomain*>& rfactor_domain) override {
TORCH_INTERNAL_ASSERT(
- index_ >= 0 && index_ < new_root_domain.size(),
+ index_ < new_root_domain.size(),
"Index: \t",
index_,
"\t Domain Size:\t",
4 changes: 2 additions & 2 deletions torch/csrc/jit/mobile/function.cpp
@@ -211,13 +211,13 @@ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
out_args.push_back(stack.back());
stack.pop_back();
}
- size_t start_index = num_specified_args.value() - out_args.size();
TORCH_CHECK(
- start_index >= 0,
+ num_specified_args.value() >= out_args.size(),
"The number of output arguments is: ",
out_args.size(),
", which is more than the number of specified arguments: ",
num_specified_args.value());
+ size_t start_index = num_specified_args.value() - out_args.size();
for (size_t i = start_index; i < (args.size() - out_args.size()); ++i) {
TORCH_CHECK(
args[i].default_value().has_value(),
20 changes: 11 additions & 9 deletions torch/csrc/jit/serialization/pickler.cpp
@@ -8,6 +8,7 @@
#include <torch/csrc/jit/api/function_impl.h>
#include <torch/csrc/jit/serialization/pickler.h>
#include <string>
+ #include <type_traits>

namespace torch {
namespace jit {
@@ -595,18 +596,19 @@ void Pickler::pushDict(const IValue& ivalue) {

push<PickleOpCode>(PickleOpCode::EMPTY_DICT);

- if (dict.size() >= 0) {
- push<PickleOpCode>(PickleOpCode::MARK);

- // Sort the dict for deterministic keys
- for (const auto& entry : dict) {
- pushIValue(entry.key());
- pushIValue(entry.value());
- }
+ static_assert(
+ std::is_unsigned<decltype(dict.size())>::value,
+ "Expected size to be non-negative.");
+ push<PickleOpCode>(PickleOpCode::MARK);

- push<PickleOpCode>(PickleOpCode::SETITEMS);
- }
+ // Sort the dict for deterministic keys
+ for (const auto& entry : dict) {
+ pushIValue(entry.key());
+ pushIValue(entry.value());
+ }

+ push<PickleOpCode>(PickleOpCode::SETITEMS);

endTypeTag(ivalue);
}

Expand Down
6 changes: 0 additions & 6 deletions torch/csrc/jit/tensorexpr/loopnest_randomization.cpp
@@ -574,9 +574,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {

case COMPRESS_BUFFER: {
auto buffers = NodeFinder<Buf>::find(l.root_stmt());
- if (buffers.size() < 0) {
- break;
- }
int buffer_n = std::rand() % (int)buffers.size();
auto buffer = buffers[buffer_n];

@@ -589,9 +586,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {

case COMPRESS_ALL_BUFFERS: {
auto buffers = BufFinder::find(l.root_stmt());
- if (buffers.size() < 0) {
- break;
- }

message = "compressAllBuffers(l.root_stmt());\n";
randomization_helper::printHistory(n_transform, message);
