
Commit 52c2c6b

Fix compile warnings.
Fixes instances of comparisons between 'signed' and 'unsigned'.
Parent: ae1668f

File tree: 12 files changed, +40 -26 lines

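For context, a minimal sketch of the class of warning this commit fixes; the function below is hypothetical, not code from the diff. With -Wsign-compare enabled, comparing a signed loop index against an unsigned container size() is flagged because the signed operand is implicitly converted to unsigned before the comparison; switching the index to the container's size_type (or size_t) makes both operands unsigned, which is the pattern applied in every hunk below.

#include <vector>

// Hypothetical example, not from this commit.
int SumAll(const std::vector<int>& values) {
  int sum = 0;
  // Before: triggers "comparison between signed and unsigned integer
  // expressions" under -Wsign-compare:
  //   for (int i = 0; i < values.size(); ++i) { ... }
  // After: the index uses the container's unsigned size_type.
  for (std::vector<int>::size_type i = 0; i < values.size(); ++i) {
    sum += values[i];
  }
  return sum;
}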

tensorflow/compiler/xla/layout_util.cc

Lines changed: 2 additions & 1 deletion
@@ -287,7 +287,8 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
 /* static */ std::vector<int64> LayoutUtil::MakeLogicalToPhysical(
     const Layout& layout) {
   std::vector<int64> logical_to_physical(layout.minor_to_major_size());
-  for (int64 physical = 0; physical < logical_to_physical.size(); ++physical) {
+  for (std::vector<int64>::size_type physical = 0;
+       physical < logical_to_physical.size(); ++physical) {
     const int64 logical = Major(layout, physical);
     logical_to_physical[logical] = physical;
   }

tensorflow/compiler/xla/metric_table_report.cc

Lines changed: 3 additions & 3 deletions
@@ -76,10 +76,10 @@ void MetricTableReport::WriteReportToInfoLog(double expected_metric_sum) {
   // Write something to the log normally to get the date-time and file prefix.
   LOG(INFO) << "Writing report to log.";
 
-  int64 pos = 0;
+  string::size_type pos = 0;
   const string report = MakeReport(expected_metric_sum);
   while (pos < report.size()) {
-    int64 end_of_line = report.find('\n', pos);
+    size_t end_of_line = report.find('\n', pos);
     if (end_of_line == string::npos) {
       end_of_line = report.size();
     }
@@ -247,7 +247,7 @@ string MetricTableReport::MetricString(double metric) {
     sp1.remove_prefix(1);
   }
   // Copy rest of input characters.
-  for (int64 i = 0; i < sp1.size(); ++i) {
+  for (tensorflow::StringPiece::size_type i = 0; i < sp1.size(); ++i) {
     if (i > 0 && (sp1.size() - i) % 3 == 0) {
       output.push_back(',');
     }
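A side note on this file's first hunk: keeping pos and end_of_line unsigned is more than warning hygiene. string::find returns string::size_type, and string::npos is the maximum value of that unsigned type, so an unsigned result compares against npos exactly rather than through a signed-to-unsigned conversion. A minimal sketch of the same loop shape, with a hypothetical helper that is not commit code:

#include <cstddef>
#include <string>

// Hypothetical example, not from this commit.
size_t CountLines(const std::string& report) {
  size_t count = 0;
  std::string::size_type pos = 0;  // unsigned, matches find()'s return type
  while (pos < report.size()) {
    // find() returns std::string::npos (the largest size_type value) when
    // no '\n' remains; an unsigned end_of_line makes the npos test exact.
    size_t end_of_line = report.find('\n', pos);
    if (end_of_line == std::string::npos) {
      end_of_line = report.size();
    }
    ++count;
    pos = end_of_line + 1;
  }
  return count;
}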

tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc

Lines changed: 3 additions & 3 deletions
@@ -191,12 +191,12 @@ tensorflow::Status DotOpEmitter::Emit() {
   // from the rhs index are the lower dimensions in the index so we add them
   // first.
   llvm_ir::IrArray::Index target_index;
-  for (int dimension = 0; dimension < lhs_index.size(); ++dimension) {
+  for (size_t dimension = 0; dimension < lhs_index.size(); ++dimension) {
     if (dimension != lhs_reduction_dimension) {
       target_index.push_back(lhs_index[dimension]);
     }
   }
-  for (int dimension = 0; dimension < rhs_index.size(); ++dimension) {
+  for (size_t dimension = 0; dimension < rhs_index.size(); ++dimension) {
     if (dimension != rhs_reduction_dimension) {
       target_index.push_back(rhs_index[dimension]);
     }
@@ -332,7 +332,7 @@ llvm_ir::IrArray::Index DotOpEmitter::EmitOperandArrayLoopNest(
   llvm_ir::IrArray::Index index =
       loop_nest->AddLoopsForShapeOnDimensions(shape, dimensions, name_suffix);
   // Verify every dimension except the reduction dimension was set in the index.
-  for (int dimension = 0; dimension < index.size(); ++dimension) {
+  for (size_t dimension = 0; dimension < index.size(); ++dimension) {
     if (dimension == reduction_dimension) {
       DCHECK_EQ(nullptr, index[dimension]);
     } else {

tensorflow/compiler/xla/service/generic_transfer_manager.cc

Lines changed: 2 additions & 1 deletion
@@ -68,7 +68,8 @@ Status GenericTransferManager::TransferLiteralFromDevice(
       ShallowCopyTupleFromDevice(executor, source, device_shape));
   TF_RET_CHECK(element_buffers.size() ==
                ShapeUtil::TupleElementCount(device_shape));
-  for (int64 i = 0; i < element_buffers.size(); ++i) {
+  for (std::vector<se::DeviceMemoryBase>::size_type i = 0;
+       i < element_buffers.size(); ++i) {
     const Shape& element_device_shape = device_shape.tuple_shapes(i);
     const Shape& element_literal_shape = literal_shape.tuple_shapes(i);
     Literal* element_literal = literal->add_tuple_literals();

tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ class DumpIrPass : public llvm::FunctionPass {
 char DumpIrPass::id_ = 0;
 
 void IrDumpingPassManager::run(llvm::Module &module) {
-  for (int i = 0; i < passes_.size(); ++i) {
+  for (std::vector<llvm::Pass*>::size_type i = 0; i < passes_.size(); ++i) {
     llvm::Pass *P = passes_[i];
     if (dump_ir_) {
       const llvm::PassInfo *PI =

tensorflow/compiler/xla/service/hlo_instruction.cc

Lines changed: 13 additions & 8 deletions
@@ -548,7 +548,8 @@ HloInstruction* HloInstruction::CloneAndFuseInternal(
   // See if this operand is already an operand of the fusion node.
   CHECK_EQ(operands_.size(), fused_parameters_.size());
   HloInstruction* fused_param = nullptr;
-  for (int64 i = 0; i < operands_.size(); ++i) {
+  for (std::vector<HloInstruction*>::size_type i = 0;
+       i < operands_.size(); ++i) {
     if (operands_[i] == operand) {
       fused_param = fused_parameters_[i];
       break;
@@ -639,7 +640,8 @@ void HloInstruction::CheckFusionInstruction() const {
       CHECK(!root_owned);
      root_owned = true;
     }
-    for (int i = 0; i < fused_parameters_.size(); ++i) {
+    for (std::vector<HloInstruction*>::size_type i = 0;
+         i < fused_parameters_.size(); ++i) {
       if (fused_parameters_[i] == instruction.get()) {
         CHECK(!parameter_owned[i]);
         parameter_owned[i] = true;
@@ -648,7 +650,7 @@ void HloInstruction::CheckFusionInstruction() const {
   }
   CHECK(root_owned);
   // Make sure all the parameter_owned entries are set
-  for (int i = 0; i < parameter_owned.size(); i++) {
+  for (std::vector<bool>::size_type i = 0; i < parameter_owned.size(); i++) {
     CHECK(parameter_owned[i]);
   }
 
@@ -681,7 +683,7 @@ void HloInstruction::CheckFusionInstruction() const {
                                  operands_[param_no]->shape()));
   }
   // Make sure all the parameter_numbers entries were seen
-  for (int i = 0; i < parameter_numbers.size(); i++) {
+  for (std::vector<bool>::size_type i = 0; i < parameter_numbers.size(); i++) {
     CHECK(parameter_numbers[i]);
   }
 
@@ -1384,7 +1386,7 @@ string HloInstruction::ToString(bool compact_operands) const {
   }
   if (!slice_starts_.empty() && !slice_limits_.empty()) {
     std::vector<string> bounds;
-    for (int i = 0; i < slice_starts_.size(); ++i) {
+    for (std::vector<int64>::size_type i = 0; i < slice_starts_.size(); ++i) {
       bounds.push_back(tensorflow::strings::StrCat("[", slice_starts_[i], ":",
                                                    slice_limits_[i], "]"));
     }
@@ -1399,7 +1401,8 @@ string HloInstruction::ToString(bool compact_operands) const {
   const auto append_dims = [&](const std::vector<string>& dims,
                                const Shape& shape) {
     CHECK_EQ(dims.size(), ShapeUtil::Rank(shape));
-    for (int64 logical = 0; logical < dims.size(); ++logical) {
+    for (std::vector<string>::size_type logical = 0; logical < dims.size();
+         ++logical) {
       int64 physical = logical;
       if (!shape.layout().minor_to_major().empty()) {
         physical = LayoutUtil::Major(shape.layout(), logical);
@@ -1812,7 +1815,8 @@ namespace {
 bool OrderIsTopologicalSort(const std::vector<const HloInstruction*>& order) {
   // Create a map from instruction to its position in 'order'.
   std::unordered_map<const HloInstruction*, int> order_position;
-  for (int i = 0; i < order.size(); i++) {
+  for (std::vector<const HloInstruction*>::size_type i = 0; i < order.size();
+       i++) {
     if (!order_position.insert({order[i], i}).second) {
       // Instruction order[i] is duplicated in the order.
       return false;
@@ -2059,7 +2063,8 @@ HloInstruction::UseKind HloInstruction::OperandElementUse(int64 i) const {
       return UseKind::kUse;
     }
     if (cache.count(&hlo) == 0) {
-      for (int64 j = 0; j < hlo.operands_.size(); ++j) {
+      for (std::vector<HloInstruction*>::size_type j = 0;
+           j < hlo.operands_.size(); ++j) {
         UseKind old = cache[&hlo];
         UseKind updated = plus(
             old, std::min(hlo.OperandElementUse(j),

tensorflow/compiler/xla/service/hlo_ordering.cc

Lines changed: 2 additions & 1 deletion
@@ -90,7 +90,8 @@ SequentialHloOrdering::SequentialHloOrdering(
   // Create a map from instruction to its order position.
   for (auto computation_order : module_sequence_) {
     const std::vector<const HloInstruction*>& order = computation_order.second;
-    for (int i = 0; i < order.size(); ++i) {
+    for (std::vector<const HloInstruction*>::size_type i = 0; i < order.size();
+         ++i) {
       DCHECK_EQ(0, order_position_.count(order[i]));
       order_position_.emplace(order[i], i);
     }

tensorflow/compiler/xla/service/llvm_ir/ir_array.cc

Lines changed: 1 addition & 1 deletion
@@ -200,7 +200,7 @@ llvm::Value* IrArray::EmitArrayElementAddress(
   // We perform broadcasting when the operand shape has dimension(s) of size
   // 1. In this case we fix the index value for that dimension to zero. This
   // effectively broadcasts along this dimension.
-  for (int64 i = 0; i < index.size(); ++i) {
+  for (size_t i = 0; i < index.size(); ++i) {
     auto dim = shape_->dimensions(i);
     actual_index.push_back(dim == 1 ? ir_builder->getInt64(0) : index[i]);
     is_implicit_broadcast |= dim == 1;

tensorflow/compiler/xla/service/service.cc

Lines changed: 4 additions & 2 deletions
@@ -348,7 +348,8 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> Service::BuildExecutables(
   const string& directory_path = flags->xla_dump_computations_to;
   const string& other_directory_path = flags->xla_dump_executions_to;
   if ((!directory_path.empty() || !other_directory_path.empty())) {
-    for (int64 i = 0; i < versioned_handles.size(); ++i) {
+    for (std::vector<VersionedComputationHandle>::size_type i = 0;
+         i < versioned_handles.size(); ++i) {
       TF_ASSIGN_OR_RETURN(std::unique_ptr<SessionModule> session_module,
                           computation_tracker_.SnapshotComputation(
                               versioned_handles[i].handle));
@@ -537,7 +538,8 @@ Service::ExecuteParallelAndRegisterResult(
   }
 
   // Wait for all executions to complete.
-  for (int64 i = 0; i < result_handles.size(); ++i) {
+  for (std::vector<GlobalDataHandle>::size_type i = 0;
+       i < result_handles.size(); ++i) {
     if (!streams[i]->BlockHostUntilDone()) {
       return InternalError("failed to complete execution for stream %lld", i);
     }

tensorflow/compiler/xla/service/shape_inference.cc

Lines changed: 6 additions & 3 deletions
@@ -1037,7 +1037,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
   }
 
   std::vector<int64> sizes;
-  for (int64 dimension = 0; dimension < starts.size(); ++dimension) {
+  for (tensorflow::gtl::ArraySlice<int64>::size_type dimension = 0;
+       dimension < starts.size(); ++dimension) {
     int64 start_index = starts[dimension];
     int64 limit_index = limits[dimension];
     if (start_index < 0) {
@@ -1110,7 +1111,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
         slice_sizes.size(), ShapeUtil::Rank(operand_shape));
   }
 
-  for (int64 dim = 0; dim < slice_sizes.size(); ++dim) {
+  for (tensorflow::gtl::ArraySlice<int64>::size_type dim = 0;
+       dim < slice_sizes.size(); ++dim) {
     const int64 input_dim_size = operand_shape.dimensions(dim);
     const int64 slice_dim_size = slice_sizes[dim];
     if (slice_dim_size <= 0) {
@@ -1370,7 +1372,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
   }
 
   // All arguments must be compatible with the program shape.
-  for (int i = 0; i < arg_shapes.size(); ++i) {
+  for (tensorflow::gtl::ArraySlice<const Shape*>::size_type i = 0;
+       i < arg_shapes.size(); ++i) {
     const Shape& arg_shape = *arg_shapes[i];
     const Shape& param_shape = to_apply.parameters(i);
     if (!ShapeUtil::Compatible(arg_shape, param_shape)) {

tensorflow/compiler/xla/service/tuple_points_to_analysis.cc

Lines changed: 2 additions & 1 deletion
@@ -280,7 +280,8 @@ Status TuplePointsToAnalysis::HandleTuple(
 
   // A tuple contains references to all input operands and transitively any
   // references in those operands.
-  for (int64 i = 0; i < operands.size(); ++i) {
+  for (tensorflow::gtl::ArraySlice<HloInstruction*>::size_type i = 0;
+       i < operands.size(); ++i) {
     const PointsToSet& operand_points_to_set =
         *FindOrDie(points_to_, operands[i]);
 
tensorflow/compiler/xla/shape_util.cc

Lines changed: 1 addition & 1 deletion
@@ -643,7 +643,7 @@ bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts) {
   // The dimensions in minor_to_major need to be renumbered to account for the
   // degenerate dimensions which have removed. Decrement each dimension number
   // once for each degenerate dimension which has a smaller number.
-  for (int i = 0; i < minor_to_major.size(); ++i) {
+  for (std::vector<int64>::size_type i = 0; i < minor_to_major.size(); ++i) {
     int adjustment = 0;
     for (int64 dim : degenerate_dimensions) {
       if (minor_to_major[i] > dim) {
