Skip to content

Commit

Permalink
Merge pull request tensorflow#8258 from tensorflow/revert-8206-fix_xla_compiler_warnings
Browse files Browse the repository at this point in the history

Revert "Fix compile warnings."
  • Loading branch information
gunan authored Mar 10, 2017
2 parents f5c700f + 612513f commit 88b3a8b
Show file tree
Hide file tree
Showing 12 changed files with 26 additions and 40 deletions.
3 changes: 1 addition & 2 deletions tensorflow/compiler/xla/layout_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -287,8 +287,7 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
/* static */ std::vector<int64> LayoutUtil::MakeLogicalToPhysical(
const Layout& layout) {
std::vector<int64> logical_to_physical(layout.minor_to_major_size());
for (std::vector<int64>::size_type physical = 0;
physical < logical_to_physical.size(); ++physical) {
for (int64 physical = 0; physical < logical_to_physical.size(); ++physical) {
const int64 logical = Major(layout, physical);
logical_to_physical[logical] = physical;
}
Expand Down
6 changes: 3 additions & 3 deletions tensorflow/compiler/xla/metric_table_report.cc
Original file line number Diff line number Diff line change
Expand Up @@ -76,10 +76,10 @@ void MetricTableReport::WriteReportToInfoLog(double expected_metric_sum) {
// Write something to the log normally to get the date-time and file prefix.
LOG(INFO) << "Writing report to log.";

string::size_type pos = 0;
int64 pos = 0;
const string report = MakeReport(expected_metric_sum);
while (pos < report.size()) {
size_t end_of_line = report.find('\n', pos);
int64 end_of_line = report.find('\n', pos);
if (end_of_line == string::npos) {
end_of_line = report.size();
}
Expand Down Expand Up @@ -247,7 +247,7 @@ string MetricTableReport::MetricString(double metric) {
sp1.remove_prefix(1);
}
// Copy rest of input characters.
for (tensorflow::StringPiece::size_type i = 0; i < sp1.size(); ++i) {
for (int64 i = 0; i < sp1.size(); ++i) {
if (i > 0 && (sp1.size() - i) % 3 == 0) {
output.push_back(',');
}
Expand Down
6 changes: 3 additions & 3 deletions tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -191,12 +191,12 @@ tensorflow::Status DotOpEmitter::Emit() {
// from the rhs index are the lower dimensions in the index so we add them
// first.
llvm_ir::IrArray::Index target_index;
for (size_t dimension = 0; dimension < lhs_index.size(); ++dimension) {
for (int dimension = 0; dimension < lhs_index.size(); ++dimension) {
if (dimension != lhs_reduction_dimension) {
target_index.push_back(lhs_index[dimension]);
}
}
for (size_t dimension = 0; dimension < rhs_index.size(); ++dimension) {
for (int dimension = 0; dimension < rhs_index.size(); ++dimension) {
if (dimension != rhs_reduction_dimension) {
target_index.push_back(rhs_index[dimension]);
}
Expand Down Expand Up @@ -332,7 +332,7 @@ llvm_ir::IrArray::Index DotOpEmitter::EmitOperandArrayLoopNest(
llvm_ir::IrArray::Index index =
loop_nest->AddLoopsForShapeOnDimensions(shape, dimensions, name_suffix);
// Verify every dimension except the reduction dimension was set in the index.
for (size_t dimension = 0; dimension < index.size(); ++dimension) {
for (int dimension = 0; dimension < index.size(); ++dimension) {
if (dimension == reduction_dimension) {
DCHECK_EQ(nullptr, index[dimension]);
} else {
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/compiler/xla/service/generic_transfer_manager.cc
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,7 @@ Status GenericTransferManager::TransferLiteralFromDevice(
ShallowCopyTupleFromDevice(executor, source, device_shape));
TF_RET_CHECK(element_buffers.size() ==
ShapeUtil::TupleElementCount(device_shape));
for (std::vector<se::DeviceMemoryBase>::size_type i = 0;
i < element_buffers.size(); ++i) {
for (int64 i = 0; i < element_buffers.size(); ++i) {
const Shape& element_device_shape = device_shape.tuple_shapes(i);
const Shape& element_literal_shape = literal_shape.tuple_shapes(i);
Literal* element_literal = literal->add_tuple_literals();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ class DumpIrPass : public llvm::FunctionPass {
char DumpIrPass::id_ = 0;

void IrDumpingPassManager::run(llvm::Module &module) {
for (std::vector<llvm::Pass*>::size_type i = 0; i < passes_.size(); ++i) {
for (int i = 0; i < passes_.size(); ++i) {
llvm::Pass *P = passes_[i];
if (dump_ir_) {
const llvm::PassInfo *PI =
Expand Down
21 changes: 8 additions & 13 deletions tensorflow/compiler/xla/service/hlo_instruction.cc
Original file line number Diff line number Diff line change
Expand Up @@ -548,8 +548,7 @@ HloInstruction* HloInstruction::CloneAndFuseInternal(
// See if this operand is already an operand of the fusion node.
CHECK_EQ(operands_.size(), fused_parameters_.size());
HloInstruction* fused_param = nullptr;
for (std::vector<HloInstruction*>::size_type i = 0;
i < operands_.size(); ++i) {
for (int64 i = 0; i < operands_.size(); ++i) {
if (operands_[i] == operand) {
fused_param = fused_parameters_[i];
break;
Expand Down Expand Up @@ -640,8 +639,7 @@ void HloInstruction::CheckFusionInstruction() const {
CHECK(!root_owned);
root_owned = true;
}
for (std::vector<HloInstruction*>::size_type i = 0;
i < fused_parameters_.size(); ++i) {
for (int i = 0; i < fused_parameters_.size(); ++i) {
if (fused_parameters_[i] == instruction.get()) {
CHECK(!parameter_owned[i]);
parameter_owned[i] = true;
Expand All @@ -650,7 +648,7 @@ void HloInstruction::CheckFusionInstruction() const {
}
CHECK(root_owned);
// Make sure all the parameter_owned entries are set
for (std::vector<bool>::size_type i = 0; i < parameter_owned.size(); i++) {
for (int i = 0; i < parameter_owned.size(); i++) {
CHECK(parameter_owned[i]);
}

Expand Down Expand Up @@ -683,7 +681,7 @@ void HloInstruction::CheckFusionInstruction() const {
operands_[param_no]->shape()));
}
// Make sure all the parameter_numbers entries were seen
for (std::vector<bool>::size_type i = 0; i < parameter_numbers.size(); i++) {
for (int i = 0; i < parameter_numbers.size(); i++) {
CHECK(parameter_numbers[i]);
}

Expand Down Expand Up @@ -1386,7 +1384,7 @@ string HloInstruction::ToString(bool compact_operands) const {
}
if (!slice_starts_.empty() && !slice_limits_.empty()) {
std::vector<string> bounds;
for (std::vector<int64>::size_type i = 0; i < slice_starts_.size(); ++i) {
for (int i = 0; i < slice_starts_.size(); ++i) {
bounds.push_back(tensorflow::strings::StrCat("[", slice_starts_[i], ":",
slice_limits_[i], "]"));
}
Expand All @@ -1401,8 +1399,7 @@ string HloInstruction::ToString(bool compact_operands) const {
const auto append_dims = [&](const std::vector<string>& dims,
const Shape& shape) {
CHECK_EQ(dims.size(), ShapeUtil::Rank(shape));
for (std::vector<string>::size_type logical = 0; logical < dims.size();
++logical) {
for (int64 logical = 0; logical < dims.size(); ++logical) {
int64 physical = logical;
if (!shape.layout().minor_to_major().empty()) {
physical = LayoutUtil::Major(shape.layout(), logical);
Expand Down Expand Up @@ -1815,8 +1812,7 @@ namespace {
bool OrderIsTopologicalSort(const std::vector<const HloInstruction*>& order) {
// Create a map from instruction to its position in 'order'.
std::unordered_map<const HloInstruction*, int> order_position;
for (std::vector<const HloInstruction*>::size_type i = 0; i < order.size();
i++) {
for (int i = 0; i < order.size(); i++) {
if (!order_position.insert({order[i], i}).second) {
// Instruction order[i] is duplicated in the order.
return false;
Expand Down Expand Up @@ -2063,8 +2059,7 @@ HloInstruction::UseKind HloInstruction::OperandElementUse(int64 i) const {
return UseKind::kUse;
}
if (cache.count(&hlo) == 0) {
for (std::vector<HloInstruction*>::size_type j = 0;
j < hlo.operands_.size(); ++j) {
for (int64 j = 0; j < hlo.operands_.size(); ++j) {
UseKind old = cache[&hlo];
UseKind updated = plus(
old, std::min(hlo.OperandElementUse(j),
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/compiler/xla/service/hlo_ordering.cc
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,7 @@ SequentialHloOrdering::SequentialHloOrdering(
// Create a map from instruction to its order position.
for (auto computation_order : module_sequence_) {
const std::vector<const HloInstruction*>& order = computation_order.second;
for (std::vector<const HloInstruction*>::size_type i = 0; i < order.size();
++i) {
for (int i = 0; i < order.size(); ++i) {
DCHECK_EQ(0, order_position_.count(order[i]));
order_position_.emplace(order[i], i);
}
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ llvm::Value* IrArray::EmitArrayElementAddress(
// We perform broadcasting when the operand shape has dimension(s) of size
// 1. In this case we fix the index value for that dimension to zero. This
// effectively broadcasts along this dimension.
for (size_t i = 0; i < index.size(); ++i) {
for (int64 i = 0; i < index.size(); ++i) {
auto dim = shape_->dimensions(i);
actual_index.push_back(dim == 1 ? ir_builder->getInt64(0) : index[i]);
is_implicit_broadcast |= dim == 1;
Expand Down
6 changes: 2 additions & 4 deletions tensorflow/compiler/xla/service/service.cc
Original file line number Diff line number Diff line change
Expand Up @@ -348,8 +348,7 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> Service::BuildExecutables(
const string& directory_path = flags->xla_dump_computations_to;
const string& other_directory_path = flags->xla_dump_executions_to;
if ((!directory_path.empty() || !other_directory_path.empty())) {
for (std::vector<VersionedComputationHandle>::size_type i = 0;
i < versioned_handles.size(); ++i) {
for (int64 i = 0; i < versioned_handles.size(); ++i) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<SessionModule> session_module,
computation_tracker_.SnapshotComputation(
versioned_handles[i].handle));
Expand Down Expand Up @@ -538,8 +537,7 @@ Service::ExecuteParallelAndRegisterResult(
}

// Wait for all executions to complete.
for (std::vector<GlobalDataHandle>::size_type i = 0;
i < result_handles.size(); ++i) {
for (int64 i = 0; i < result_handles.size(); ++i) {
if (!streams[i]->BlockHostUntilDone()) {
return InternalError("failed to complete execution for stream %lld", i);
}
Expand Down
9 changes: 3 additions & 6 deletions tensorflow/compiler/xla/service/shape_inference.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1037,8 +1037,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
}

std::vector<int64> sizes;
for (tensorflow::gtl::ArraySlice<int64>::size_type dimension = 0;
dimension < starts.size(); ++dimension) {
for (int64 dimension = 0; dimension < starts.size(); ++dimension) {
int64 start_index = starts[dimension];
int64 limit_index = limits[dimension];
if (start_index < 0) {
Expand Down Expand Up @@ -1111,8 +1110,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
slice_sizes.size(), ShapeUtil::Rank(operand_shape));
}

for (tensorflow::gtl::ArraySlice<int64>::size_type dim = 0;
dim < slice_sizes.size(); ++dim) {
for (int64 dim = 0; dim < slice_sizes.size(); ++dim) {
const int64 input_dim_size = operand_shape.dimensions(dim);
const int64 slice_dim_size = slice_sizes[dim];
if (slice_dim_size <= 0) {
Expand Down Expand Up @@ -1372,8 +1370,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
}

// All arguments must be compatible with the program shape.
for (tensorflow::gtl::ArraySlice<const Shape*>::size_type i = 0;
i < arg_shapes.size(); ++i) {
for (int i = 0; i < arg_shapes.size(); ++i) {
const Shape& arg_shape = *arg_shapes[i];
const Shape& param_shape = to_apply.parameters(i);
if (!ShapeUtil::Compatible(arg_shape, param_shape)) {
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
Original file line number Diff line number Diff line change
Expand Up @@ -280,8 +280,7 @@ Status TuplePointsToAnalysis::HandleTuple(

// A tuple contains references to all input operands and transitively any
// references in those operands.
for (tensorflow::gtl::ArraySlice<HloInstruction*>::size_type i = 0;
i < operands.size(); ++i) {
for (int64 i = 0; i < operands.size(); ++i) {
const PointsToSet& operand_points_to_set =
*FindOrDie(points_to_, operands[i]);

Expand Down
2 changes: 1 addition & 1 deletion tensorflow/compiler/xla/shape_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -643,7 +643,7 @@ bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts) {
// The dimensions in minor_to_major need to be renumbered to account for the
// degenerate dimensions which have removed. Decrement each dimension number
// once for each degenerate dimension which has a smaller number.
for (std::vector<int64>::size_type i = 0; i < minor_to_major.size(); ++i) {
for (int i = 0; i < minor_to_major.size(); ++i) {
int adjustment = 0;
for (int64 dim : degenerate_dimensions) {
if (minor_to_major[i] > dim) {
Expand Down

0 comments on commit 88b3a8b

Please sign in to comment.