diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index 692d186b14dfd5..dd44c0beb07e73 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -1397,6 +1397,7 @@ cc_library(
         "//tensorflow/compiler/xla:literal_util",
         "//tensorflow/compiler/xla:shape_util",
         "//tensorflow/compiler/xla:types",
+        "//tensorflow/compiler/xla:window_util",
         "//tensorflow/compiler/xla/legacy_flags:hlo_graph_dumper_flags",
         "//tensorflow/core:lib",
     ],
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
index 3f8a2f9859961d..fa0e3d934c0eff 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
@@ -24,6 +24,7 @@ limitations under the License.
 #include "tensorflow/compiler/xla/service/hlo_module.h"
 #include "tensorflow/compiler/xla/shape_util.h"
 #include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/window_util.h"
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/lib/io/path.h"
 #include "tensorflow/core/lib/strings/numbers.h"
@@ -56,68 +57,6 @@ string ComputationId(const HloComputation* computation) {
   return Printf("%lld", reinterpret_cast<uint64>(computation));
 }
 
-// Returns a compact string that represents the convolution dimension numbers.
-string ConvolutionDimensionNumbersToString(
-    const ConvolutionDimensionNumbers& dim_numbers) {
-  return Printf("B@%lld,Z@%lld,KIZ@%lld,KOZ@%lld",
-                dim_numbers.batch_dimension(), dim_numbers.feature_dimension(),
-                dim_numbers.kernel_input_feature_dimension(),
-                dim_numbers.kernel_output_feature_dimension());
-}
-
-// Returns a compact string that represents the non-trivial fields in the
-// window description. If there are no non-trivial fields, the empty string is
-// returned.
-string WindowToString(const Window& window) {
-  bool display_padding = false;
-  bool display_window_dilation = false;
-  bool display_base_dilation = false;
-  bool display_stride = false;
-  for (const WindowDimension& dimension : window.dimensions()) {
-    display_padding |=
-        dimension.padding_low() != 0 || dimension.padding_high() != 0;
-    display_window_dilation |= dimension.window_dilation() != 1;
-    display_base_dilation |= dimension.base_dilation() != 1;
-    display_stride |= dimension.stride() != 1;
-  }
-  std::vector<string> pieces = {};
-  if (display_padding) {
-    pieces.push_back("\\n");
-    pieces.push_back("padding=[");
-    for (const WindowDimension& dimension : window.dimensions()) {
-      pieces.push_back(StrCat("(", dimension.padding_low(), ",",
-                              dimension.padding_high(), ")"));
-      pieces.push_back(", ");
-    }
-    pieces.pop_back();
-    pieces.push_back("]");
-  }
-  // Make a convenient lambda that adds a simple int64 field in each
-  // WindowDimension.
-  auto add_field = [&pieces, &window](
-      const string& label,
-      tensorflow::protobuf_int64 (WindowDimension::*member)() const) {
-    pieces.push_back("\\n");
-    pieces.push_back(label + "=[");
-    for (const WindowDimension& dimension : window.dimensions()) {
-      pieces.push_back(StrCat(((&dimension)->*member)()));
-      pieces.push_back(", ");
-    }
-    pieces.pop_back();
-    pieces.push_back("]");
-  };
-  if (display_window_dilation) {
-    add_field("window_dilation", &WindowDimension::window_dilation);
-  }
-  if (display_base_dilation) {
-    add_field("base_dilation", &WindowDimension::base_dilation);
-  }
-  if (display_stride) {
-    add_field("stride", &WindowDimension::stride);
-  }
-  return Join(pieces, "");
-}
-
 // Returns the dot graph edges and nodes for the given instruction sequence.
 // Edges which extend between computations are added to the vector
 // intercomputation_edges. This is necessary because graphviz does not render
@@ -171,15 +110,12 @@ string InstructionSequenceGraph(
   for (auto& instruction : instructions) {
     string color = "peachpuff";
     string shape = "ellipse";
-    string name = HloOpcodeString(instruction->opcode());
-    if (HloOpcode::kFusion == instruction->opcode()) {
-      name += ": " + FusionKindString(instruction->fusion_kind());
-    }
+    string name = instruction->ExtendedOpcodeStr();
     if (HloOpcode::kConvolution == instruction->opcode()) {
-      name += ":\\n" + ConvolutionDimensionNumbersToString(
-                           instruction->convolution_dimension_numbers()) +
-              WindowToString(instruction->window());
+      name += ":\\n" + instruction->ConvolutionDimensionNumbersToString() +
+              "\\n" + window_util::ToString(instruction->window());
     }
+    name += "\\n" + instruction->name();
 
     if (!instruction->metadata().op_type().empty()) {
       StrAppend(&name, "\\n", instruction->metadata().op_type());
@@ -532,4 +468,5 @@ void DumpText(const HloModule& module, const string& label,
 }
 
 }  // namespace hlo_graph_dumper
+
 }  // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index 883f9751d19322..612aee6d137fbf 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -1352,6 +1352,15 @@ string HloInstruction::SignatureString() const {
                 ShapeUtil::HumanString(shape()));
 }
 
+string HloInstruction::ExtendedOpcodeStr() const {
+  string opc_name = HloOpcodeString(opcode());
+  HloOpcode opc = opcode();
+  if (HloOpcode::kFusion == opc) {
+    opc_name += ":" + xla::ToString(fusion_kind());
+  }
+  return opc_name;
+}
+
 string HloInstruction::ToString(bool compact_operands) const {
   string operands;
   if (opcode() == HloOpcode::kConstant) {
@@ -1409,46 +1418,9 @@ string HloInstruction::ToString(bool compact_operands) const {
     }
     StrAppend(&extra, ", slice={", Join(bounds, ", "), "}");
   }
-  if (convolution_dimension_numbers_ != nullptr) {
-    const auto& dnums = *convolution_dimension_numbers_;
-
-    // Show the given dimension labels in order of major to minor based on the
-    // shape's layout.
-    const auto append_dims = [&](const std::vector<string>& dims,
-                                 const Shape& shape) {
-      CHECK_EQ(dims.size(), ShapeUtil::Rank(shape));
-      for (int64 logical = 0; logical < dims.size(); ++logical) {
-        int64 physical = logical;
-        if (!shape.layout().minor_to_major().empty()) {
-          physical = LayoutUtil::Major(shape.layout(), logical);
-        }
-        extra += dims[physical];
-      }
-    };
-
-    // lhs_dims[i] is the symbol of the logical dimension i for the lhs
-    // operand. E.g. if batch has dimension number 2, then lhs_dims[2] == "b".
-    std::vector<string> lhs_dims(2 + dnums.spatial_dimensions().size());
-    lhs_dims[dnums.batch_dimension()] = 'b';
-    lhs_dims[dnums.feature_dimension()] = 'f';
-    for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
-      lhs_dims[dnums.spatial_dimensions(i)] = tensorflow::strings::StrCat(i);
-    }
-
-    std::vector<string> rhs_dims(2 + dnums.kernel_spatial_dimensions().size());
-    rhs_dims[dnums.kernel_input_feature_dimension()] = "i";
-    rhs_dims[dnums.kernel_output_feature_dimension()] = "o";
-    for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
-      rhs_dims[dnums.kernel_spatial_dimensions(i)] =
-          tensorflow::strings::StrCat(i);
-    }
-    extra += " dims: ";
-    append_dims(lhs_dims, operands_.at(0)->shape());
-    extra += "_";
-    append_dims(rhs_dims, operands_.at(1)->shape());
-    extra += "->";
-    append_dims(lhs_dims, shape());
+  if (convolution_dimension_numbers_ != nullptr) {
+    StrAppend(&extra, ", ", ConvolutionDimensionNumbersToString());
   }
 
   if (opcode() == HloOpcode::kWhile) {
@@ -1474,8 +1446,7 @@ string HloInstruction::ToString(bool compact_operands) const {
   }
   return Printf("%s = %s %s(%s)%s", name().c_str(),
                 ShapeUtil::HumanStringWithLayout(shape()).c_str(),
-                HloOpcodeString(opcode()).c_str(), operands.c_str(),
-                extra.c_str());
+                ExtendedOpcodeStr().c_str(), operands.c_str(), extra.c_str());
 }
 
 string HloInstruction::ToShortString() const {
@@ -2114,19 +2085,65 @@ HloInstruction::ReshapeMerelyInsertsOrDeletes1SizedDimensions() const {
       shape_);
 }
 
-string FusionKindString(HloInstruction::FusionKind kind) {
+string ToString(HloInstruction::FusionKind kind) {
   switch (kind) {
     case HloInstruction::FusionKind::kLoop:
-      return "Loop";
+      return "kLoop";
    case HloInstruction::FusionKind::kInput:
-      return "Input";
+      return "kInput";
    case HloInstruction::FusionKind::kTransposeDot:
-      return "TransposeDot";
+      return "kTransposeDot";
    case HloInstruction::FusionKind::kConvBackwardFilter:
-      return "ConvBackwardFilter";
+      return "kConvBackwardFilter";
    case HloInstruction::FusionKind::kConvBackwardInput:
-      return "ConvBackwardInput";
+      return "kConvBackwardInput";
+  }
+}
+
+string HloInstruction::ConvolutionDimensionNumbersToString() const {
+  string result;
+  if (convolution_dimension_numbers_ == nullptr) {
+    return result;
   }
+  const ConvolutionDimensionNumbers& dnums = *convolution_dimension_numbers_;
+  // Show the given dimension labels in order of major to minor based on the
+  // shape's layout.
+  const auto append_dims = [&](const std::vector<string>& dims,
+                               const Shape& shape) {
+    CHECK_EQ(dims.size(), ShapeUtil::Rank(shape));
+    for (int64 logical = 0; logical < dims.size(); ++logical) {
+      int64 physical = logical;
+      if (!shape.layout().minor_to_major().empty()) {
+        physical = LayoutUtil::Major(shape.layout(), logical);
+      }
+      result += dims[physical];
+    }
+  };
+
+  // lhs_dims[i] is the symbol of the logical dimension i for the lhs
+  // operand. E.g. if batch has dimension number 2, then lhs_dims[2] == "b".
+  std::vector<string> lhs_dims(2 + dnums.spatial_dimensions().size());
+  lhs_dims[dnums.batch_dimension()] = 'b';
+  lhs_dims[dnums.feature_dimension()] = 'f';
+  for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
+    lhs_dims[dnums.spatial_dimensions(i)] = tensorflow::strings::StrCat(i);
+  }
+
+  std::vector<string> rhs_dims(2 + dnums.kernel_spatial_dimensions().size());
+  rhs_dims[dnums.kernel_input_feature_dimension()] = "i";
+  rhs_dims[dnums.kernel_output_feature_dimension()] = "o";
+  for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
+    rhs_dims[dnums.kernel_spatial_dimensions(i)] =
+        tensorflow::strings::StrCat(i);
+  }
+
+  result += "dim_labels=";
+  append_dims(lhs_dims, operand(0)->shape());
+  result += "_";
+  append_dims(rhs_dims, operand(1)->shape());
+  result += "->";
+  append_dims(lhs_dims, shape());
+  return result;
 }
 
 bool HloInstruction::CouldBeBitcast() const {
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index 926a984d220bf3..207f229a1b9291 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -632,6 +632,9 @@ class HloInstruction {
     return *convolution_dimension_numbers_;
   }
 
+  // Returns the dump string of the convolution dimension numbers.
+  string ConvolutionDimensionNumbersToString() const;
+
   // Returns the random distribution for this rng node.
   //
   // Precondition: opcode() == HloOpcode::kRng
@@ -687,6 +690,11 @@ class HloInstruction {
   std::tuple<bool, std::vector<int64>, std::vector<int64>>
   ReshapeMerelyInsertsOrDeletes1SizedDimensions() const;
 
+  // Returns the opcode string for this instruction. Compared with the
+  // HloOpcodeString method, this wrapper dumps additional information
+  // such as fusion kind.
+  string ExtendedOpcodeStr() const;
+
   // Returns a string identifier for this instruction. If no string identifier
   // has been explicitly set, then the identifier is the serialized pointer to
   // this instruction.
@@ -884,7 +892,7 @@ class HloInstruction {
   TF_DISALLOW_COPY_AND_ASSIGN(HloInstruction);
 };
 
-string FusionKindString(HloInstruction::FusionKind kind);
+string ToString(HloInstruction::FusionKind kind);
 
 }  // namespace xla