Skip to content

Commit

Permalink
Refactor convolution dimension numbers and window dumping code
Browse files Browse the repository at this point in the history
   and remove duplicate code in hlo_graph_dumper
Change: 150324515
  • Loading branch information
tensorflower-gardener committed Mar 16, 2017
1 parent f4a0c2c commit 1258b20
Show file tree
Hide file tree
Showing 4 changed files with 80 additions and 117 deletions.
1 change: 1 addition & 0 deletions tensorflow/compiler/xla/service/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -1397,6 +1397,7 @@ cc_library(
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:window_util",
"//tensorflow/compiler/xla/legacy_flags:hlo_graph_dumper_flags",
"//tensorflow/core:lib",
],
Expand Down
75 changes: 6 additions & 69 deletions tensorflow/compiler/xla/service/hlo_graph_dumper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/window_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
Expand Down Expand Up @@ -56,68 +57,6 @@ string ComputationId(const HloComputation* computation) {
return Printf("%lld", reinterpret_cast<uint64>(computation));
}

// Returns a compact string that represents the convolution dimension numbers.
string ConvolutionDimensionNumbersToString(
const ConvolutionDimensionNumbers& dim_numbers) {
return Printf("B@%lld,Z@%lld,KIZ@%lld,KOZ@%lld",
dim_numbers.batch_dimension(), dim_numbers.feature_dimension(),
dim_numbers.kernel_input_feature_dimension(),
dim_numbers.kernel_output_feature_dimension());
}

// Returns a compact string that represents the non-trivial fields in the window
// description. If there are no non-trivial fields, the empty string is
// returned.
string WindowToString(const Window& window) {
bool display_padding = false;
bool display_window_dilation = false;
bool display_base_dilation = false;
bool display_stride = false;
for (const WindowDimension& dimension : window.dimensions()) {
display_padding |=
dimension.padding_low() != 0 || dimension.padding_high() != 0;
display_window_dilation |= dimension.window_dilation() != 1;
display_base_dilation |= dimension.base_dilation() != 1;
display_stride |= dimension.stride() != 1;
}
std::vector<string> pieces = {};
if (display_padding) {
pieces.push_back("\\n");
pieces.push_back("padding=[");
for (const WindowDimension& dimension : window.dimensions()) {
pieces.push_back(StrCat("(", dimension.padding_low(), ",",
dimension.padding_high(), ")"));
pieces.push_back(", ");
}
pieces.pop_back();
pieces.push_back("]");
}
// Make a convenient lambda that adds a simple int64 field in each
// WindowDimension.
auto add_field = [&pieces, &window](
const string& label,
tensorflow::protobuf_int64 (WindowDimension::*member)() const) {
pieces.push_back("\\n");
pieces.push_back(label + "=[");
for (const WindowDimension& dimension : window.dimensions()) {
pieces.push_back(StrCat(((&dimension)->*member)()));
pieces.push_back(", ");
}
pieces.pop_back();
pieces.push_back("]");
};
if (display_window_dilation) {
add_field("window_dilation", &WindowDimension::window_dilation);
}
if (display_base_dilation) {
add_field("base_dilation", &WindowDimension::base_dilation);
}
if (display_stride) {
add_field("stride", &WindowDimension::stride);
}
return Join(pieces, "");
}

// Returns the dot graph edges and nodes for the given instruction sequence.
// Edges which extend between computations are added to the vector
// intercomputation_edges. This is necessary because graphviz does not render
Expand Down Expand Up @@ -171,15 +110,12 @@ string InstructionSequenceGraph(
for (auto& instruction : instructions) {
string color = "peachpuff";
string shape = "ellipse";
string name = HloOpcodeString(instruction->opcode());
if (HloOpcode::kFusion == instruction->opcode()) {
name += ": " + FusionKindString(instruction->fusion_kind());
}
string name = instruction->ExtendedOpcodeStr();
if (HloOpcode::kConvolution == instruction->opcode()) {
name += ":\\n" + ConvolutionDimensionNumbersToString(
instruction->convolution_dimension_numbers()) +
WindowToString(instruction->window());
name += ":\\n" + instruction->ConvolutionDimensionNumbersToString() +
"\\n" + window_util::ToString(instruction->window());
}

name += "\\n" + instruction->name();
if (!instruction->metadata().op_type().empty()) {
StrAppend(&name, "\\n", instruction->metadata().op_type());
Expand Down Expand Up @@ -532,4 +468,5 @@ void DumpText(const HloModule& module, const string& label,
}

} // namespace hlo_graph_dumper

} // namespace xla
111 changes: 64 additions & 47 deletions tensorflow/compiler/xla/service/hlo_instruction.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1352,6 +1352,15 @@ string HloInstruction::SignatureString() const {
ShapeUtil::HumanString(shape()));
}

string HloInstruction::ExtendedOpcodeStr() const {
string opc_name = HloOpcodeString(opcode());
HloOpcode opc = opcode();
if (HloOpcode::kFusion == opc) {
opc_name += ":" + xla::ToString(fusion_kind());
}
return opc_name;
}

string HloInstruction::ToString(bool compact_operands) const {
string operands;
if (opcode() == HloOpcode::kConstant) {
Expand Down Expand Up @@ -1409,46 +1418,9 @@ string HloInstruction::ToString(bool compact_operands) const {
}
StrAppend(&extra, ", slice={", Join(bounds, ", "), "}");
}
if (convolution_dimension_numbers_ != nullptr) {
const auto& dnums = *convolution_dimension_numbers_;

// Show the given dimension labels in order of major to minor based on the
// shape's layout.
const auto append_dims = [&](const std::vector<string>& dims,
const Shape& shape) {
CHECK_EQ(dims.size(), ShapeUtil::Rank(shape));
for (int64 logical = 0; logical < dims.size(); ++logical) {
int64 physical = logical;
if (!shape.layout().minor_to_major().empty()) {
physical = LayoutUtil::Major(shape.layout(), logical);
}
extra += dims[physical];
}
};

// lhs_dims[i] is the symbol of the logical dimension i for the lhs
// operand. E.g. if batch has dimension number 2, then lhs_dims[2] == "b".
std::vector<string> lhs_dims(2 + dnums.spatial_dimensions().size());
lhs_dims[dnums.batch_dimension()] = 'b';
lhs_dims[dnums.feature_dimension()] = 'f';
for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
lhs_dims[dnums.spatial_dimensions(i)] = tensorflow::strings::StrCat(i);
}

std::vector<string> rhs_dims(2 + dnums.kernel_spatial_dimensions().size());
rhs_dims[dnums.kernel_input_feature_dimension()] = "i";
rhs_dims[dnums.kernel_output_feature_dimension()] = "o";
for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
rhs_dims[dnums.kernel_spatial_dimensions(i)] =
tensorflow::strings::StrCat(i);
}

extra += " dims: ";
append_dims(lhs_dims, operands_.at(0)->shape());
extra += "_";
append_dims(rhs_dims, operands_.at(1)->shape());
extra += "->";
append_dims(lhs_dims, shape());
if (convolution_dimension_numbers_ != nullptr) {
StrAppend(&extra, ", ", ConvolutionDimensionNumbersToString());
}

if (opcode() == HloOpcode::kWhile) {
Expand All @@ -1474,8 +1446,7 @@ string HloInstruction::ToString(bool compact_operands) const {
}
return Printf("%s = %s %s(%s)%s", name().c_str(),
ShapeUtil::HumanStringWithLayout(shape()).c_str(),
HloOpcodeString(opcode()).c_str(), operands.c_str(),
extra.c_str());
ExtendedOpcodeStr().c_str(), operands.c_str(), extra.c_str());
}

string HloInstruction::ToShortString() const {
Expand Down Expand Up @@ -2114,19 +2085,65 @@ HloInstruction::ReshapeMerelyInsertsOrDeletes1SizedDimensions() const {
shape_);
}

string FusionKindString(HloInstruction::FusionKind kind) {
string ToString(HloInstruction::FusionKind kind) {
switch (kind) {
case HloInstruction::FusionKind::kLoop:
return "Loop";
return "kLoop";
case HloInstruction::FusionKind::kInput:
return "Input";
return "kInput";
case HloInstruction::FusionKind::kTransposeDot:
return "TransposeDot";
return "kTransposeDot";
case HloInstruction::FusionKind::kConvBackwardFilter:
return "ConvBackwardFilter";
return "kConvBackwardFilter";
case HloInstruction::FusionKind::kConvBackwardInput:
return "ConvBackwardInput";
return "kConvBackwardInput";
}
}

string HloInstruction::ConvolutionDimensionNumbersToString() const {
string result;
if (convolution_dimension_numbers_ == nullptr) {
return result;
}
const ConvolutionDimensionNumbers& dnums = *convolution_dimension_numbers_;
// Show the given dimension labels in order of major to minor based on the
// shape's layout.
const auto append_dims = [&](const std::vector<string>& dims,
const Shape& shape) {
CHECK_EQ(dims.size(), ShapeUtil::Rank(shape));
for (int64 logical = 0; logical < dims.size(); ++logical) {
int64 physical = logical;
if (!shape.layout().minor_to_major().empty()) {
physical = LayoutUtil::Major(shape.layout(), logical);
}
result += dims[physical];
}
};

// lhs_dims[i] is the symbol of the logical dimension i for the lhs
// operand. E.g. if batch has dimension number 2, then lhs_dims[2] == "b".
std::vector<string> lhs_dims(2 + dnums.spatial_dimensions().size());
lhs_dims[dnums.batch_dimension()] = 'b';
lhs_dims[dnums.feature_dimension()] = 'f';
for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
lhs_dims[dnums.spatial_dimensions(i)] = tensorflow::strings::StrCat(i);
}

std::vector<string> rhs_dims(2 + dnums.kernel_spatial_dimensions().size());
rhs_dims[dnums.kernel_input_feature_dimension()] = "i";
rhs_dims[dnums.kernel_output_feature_dimension()] = "o";
for (int64 i = 0; i < dnums.spatial_dimensions().size(); ++i) {
rhs_dims[dnums.kernel_spatial_dimensions(i)] =
tensorflow::strings::StrCat(i);
}

result += "dim_labels=";
append_dims(lhs_dims, operand(0)->shape());
result += "_";
append_dims(rhs_dims, operand(1)->shape());
result += "->";
append_dims(lhs_dims, shape());
return result;
}

bool HloInstruction::CouldBeBitcast() const {
Expand Down
10 changes: 9 additions & 1 deletion tensorflow/compiler/xla/service/hlo_instruction.h
Original file line number Diff line number Diff line change
Expand Up @@ -632,6 +632,9 @@ class HloInstruction {
return *convolution_dimension_numbers_;
}

// Returns the dump string of the convolution dimension numbers.
string ConvolutionDimensionNumbersToString() const;

// Returns the random distribution for this rng node.
//
// Precondition: opcode() == HloOpcode::kRng
Expand Down Expand Up @@ -687,6 +690,11 @@ class HloInstruction {
std::tuple<bool, std::vector<int64>, std::vector<int64>>
ReshapeMerelyInsertsOrDeletes1SizedDimensions() const;

// Returns the opcode string for this instruction. Compared with
// HloOpcodeString method, this wrapper dumps additional information
// such as fusion kind.
string ExtendedOpcodeStr() const;

// Returns a string identifier for this instruction. If no string identifier
// has been explicitly set, then the identifier is the serialized pointer to
// this instruction.
Expand Down Expand Up @@ -884,7 +892,7 @@ class HloInstruction {
TF_DISALLOW_COPY_AND_ASSIGN(HloInstruction);
};

string FusionKindString(HloInstruction::FusionKind kind);
string ToString(HloInstruction::FusionKind kind);

} // namespace xla

Expand Down

0 comments on commit 1258b20

Please sign in to comment.