Merge pull request tensorflow#37697 from kiszk:spelling_tweaks_lite_tools

PiperOrigin-RevId: 302104259
Change-Id: I5df20176812e9353a9ef94947505c41bc38417af
tensorflower-gardener committed Mar 20, 2020
2 parents 2234caf + c4a4aab commit 6119ba3
Showing 12 changed files with 46 additions and 46 deletions.
@@ -88,7 +88,7 @@ def main():
parser.add_argument(
'--ilsvrc_devkit_dir',
type=str,
- help='Full path to ILSVRC 2012 devikit directory.')
+ help='Full path to ILSVRC 2012 devkit directory.')
args = parser.parse_args()
try:
_check_arguments(args)
8 changes: 4 additions & 4 deletions tensorflow/lite/tools/benchmark/benchmark_test.cc
@@ -321,10 +321,10 @@ TEST(BenchmarkTest, DoesntCrashWithExplicitInputValueFilesStringModel) {

class MaxDurationWorksTestListener : public BenchmarkListener {
void OnBenchmarkEnd(const BenchmarkResults& results) override {
- const int64_t num_actul_runs = results.inference_time_us().count();
- TFLITE_LOG(INFO) << "number of actual runs: " << num_actul_runs;
- EXPECT_GE(num_actul_runs, 1);
- EXPECT_LT(num_actul_runs, 100000000);
+ const int64_t num_actual_runs = results.inference_time_us().count();
+ TFLITE_LOG(INFO) << "number of actual runs: " << num_actual_runs;
+ EXPECT_GE(num_actual_runs, 1);
+ EXPECT_LT(num_actual_runs, 100000000);
}
};

2 changes: 1 addition & 1 deletion tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
@@ -488,7 +488,7 @@ BenchmarkTfLiteModel::CreateRandomTensorData(const TfLiteTensor& t,
#else
// You need to build with -DTFLITE_ENABLE_FP16_CPU_BENCHMARKS=1 using a
// compiler that supports __fp16 type. Note: when using Clang and *not*
- // linking with compiler-rt, a defintion of __gnu_h2f_ieee and
+ // linking with compiler-rt, a definition of __gnu_h2f_ieee and
// __gnu_f2h_ieee must be supplied.
TFLITE_LOG(FATAL) << "Populating the tensor " << t.name
<< " of type FLOAT16 is disabled.";
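For context on the comment above: when Clang is used without compiler-rt, the two conversion symbols have to be supplied by the build. A minimal sketch of what such a definition could look like, assuming the IEEE binary16 bit layout (illustrative only, not the TensorFlow Lite code):

#include <cstdint>
#include <cstring>

// Sketch: convert IEEE binary16 bits to float. Subnormal halves are flushed
// to zero here for brevity; a production version would handle them.
extern "C" float __gnu_h2f_ieee(uint16_t h) {
  const uint32_t sign = static_cast<uint32_t>(h & 0x8000u) << 16;
  const uint32_t exp = (h >> 10) & 0x1Fu;
  const uint32_t mant = h & 0x3FFu;
  uint32_t bits;
  if (exp == 0) {
    bits = sign;                                        // zero / subnormal -> 0
  } else if (exp == 0x1Fu) {
    bits = sign | 0x7F800000u | (mant << 13);           // inf / NaN
  } else {
    bits = sign | ((exp + 112u) << 23) | (mant << 13);  // normal number
  }
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}
// __gnu_f2h_ieee would be the inverse mapping (float -> binary16 bits).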
4 changes: 2 additions & 2 deletions tensorflow/lite/tools/evaluation/evaluation_stage.h
@@ -24,8 +24,8 @@ namespace evaluation {
// Superclass for a single stage of an EvaluationPipeline.
// Defines basic skeleton for sub-classes to implement.
//
- // Ideally EvaluationStages should obtain access to initilizer/input objects via
- // Get/Set methods on pointers, and not take ownership unless necessary.
+ // Ideally EvaluationStages should obtain access to initializer/input objects
+ // via Get/Set methods on pointers, and not take ownership unless necessary.
class EvaluationStage {
public:
// Initializes an EvaluationStage, including verifying the
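As a small illustration of the non-owning Get/Set convention described in that comment (hypothetical stage and member names, not the actual evaluation API):

#include <string>

class ExampleStage {  // hypothetical; follows the borrow-don't-own convention
 public:
  // The caller keeps ownership of the object; the stage only stores a pointer.
  void SetModelInfo(const std::string* model_info) { model_info_ = model_info; }
  const std::string* GetModelInfo() const { return model_info_; }

 private:
  const std::string* model_info_ = nullptr;  // not owned
};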
@@ -62,7 +62,7 @@ class TopkAccuracyEvalStage : public EvaluationStage {

private:
// Updates accuracy_counts_ based on comparing top k labels and the
- // groundtruth one. Using string comparision since there are some duplicate
+ // groundtruth one. Using string comparison since there are some duplicate
// labels in the imagenet dataset.
void UpdateCounts(const std::vector<int>& topk_indices);

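A sketch of the label-string comparison that comment describes, with assumed helper names (not the actual UpdateCounts implementation):

#include <string>
#include <vector>

// Counts a hit for every k >= the rank at which the ground-truth label string
// first appears among the top-k predictions. Comparing strings rather than
// indices tolerates duplicate label strings in the ImageNet label list.
void CountTopkHitsSketch(const std::vector<int>& topk_indices,
                         const std::vector<std::string>& labels,
                         const std::string& ground_truth,
                         std::vector<int>* accuracy_counts) {
  for (size_t rank = 0; rank < topk_indices.size(); ++rank) {
    if (labels[topk_indices[rank]] == ground_truth) {
      for (size_t k = rank; k < accuracy_counts->size(); ++k) {
        ++(*accuracy_counts)[k];
      }
      break;
    }
  }
}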
@@ -128,7 +128,7 @@ TEST(ImageMetricsTest, BBoxAPwithIgnoredGroundTruth) {

pd.push_back({false, 100, 0.95, {{0.9, 1.9}, {0.9, 1.9}}});

- // Two gt and three pd, one pair get ignored. So it's actuallly one gt with
+ // Two gt and three pd, one pair get ignored. So it's actually one gt with
// two pd.
EXPECT_NEAR(0.5, AveragePrecision().FromBoxes(gt, pd), 1e-6);
gt[0].ignore = kIgnoreAllMatches;
2 changes: 1 addition & 1 deletion tensorflow/lite/tools/gen_op_registration_test.cc
@@ -36,7 +36,7 @@ class GenOpRegistrationTest : public ::testing::Test {
std::map<string, std::pair<int, int>> custom_ops_;
};

- TEST_F(GenOpRegistrationTest, TestNonExistantFiles) {
+ TEST_F(GenOpRegistrationTest, TestNonExistentFiles) {
ReadOps("/tmp/tflite_model_1234");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
@@ -65,7 +65,7 @@ inline void LstmStepWithAuxInput(
float* output_state_ptr, float* cell_state_ptr, float* input_gate_scratch,
float* forget_gate_scratch, float* cell_scratch, float* output_gate_scratch,
float* output_ptr, Logger* logger,
- const std::vector<int>& intemediate_tensor_indexes,
+ const std::vector<int>& intermediate_tensor_indexes,
ErrorReporter* error_reporter) {
// Since we have already checked that weights are all there or none, we can
// check the existence of only one to the get the condition.
@@ -155,7 +155,7 @@ inline void LstmStepWithAuxInput(
input_gate_scratch);
}
if (use_layer_norm) {
- logger->LogTensorValue(intemediate_tensor_indexes[0], input_gate_scratch,
+ logger->LogTensorValue(intermediate_tensor_indexes[0], input_gate_scratch,
n_cell * n_batch, error_reporter);
tensor_utils::MeanStddevNormalization(
input_gate_scratch, input_gate_scratch, n_cell, n_batch);
@@ -176,7 +176,7 @@ inline void LstmStepWithAuxInput(
forget_gate_scratch);
}
if (use_layer_norm) {
- logger->LogTensorValue(intemediate_tensor_indexes[1], forget_gate_scratch,
+ logger->LogTensorValue(intermediate_tensor_indexes[1], forget_gate_scratch,
n_cell * n_batch, error_reporter);
tensor_utils::MeanStddevNormalization(forget_gate_scratch,
forget_gate_scratch, n_cell, n_batch);
@@ -193,7 +193,7 @@ inline void LstmStepWithAuxInput(
tensor_utils::VectorVectorCwiseProduct(forget_gate_scratch, cell_state_ptr,
n_batch * n_cell, cell_state_ptr);
if (use_layer_norm) {
- logger->LogTensorValue(intemediate_tensor_indexes[2], cell_scratch,
+ logger->LogTensorValue(intermediate_tensor_indexes[2], cell_scratch,
n_cell * n_batch, error_reporter);
tensor_utils::MeanStddevNormalization(cell_scratch, cell_scratch, n_cell,
n_batch);
@@ -226,7 +226,7 @@ inline void LstmStepWithAuxInput(
output_gate_scratch);
}
if (use_layer_norm) {
- logger->LogTensorValue(intemediate_tensor_indexes[3], output_gate_scratch,
+ logger->LogTensorValue(intermediate_tensor_indexes[3], output_gate_scratch,
n_cell * n_batch, error_reporter);
tensor_utils::MeanStddevNormalization(output_gate_scratch,
output_gate_scratch, n_cell, n_batch);
@@ -243,7 +243,7 @@ inline void LstmStepWithAuxInput(
tensor_utils::VectorVectorCwiseProduct(output_gate_scratch, cell_scratch,
n_batch * n_cell, output_gate_scratch);

- logger->LogTensorValue(intemediate_tensor_indexes[4], output_gate_scratch,
+ logger->LogTensorValue(intermediate_tensor_indexes[4], output_gate_scratch,
n_cell * n_batch, error_reporter);

const bool use_projection_weight = (projection_weights_ptr != nullptr);
@@ -314,7 +314,7 @@ TfLiteStatus EvalFloat(
int output_offset, TfLiteTensor* scratch_buffer,
TfLiteTensor* activation_state, TfLiteTensor* cell_state,
TfLiteTensor* output, Logger* logger,
- const std::vector<int>& intemediate_tensor_indexes,
+ const std::vector<int>& intermediate_tensor_indexes,
ErrorReporter* error_reporter) {
TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
int max_time, n_batch;
@@ -402,7 +402,7 @@ TfLiteStatus EvalFloat(
GetTensorData<float>(activation_state),
GetTensorData<float>(cell_state), input_gate_scratch,
forget_gate_scratch, cell_scratch, output_gate_scratch,
- output_ptr_time, logger, intemediate_tensor_indexes, error_reporter);
+ output_ptr_time, logger, intermediate_tensor_indexes, error_reporter);
}
} else {
for (int b = 0; b < n_batch; b++) {
@@ -463,7 +463,7 @@ TfLiteStatus EvalFloat(
n_cell, n_input, aux_input_size, n_output, output_batch_leading_dim,
activation_state_ptr, cell_state_ptr, input_gate_scratch_ptr,
forget_gate_scratch_ptr, cell_scratch_ptr, output_gate_scratch_ptr,
- output_ptr, logger, intemediate_tensor_indexes, error_reporter);
+ output_ptr, logger, intermediate_tensor_indexes, error_reporter);
}
}
}
@@ -559,9 +559,9 @@ TfLiteStatus lstm_eval(TfLiteContext* context, TfLiteNode* node, Logger* logger,
TfLiteTensor* output =
GetOutput(context, node, ops::builtin::lstm::full::kOutputTensor);

- std::vector<int> intemediate_tensor_indexes(node->intermediates->size);
+ std::vector<int> intermediate_tensor_indexes(node->intermediates->size);
for (int i = 0; i < node->intermediates->size; ++i) {
- intemediate_tensor_indexes[i] = node->intermediates->data[i];
+ intermediate_tensor_indexes[i] = node->intermediates->data[i];
}

switch (input_to_output_weights->type) {
@@ -583,7 +583,7 @@ TfLiteStatus lstm_eval(TfLiteContext* context, TfLiteNode* node, Logger* logger,
projection_bias, params, /*forward_sequence=*/true,
/*time_major=*/true,
/*output_offset=*/0, scratch_buffer, activation_state, cell_state,
- output, logger, intemediate_tensor_indexes, error_reporter);
+ output, logger, intermediate_tensor_indexes, error_reporter);
}
case kTfLiteUInt8:
case kTfLiteInt8:
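For reference, the MeanStddevNormalization calls interleaved with the logging above normalize each batch row of a gate to zero mean and unit variance. A plain, unoptimized sketch (the epsilon guard is an assumption, not taken from the TFLite kernel):

#include <cmath>

void MeanStddevNormalizationSketch(const float* input, float* output,
                                   int n_cell, int n_batch) {
  for (int b = 0; b < n_batch; ++b) {
    const float* in = input + b * n_cell;
    float* out = output + b * n_cell;
    float mean = 0.0f;
    for (int i = 0; i < n_cell; ++i) mean += in[i];
    mean /= n_cell;
    float variance = 0.0f;
    for (int i = 0; i < n_cell; ++i) variance += (in[i] - mean) * (in[i] - mean);
    variance /= n_cell;
    // Small epsilon guards against division by zero for constant rows.
    const float inv_stddev = 1.0f / std::sqrt(variance + 1e-8f);
    for (int i = 0; i < n_cell; ++i) out[i] = (in[i] - mean) * inv_stddev;
  }
}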
@@ -165,7 +165,7 @@ TEST(CalibratorTest, MultipleInvokes) {
EXPECT_NEAR(stats.at(tensor_idx).max, expected_values[tensor_idx], eps);
}
// Set input[0][0] = 1.5 and input[0][1] = 0.5 this should change the values
- // only for input[0] and tensor 4 and ouputs 5, 6.
+ // only for input[0] and tensor 4 and outputs 5, 6.
TfLiteTensor* input0 = interpreter->tensor(0);
input0->data.f[0] = 1.5f;
input0->data.f[1] = 0.5f;
@@ -29,20 +29,20 @@ LoggingOpResolver::LoggingOpResolver(
base_resolver.FindOp(op_and_version.first, op_and_version.second);
BuiltinOperatorKey key = op_and_version;
builtin_op_evalfn_map_[key] = base_registration->invoke;
- auto logging_registation =
+ auto logging_registration =
absl::make_unique<TfLiteRegistration>(*base_registration);
- logging_registation->invoke = logging_eval_fn;
- builtin_op_registration_map_[key] = std::move(logging_registation);
+ logging_registration->invoke = logging_eval_fn;
+ builtin_op_registration_map_[key] = std::move(logging_registration);
}
for (const auto& op_and_version : custom_ops_to_replace) {
const TfLiteRegistration* base_registration = base_resolver.FindOp(
op_and_version.first.c_str(), op_and_version.second);
CustomOperatorKey key = op_and_version;
custom_op_evalfn_map_[key] = base_registration->invoke;
- auto logging_registation =
+ auto logging_registration =
absl::make_unique<TfLiteRegistration>(*base_registration);
- logging_registation->invoke = logging_eval_fn;
- custom_op_registration_map_[key] = std::move(logging_registation);
+ logging_registration->invoke = logging_eval_fn;
+ custom_op_registration_map_[key] = std::move(logging_registration);
}
}

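The constructor above swaps each registration's invoke function for a logging one while stashing the original in an eval-fn map. A generic sketch of that wrap-and-delegate pattern with hypothetical types (the real code operates on TfLiteRegistration structs, not std::function):

#include <functional>
#include <iostream>
#include <map>
#include <string>

using EvalFn = std::function<int(int)>;

std::map<std::string, EvalFn> original_eval_fns;  // analogous to *_evalfn_map_

EvalFn MakeLoggingEvalFn(const std::string& op_name) {
  return [op_name](int input) {
    std::cout << "invoking " << op_name << "\n";  // logging hook
    return original_eval_fns[op_name](input);     // delegate to the original
  };
}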
24 changes: 12 additions & 12 deletions tensorflow/lite/tools/optimize/operator_property.cc
@@ -496,8 +496,8 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
tensor_property_9.number_of_bits = 16;
tensor_property_9.symmetric = true;
// Without layer norm, we choose to quantize bias with the scale of
- // input and its correpsonding weight. The other choice will
- // be to ues the scale of recurrent and its correpsonding weight but we
+ // input and its corresponding weight. The other choice will
+ // be to ues the scale of recurrent and its corresponding weight but we
// choose to use the smaller scale, which means higher resolution.
TensorProperty tensor_property_12;
tensor_property_12.use_derived_scale = true;
@@ -548,7 +548,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
};
property.outputs = {{0, {}}};
property.intermediates = {
- // Without layer normliazation, intermediate tensors 0, 1, 2, 3 are
+ // Without layer normalization, intermediate tensors 0, 1, 2, 3 are
// not used and and their quantization parameters are ignored.
{0, {}},
{1, {}},
@@ -563,8 +563,8 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
if (!op_variant.use_layer_norm && op_variant.use_projection &&
!op_variant.use_peephole) {
// Without layer norm, we choose to quantize bias with the scale of
- // input and its correpsonding weight. The other choice will
- // be to ues the scale of recurrent and its correpsonding weight but we
+ // input and its corresponding weight. The other choice will
+ // be to ues the scale of recurrent and its corresponding weight but we
// choose to use the smaller scale, which means higher resolution.
TensorProperty tensor_property_12;
tensor_property_12.use_derived_scale = true;
@@ -612,7 +612,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
};
property.outputs = {{0, {}}};
property.intermediates = {
- // Without layer normliazation, intermediate tensors 0, 1, 2, 3 are
+ // Without layer normalization, intermediate tensors 0, 1, 2, 3 are
// not used and their quantization parameters are ignored.
{0, {}},
{1, {}},
@@ -630,8 +630,8 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
tensor_property_9.number_of_bits = 16;
tensor_property_9.symmetric = true;
// Without layer norm, we choose to quantize bias with the scale of
- // input and its correpsonding weight. The other choice will
- // be to ues the scale of recurrent and its correpsonding weight but we
+ // input and its corresponding weight. The other choice will
+ // be to ues the scale of recurrent and its corresponding weight but we
// choose to use the smaller scale, which means higher resolution.
TensorProperty tensor_property_12;
tensor_property_12.use_derived_scale = true;
@@ -676,7 +676,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
};
property.outputs = {{0, {}}};
property.intermediates = {
- // Without layer normliazation, intermediate tensors 0, 1, 2, 3 are
+ // Without layer normalization, intermediate tensors 0, 1, 2, 3 are
// not used and their quantization parameters are ignored.
{0, {}},
{1, {}},
@@ -696,8 +696,8 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
if (!op_variant.use_layer_norm && !op_variant.use_projection &&
!op_variant.use_peephole) {
// Without layer norm, we choose to quantize bias with the scale of
- // input and its correpsonding weight. The other choice will
- // be to ues the scale of recurrent and its correpsonding weight but we
+ // input and its corresponding weight. The other choice will
+ // be to ues the scale of recurrent and its corresponding weight but we
// choose to use the smaller scale, which means higher resolution.
TensorProperty tensor_property_12;
tensor_property_12.use_derived_scale = true;
@@ -739,7 +739,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
};
property.outputs = {{0, {}}};
property.intermediates = {
- // Without layer normliazation, intermediate tensors 0, 1, 2, 3 are
+ // Without layer normalization, intermediate tensors 0, 1, 2, 3 are
// not used and their quantization parameters are ignored.
{0, {}},
{1, {}},
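The recurring comment above about quantizing the bias "with the scale of input and its corresponding weight" refers to a derived scale, i.e. the product of the two scales. A minimal sketch of that relationship (illustrative, not the exact helper used by the tool):

// Bias is stored as int32 with scale = input_scale * weight_scale, so a float
// bias value b maps to round(b / (input_scale * weight_scale)).
float DeriveBiasScale(float input_scale, float weight_scale) {
  return input_scale * weight_scale;
}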
8 changes: 4 additions & 4 deletions tensorflow/lite/tools/optimize/quantize_model.cc
@@ -347,7 +347,7 @@ TfLiteStatus ApplyConstraints(ModelT* model,

// Add requant op before this input.
// There are better ways to handle this, which is to try to push the
- // rescale upwards recurrsively and hope all upstream ops can absort
+ // rescale upwards recursively and hope all upstream ops can absort
// this rescale.and only add requant when there is no other way.
std::unique_ptr<OperatorT> requant_op;
utils::MakeQuantizeOperator(model, &requant_op, op->inputs[input_idx],
@@ -747,9 +747,9 @@ TfLiteStatus QuantizeIntemediateTensors(ModelT* model,
// Quantize tensros that have shared range. For example, in LSTM, the output
// tensor and input state tensor should share the same range because they are
// using the same scale and zero point.
- // We have to model this explicitely because the output is modeled as an extra
+ // We have to model this explicitly because the output is modeled as an extra
// tensor in LSTM. In calibrator, state tensors are logged both before and after
- // the inferece so the range is fully captured. But output, although it is
+ // the inference so the range is fully captured. But output, although it is
// identical to activation, is not a state tensor the input value (range) of the
// very first inference is not captured.
TfLiteStatus QuantizeSharedRange(ModelT* model, ErrorReporter* error_reporter) {
@@ -1073,7 +1073,7 @@ TfLiteStatus EnsureBiasScaleCompatibility(
return kTfLiteError;
}

- // Get input scale for assymmetric quantization.
+ // Get input scale for asymmetric quantization.
QuantizationParametersT temp_quant_params = QuantizationParametersT();
utils::GetAsymmetricQuantizationParams(
input_tensor->quantization->min[0],
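For the asymmetric quantization mentioned in the last hunk, the parameters are typically recovered from a recorded (min, max) range roughly as follows. This is a sketch assuming an 8-bit unsigned target, not the exact utils::GetAsymmetricQuantizationParams implementation:

#include <algorithm>
#include <cmath>

void AsymmetricQuantParamsSketch(float rmin, float rmax, float* scale,
                                 int* zero_point) {
  rmin = std::min(rmin, 0.0f);  // the representable range must include zero
  rmax = std::max(rmax, 0.0f);
  if (rmin == rmax) {           // degenerate range
    *scale = 1.0f;
    *zero_point = 0;
    return;
  }
  *scale = (rmax - rmin) / 255.0f;
  *zero_point = static_cast<int>(std::round(-rmin / *scale));
}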
