Skip to content

Commit be6e1ce

Browse files
authored
Merge pull request #3 from tensorflow/master
post 2.1
2 parents 4235c01 + 2ffb2fb commit be6e1ce

File tree

1,577 files changed

+28575
-175118
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

1,577 files changed

+28575
-175118
lines changed

.bazelrc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -381,9 +381,9 @@ build:rbe_linux_py3 --python_path="/usr/bin/python3"
381381
build:rbe_linux_py3 --repo_env=TF_PYTHON_CONFIG_REPO="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/py3"
382382

383383
build:rbe_win --config=rbe
384-
build:rbe_win --crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/win_1803/bazel_026:toolchain"
384+
build:rbe_win --crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/win_1803/bazel_121:toolchain"
385385
build:rbe_win --extra_execution_platforms="@org_tensorflow//third_party/toolchains/preconfig/win_1803:rbe_windows_1803"
386-
build:rbe_win --extra_toolchains="@org_tensorflow//third_party/toolchains/preconfig/win_1803/bazel_026:cc-toolchain-x64_windows"
386+
build:rbe_win --extra_toolchains="@org_tensorflow//third_party/toolchains/preconfig/win_1803/bazel_121:cc-toolchain-x64_windows"
387387
build:rbe_win --host_javabase="@org_tensorflow//third_party/toolchains/preconfig/win_1803:windows_jdk8"
388388
build:rbe_win --host_platform="@org_tensorflow//third_party/toolchains/preconfig/win_1803:rbe_windows_1803"
389389
build:rbe_win --javabase="@org_tensorflow//third_party/toolchains/preconfig/win_1803:windows_jdk8"

.bazelversion

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
1.1.0
1+
1.2.1

README.md

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -29,20 +29,6 @@ to
2929
[announce@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/announce).
3030
See all the [mailing lists](https://www.tensorflow.org/community/forums).
3131

32-
## Feature Prioritization Survey
33-
34-
The TensorFlow team is working on building/improving features, and understands
35-
that it is very important to prioritize these efforts based on what TF users
36-
need.
37-
38-
The goal of this short, < 5 minute
39-
[survey](https://google.qualtrics.com/jfe/form/SV_d5nqhCEbkDkQ7ad), is to help
40-
the TensorFlow team better understand what features to prioritize based on your
41-
feedback. Participation is of course optional.
42-
43-
Take the survey
44-
[HERE](https://google.qualtrics.com/jfe/form/SV_d5nqhCEbkDkQ7ad).
45-
4632
## Install
4733

4834
See the [TensorFlow install guide](https://www.tensorflow.org/install) for the
@@ -164,4 +150,3 @@ Learn more about the
164150
## License
165151

166152
[Apache License 2.0](LICENSE)
167-

configure.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -49,8 +49,8 @@
4949
_TF_WORKSPACE_ROOT = ''
5050
_TF_BAZELRC = ''
5151
_TF_CURRENT_BAZEL_VERSION = None
52-
_TF_MIN_BAZEL_VERSION = '1.0.0'
53-
_TF_MAX_BAZEL_VERSION = '1.1.0'
52+
_TF_MIN_BAZEL_VERSION = '1.2.1'
53+
_TF_MAX_BAZEL_VERSION = '1.2.1'
5454

5555
NCCL_LIB_PATHS = [
5656
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
@@ -147,14 +147,16 @@ def write_action_env_to_bazelrc(var_name, var):
147147
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
148148

149149

150-
def run_shell(cmd, allow_non_zero=False):
150+
def run_shell(cmd, allow_non_zero=False, stderr=None):
151+
if stderr is None:
152+
stderr = sys.stdout
151153
if allow_non_zero:
152154
try:
153-
output = subprocess.check_output(cmd)
155+
output = subprocess.check_output(cmd, stderr=stderr)
154156
except subprocess.CalledProcessError as e:
155157
output = e.output
156158
else:
157-
output = subprocess.check_output(cmd)
159+
output = subprocess.check_output(cmd, stderr=stderr)
158160
return output.decode('UTF-8').strip()
159161

160162

@@ -169,10 +171,12 @@ def get_python_path(environ_cp, python_bin_path):
169171
if environ_cp.get('PYTHONPATH'):
170172
python_paths = environ_cp.get('PYTHONPATH').split(':')
171173
try:
174+
stderr = open(os.devnull, 'wb')
172175
library_paths = run_shell([
173176
python_bin_path, '-c',
174177
'import site; print("\\n".join(site.getsitepackages()))'
175-
]).split('\n')
178+
],
179+
stderr=stderr).split('\n')
176180
except subprocess.CalledProcessError:
177181
library_paths = [
178182
run_shell([

tensorflow/c/c_api.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -458,7 +458,7 @@ static void TF_Run_Helper(
458458
EmptyTensor(static_cast<TF_DataType>(src.dtype()), src.shape());
459459
continue;
460460
}
461-
c_outputs[i] = TF_TensorFromTensor(src, status);
461+
c_outputs[i] = TF_TensorFromTensor(src, &status->status);
462462
if (!status->status.ok()) return;
463463
}
464464
}
@@ -1493,7 +1493,7 @@ void TF_OperationGetAttrTensor(TF_Operation* oper, const char* attr_name,
14931493
Tensor t;
14941494
status->status = tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &t);
14951495
if (!status->status.ok()) return;
1496-
*value = TF_TensorFromTensor(t, status);
1496+
*value = TF_TensorFromTensor(t, &status->status);
14971497
}
14981498

14991499
void TF_OperationGetAttrTensorList(TF_Operation* oper, const char* attr_name,
@@ -1504,7 +1504,7 @@ void TF_OperationGetAttrTensorList(TF_Operation* oper, const char* attr_name,
15041504
if (!status->status.ok()) return;
15051505
const auto len = std::min(max_values, static_cast<int>(ts.size()));
15061506
for (int i = 0; i < len; ++i) {
1507-
values[i] = TF_TensorFromTensor(ts[i], status);
1507+
values[i] = TF_TensorFromTensor(ts[i], &status->status);
15081508
}
15091509
}
15101510

@@ -2398,7 +2398,7 @@ unsigned char TF_TryEvaluateConstant(TF_Graph* graph, TF_Output output,
23982398
graph->graph.versions().producer(), &evaluated, &result_tensor);
23992399
if (evaluated) {
24002400
DCHECK(status->status.ok());
2401-
*result = TF_TensorFromTensor(result_tensor, status);
2401+
*result = TF_TensorFromTensor(result_tensor, &status->status);
24022402
if (!status->status.ok()) evaluated = false;
24032403
}
24042404
return evaluated;

tensorflow/c/c_api_experimental.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -634,7 +634,7 @@ TF_Tensor* TF_CheckpointReaderGetTensor(TF_CheckpointReader* reader,
634634
std::unique_ptr<tensorflow::Tensor> tensor;
635635
reader->GetTensor(name, &tensor, status);
636636
if (!status->status.ok()) return nullptr;
637-
return tensorflow::TF_TensorFromTensor(*tensor, status);
637+
return tensorflow::TF_TensorFromTensor(*tensor, &status->status);
638638
}
639639

640640
void TF_CheckpointReaderGetVariableShape(TF_CheckpointReader* reader,

tensorflow/c/c_api_internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ namespace tensorflow {
188188

189189
Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst);
190190

191-
TF_Tensor* TF_TensorFromTensor(const Tensor& src, TF_Status* status);
191+
TF_Tensor* TF_TensorFromTensor(const Tensor& src, Status* status);
192192

193193
Status MessageToBuffer(const tensorflow::protobuf::MessageLite& in,
194194
TF_Buffer* out);

tensorflow/c/c_api_test.cc

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ limitations under the License.
5151
#include "tensorflow/core/util/equal_graph_def.h"
5252

5353
namespace tensorflow {
54-
TF_Tensor* TF_TensorFromTensor(const Tensor& src, TF_Status* status);
54+
TF_Tensor* TF_TensorFromTensor(const Tensor& src, Status* status);
5555
Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst);
5656

5757
namespace {
@@ -227,7 +227,7 @@ TEST(CAPI, LibraryLoadFunctions) {
227227

228228
void TestEncodeDecode(int line, const std::vector<string>& data) {
229229
const tensorflow::int64 n = data.size();
230-
TF_Status* status = TF_NewStatus();
230+
Status status;
231231
for (const std::vector<tensorflow::int64>& dims :
232232
std::vector<std::vector<tensorflow::int64>>{
233233
{n}, {1, n}, {n, 1}, {n / 2, 2}}) {
@@ -236,8 +236,8 @@ void TestEncodeDecode(int line, const std::vector<string>& data) {
236236
for (tensorflow::int64 i = 0; i < src.NumElements(); ++i) {
237237
src.flat<tstring>()(i) = data[i];
238238
}
239-
TF_Tensor* dst = TF_TensorFromTensor(src, status);
240-
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
239+
TF_Tensor* dst = TF_TensorFromTensor(src, &status);
240+
ASSERT_TRUE(status.ok()) << status.error_message();
241241

242242
// Convert back to a C++ Tensor and ensure we get expected output.
243243
Tensor output;
@@ -249,7 +249,6 @@ void TestEncodeDecode(int line, const std::vector<string>& data) {
249249

250250
TF_DeleteTensor(dst);
251251
}
252-
TF_DeleteStatus(status);
253252
}
254253

255254
TEST(CAPI, TensorEncodeDecodeStrings) {
@@ -1394,8 +1393,9 @@ TEST(CAPI, SavedModel) {
13941393
TF_Operation* input_op =
13951394
TF_GraphOperationByName(graph, input_op_name.c_str());
13961395
ASSERT_TRUE(input_op != nullptr);
1397-
csession.SetInputs({{input_op, TF_TensorFromTensor(input, s)}});
1398-
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
1396+
Status status;
1397+
csession.SetInputs({{input_op, TF_TensorFromTensor(input, &status)}});
1398+
ASSERT_TRUE(status.ok()) << status.error_message();
13991399

14001400
const tensorflow::string output_op_name(
14011401
tensorflow::ParseTensorName(output_name).first);
@@ -2522,12 +2522,11 @@ TEST(CAPI, TestTensorIsNotAligned) {
25222522

25232523
// Take an unaligned slice.
25242524
Tensor y = x.Slice(1, 13);
2525-
TF_Status* status = TF_NewStatus();
2526-
TF_Tensor* a = TF_TensorFromTensor(y, status);
2525+
Status status;
2526+
TF_Tensor* a = TF_TensorFromTensor(y, &status);
25272527
if (EIGEN_MAX_ALIGN_BYTES > 0) {
25282528
EXPECT_FALSE(TF_TensorIsAligned(a));
25292529
}
2530-
TF_DeleteStatus(status);
25312530
TF_DeleteTensor(a);
25322531
}
25332532

tensorflow/c/eager/c_api.cc

Lines changed: 20 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -464,7 +464,7 @@ tensorflow::Status UpdateTFE_ContextWithServerDef(
464464
&new_remote_device_mgr));
465465
remote_device_mgr = new_remote_device_mgr.get();
466466
} else {
467-
ctx->context->ClearCaches();
467+
ctx->context->ClearCachesAndDefaultExecutor();
468468
// TODO(b/143914772): Potential memory leak if rendezvous has pending
469469
// tensors for removed / replaced workers.
470470

@@ -638,34 +638,28 @@ tensorflow::Status OpInferSingleInputAttrs(TFE_Op* op,
638638

639639
void OpInferSingleTypeInputListAttrs(TFE_Op* op,
640640
const tensorflow::OpDef::ArgDef& input_def,
641-
TFE_TensorHandle** inputs,
641+
const tensorflow::DataType dtype,
642642
int num_inputs) {
643643
TFE_OpInferenceContext* ictx = op->inference_ctx.get();
644644
if (ictx->attrs.find(input_def.number_attr()) == ictx->attrs.end()) {
645645
op->operation.MutableAttrs()->Set(input_def.number_attr(), num_inputs);
646646
ictx->attrs.insert(input_def.number_attr());
647647
}
648648
if (ictx->attrs.find(input_def.type_attr()) == ictx->attrs.end()) {
649-
op->operation.MutableAttrs()->Set(input_def.type_attr(),
650-
inputs[0]->handle->dtype);
649+
op->operation.MutableAttrs()->Set(input_def.type_attr(), dtype);
651650
ictx->attrs.insert(input_def.type_attr());
652651
}
653652
}
654653

655-
void OpInferMixedTypeInputListAttrs(TFE_Op* op,
656-
const tensorflow::OpDef::ArgDef& input_def,
657-
TFE_TensorHandle** inputs, int num_inputs) {
654+
void OpInferMixedTypeInputListAttrs(
655+
TFE_Op* op, const tensorflow::OpDef::ArgDef& input_def,
656+
const std::vector<tensorflow::DataType>& dtypes) {
658657
TFE_OpInferenceContext* ictx = op->inference_ctx.get();
659658
if (ictx->attrs.find(input_def.type_list_attr()) == ictx->attrs.end()) {
660-
std::unique_ptr<tensorflow::DataType[]> dtypes(
661-
new tensorflow::DataType[num_inputs]);
662-
for (int i = 0; i < num_inputs; ++i) {
663-
dtypes[i] = inputs[i]->handle->dtype;
664-
}
665659
op->operation.MutableAttrs()->Set(
666660
input_def.type_list_attr(),
667-
tensorflow::gtl::ArraySlice<const tensorflow::DataType>(dtypes.get(),
668-
num_inputs));
661+
tensorflow::gtl::ArraySlice<const tensorflow::DataType>(dtypes.data(),
662+
dtypes.size()));
669663
ictx->attrs.insert(input_def.type_list_attr());
670664
}
671665
}
@@ -675,10 +669,15 @@ tensorflow::Status OpInferInputListAttrs(TFE_Op* op, TFE_TensorHandle** inputs,
675669
TFE_OpInferenceContext* ictx = op->inference_ctx.get();
676670
const auto& input_def = ictx->op_def->input_arg(ictx->input_arg_idx++);
677671
if (!input_def.type_list_attr().empty()) {
678-
OpInferMixedTypeInputListAttrs(op, input_def, inputs, num_inputs);
672+
std::vector<tensorflow::DataType> dtypes(num_inputs);
673+
for (int i = 0; i < num_inputs; ++i) {
674+
dtypes[i] = inputs[i]->handle->dtype;
675+
}
676+
OpInferMixedTypeInputListAttrs(op, input_def, dtypes);
679677
} else if (!input_def.type_attr().empty() &&
680678
!input_def.number_attr().empty()) {
681-
OpInferSingleTypeInputListAttrs(op, input_def, inputs, num_inputs);
679+
OpInferSingleTypeInputListAttrs(op, input_def, inputs[0]->handle->dtype,
680+
num_inputs);
682681
} else {
683682
return tensorflow::errors::InvalidArgument("Invalid input list definition");
684683
}
@@ -754,7 +753,9 @@ TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx, TF_Status* status) {
754753
return list;
755754
}
756755

757-
void TFE_ContextClearCaches(TFE_Context* ctx) { ctx->context->ClearCaches(); }
756+
void TFE_ContextClearCaches(TFE_Context* ctx) {
757+
ctx->context->ClearCachesAndThreadExecutors();
758+
}
758759

759760
// Set server_def on the context, possibly updating it.
760761
TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
@@ -990,7 +991,7 @@ TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h, TF_Status* status) {
990991
h_cpu->Unref();
991992
return nullptr;
992993
}
993-
TF_Tensor* retval = tensorflow::TF_TensorFromTensor(*t, status);
994+
TF_Tensor* retval = tensorflow::TF_TensorFromTensor(*t, &status->status);
994995
h_cpu->Unref();
995996
return retval;
996997
} else {
@@ -1006,7 +1007,7 @@ TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h, TF_Status* status) {
10061007
status->status = h->handle->CopyToDevice(ctx, ctx->HostCPU(), &tensor);
10071008
if (!status->status.ok()) return nullptr;
10081009
}
1009-
return tensorflow::TF_TensorFromTensor(tensor, status);
1010+
return tensorflow::TF_TensorFromTensor(tensor, &status->status);
10101011
}
10111012
}
10121013

tensorflow/c/eager/c_api.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,7 @@ typedef struct TFE_TensorDebugInfo TFE_TensorDebugInfo;
206206
// error and nullptr is returned. This function can block till the operation
207207
// that produces `handle` has completed.
208208
TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
209-
TFE_TensorHandle* handle, TF_Status* status);
209+
TFE_TensorHandle* h, TF_Status* status);
210210

211211
// Deletes `debug_info`.
212212
TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(

0 commit comments

Comments (0)