Commit ba5d33b

bddppq authored and facebook-github-bot committed

Re-Enable ATen in C2 in integration builds to test ONNX ATen conversions

Summary: Pull Request resolved: pytorch#10060
Differential Revision: D9081387
Pulled By: bddppq
fbshipit-source-id: 13cbff63df5241e013d4ebacfcd6da082e7196f6

1 parent e04f8bb commit ba5d33b
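For context, the "ONNX ATen conversions" being tested are PyTorch operators that the exporter emits as ATen fallback nodes rather than native ONNX ops; the Caffe2 ONNX backend can only run such graphs when Caffe2 itself was built with ATen, which is what this commit turns on for integration builds. A rough sketch of that round trip (names and flow are illustrative, not taken from the PR, and the export API varies across PyTorch versions of this era):

import io

import caffe2.python.onnx.backend as c2_backend  # needs Caffe2 built with ATen
import onnx
import torch


class CumsumModel(torch.nn.Module):
    def forward(self, x):
        return torch.cumsum(x, dim=1)


x = torch.randn(3, 4, 5)
f = io.BytesIO()
# At the time of this commit cumsum had no native ONNX symbolic, so it was
# exported as an ATen fallback node; depending on the PyTorch version this
# may require explicitly enabling the ATen fallback in torch.onnx.export.
torch.onnx.export(CumsumModel(), x, f)

# The exported graph contains an `ATen` op, which the Caffe2 backend can only
# execute when Caffe2 was configured with ATen support (-DBUILD_ATEN=ON /
# -DUSE_ATEN=ON, the switches flipped elsewhere in this commit).
model = onnx.load_from_string(f.getvalue())
outputs = c2_backend.prepare(model).run([x.numpy()])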

File tree

.jenkins/caffe2/build.sh
CMakeLists.txt
cmake/MiscCheck.cmake
setup_caffe2.py
test/onnx/test_pytorch_onnx_caffe2.py

5 files changed: +25 -2 lines changed

.jenkins/caffe2/build.sh
Lines changed: 1 addition & 1 deletion

@@ -124,7 +124,7 @@ CMAKE_ARGS+=("-DUSE_OBSERVERS=ON")
 CMAKE_ARGS+=("-DUSE_ZSTD=ON")
 CMAKE_ARGS+=("-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}")
 
-if [[ $BUILD_ENVIRONMENT == *-aten-* ]]; then
+if [[ $BUILD_ENVIRONMENT == *-aten-* || -n "$INTEGRATED" ]]; then
   if [[ CMAKE_ARGS != *USE_ATEN* ]] && [[ CMAKE_ARGS != *BUILD_ATEN* ]]; then
     CMAKE_ARGS+=("-DBUILD_ATEN=ON")
   fi
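In plain terms, the CI script now requests an ATen build not only for *-aten-* build environments but also whenever INTEGRATED is set, i.e. for the ONNX integration builds. Note that the inner bash guard compares the literal string CMAKE_ARGS (there is no `$`), an apparent pre-existing quirk; a minimal Python rendering of the intended gating, purely illustrative since the real logic is the bash above:

import os


def wants_aten(cmake_args):
    # Mirror of the gating in .jenkins/caffe2/build.sh (illustrative only).
    build_env = os.environ.get('BUILD_ENVIRONMENT', '')
    integrated = os.environ.get('INTEGRATED', '')
    if '-aten-' in build_env or integrated:
        # Only add the flag if the caller has not already chosen one.
        if not any('USE_ATEN' in a or 'BUILD_ATEN' in a for a in cmake_args):
            cmake_args.append('-DBUILD_ATEN=ON')
    return cmake_args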

CMakeLists.txt
Lines changed: 2 additions & 1 deletion

@@ -216,7 +216,7 @@ if(NOT MSVC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations")
   # These flags are not available in GCC-4.8.5. Set only when using clang.
   # Compared against https://gcc.gnu.org/onlinedocs/gcc-4.8.5/gcc/Option-Summary.html
-  if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+  if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-invalid-partial-specialization")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-typedef-redefinition")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unknown-warning-option")
@@ -226,6 +226,7 @@ if(NOT MSVC)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-c++14-extensions")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-constexpr-not-const")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-braces")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Qunused-arguments")
   endif()
   if ((APPLE AND (NOT ("${CLANG_VERSION_STRING}" VERSION_LESS "9.0")))
       OR (CMAKE_COMPILER_IS_GNUCXX

cmake/MiscCheck.cmake
Lines changed: 9 additions & 0 deletions

@@ -161,6 +161,15 @@ if (${COMPILER_SUPPORTS_HIDDEN_INLINE_VISIBILITY})
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CAFFE2_VISIBILITY_FLAG}")
 endif()
 
+# ---[ Checks if the linker supports -rdynamic. `-rdynamic` tells the linker
+# to add all (including unused) symbols into the dynamic symbol
+# table. We need this to get symbols when generating a backtrace at
+# runtime.
+check_cxx_compiler_flag("-rdynamic" COMPILER_SUPPORTS_RDYNAMIC)
+if (${COMPILER_SUPPORTS_RDYNAMIC})
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -rdynamic")
+endif()
+
 # ---[ If we are using msvc, set no warning flags
 # Note(jiayq): if you are going to add a warning flag, check if this is
 # totally necessary, and only add when you see fit. If it is needed due to

setup_caffe2.py
Lines changed: 1 addition & 0 deletions

@@ -131,6 +131,7 @@ def run(self):
         # configure
         cmake_args = [
             find_executable('cmake'),
+            '-DUSE_ATEN=ON',
             '-DBUILD_SHARED_LIBS=OFF',
             '-DPYTHON_EXECUTABLE:FILEPATH={}'.format(sys.executable),
             '-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_python_inc()),
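For context, a setup script like this typically passes these arguments straight to a CMake configure step and then builds. A minimal sketch of that pattern under stated assumptions: source_dir and build_dir are illustrative names, not taken from the file, and only the '-DUSE_ATEN=ON' flag comes from this diff.

import os
import subprocess
import sys
from distutils import sysconfig
from distutils.spawn import find_executable

source_dir = os.getcwd()                       # illustrative: the repo checkout
build_dir = os.path.join(source_dir, 'build')  # illustrative: out-of-tree build
if not os.path.exists(build_dir):
    os.makedirs(build_dir)

cmake_args = [
    find_executable('cmake'),
    '-DUSE_ATEN=ON',  # the flag this commit adds, so ATen ops are available
    '-DBUILD_SHARED_LIBS=OFF',
    '-DPYTHON_EXECUTABLE:FILEPATH={}'.format(sys.executable),
    '-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_python_inc()),
]
# Configure, then build.
subprocess.check_call(cmake_args + [source_dir], cwd=build_dir)
subprocess.check_call(['cmake', '--build', '.'], cwd=build_dir)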

test/onnx/test_pytorch_onnx_caffe2.py
Lines changed: 12 additions & 0 deletions

@@ -676,6 +676,18 @@ def forward(self, x):
         x = Variable(torch.randn(*shape))
         self.run_model_test(MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False)
 
+    def test_cumsum(self):
+        shape = (3, 4, 5)
+        for params in [{'dim': i} for i in range(len(shape))]:
+            class MyModel(torch.nn.Module):
+                def __init__(self):
+                    super(MyModel, self).__init__()
+
+                def forward(self, x):
+                    return torch.cumsum(x, **params)
+            x = Variable(torch.randn(*shape))
+            self.run_model_test(MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False)
+
     def test_repeat(self):
         class MyModel(torch.nn.Module):
             def __init__(self):