Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix all warnings & Add option TREAT_WARNINGS_AS_ERRORS to cmake #5751

Merged
merged 19 commits into from
Aug 10, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -134,25 +134,25 @@ jobs:
include:
- test_suite: cuda
cuda_version: 10.2
extra_flags: --extra_oneflow_cmake_args=-DCUDA_NVCC_GENCODES=arch=compute_61,code=sm_61 --extra_oneflow_cmake_args=-DRPC_BACKEND=GRPC,LOCAL --extra_oneflow_cmake_args=-DPIP_INDEX_MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple
extra_flags: --extra_oneflow_cmake_args=-DCUDA_NVCC_GENCODES=arch=compute_61,code=sm_61 --extra_oneflow_cmake_args=-DRPC_BACKEND=GRPC,LOCAL --extra_oneflow_cmake_args=-DPIP_INDEX_MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --extra_oneflow_cmake_args=-DTREAT_WARNINGS_AS_ERRORS=OFF
os: [self-hosted, linux, build]
allow_fail: false
python_version: 3.6,3.7
- test_suite: cpu
cuda_version: 10.2
extra_flags: --extra_oneflow_cmake_args=-DBUILD_SHARED_LIBS=OFF --extra_oneflow_cmake_args=-DRPC_BACKEND=LOCAL --cpu --gcc7
extra_flags: --extra_oneflow_cmake_args=-DBUILD_SHARED_LIBS=OFF --extra_oneflow_cmake_args=-DRPC_BACKEND=LOCAL --cpu --gcc7 --extra_oneflow_cmake_args=-DTREAT_WARNINGS_AS_ERRORS=OFF
os: [self-hosted, linux, build]
allow_fail: false
python_version: 3.6,3.7
- test_suite: xla
cuda_version: 10.1
extra_flags: --extra_oneflow_cmake_args=-DCUDA_NVCC_GENCODES=arch=compute_61,code=sm_61 --extra_oneflow_cmake_args=-DRPC_BACKEND=GRPC,LOCAL --xla --extra_oneflow_cmake_args=-DPIP_INDEX_MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple
extra_flags: --extra_oneflow_cmake_args=-DCUDA_NVCC_GENCODES=arch=compute_61,code=sm_61 --extra_oneflow_cmake_args=-DRPC_BACKEND=GRPC,LOCAL --xla --extra_oneflow_cmake_args=-DPIP_INDEX_MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --extra_oneflow_cmake_args=-DTREAT_WARNINGS_AS_ERRORS=OFF
os: [self-hosted, linux, build]
allow_fail: true
python_version: 3.6
- test_suite: xla_cpu
cuda_version: 10.1
extra_flags: --extra_oneflow_cmake_args=-DRPC_BACKEND=GRPC,LOCAL --xla --cpu --extra_oneflow_cmake_args=-DPIP_INDEX_MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple
extra_flags: --extra_oneflow_cmake_args=-DRPC_BACKEND=GRPC,LOCAL --xla --cpu --extra_oneflow_cmake_args=-DPIP_INDEX_MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --extra_oneflow_cmake_args=-DTREAT_WARNINGS_AS_ERRORS=OFF
os: [self-hosted, linux, build]
allow_fail: true
python_version: 3.6
Expand Down
6 changes: 2 additions & 4 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ option(WITH_COCOAPI "Option to build with COCO API" ON)
option(BUILD_GIT_VERSION "" ON)
option(BUILD_PROFILER "" OFF)
option(OF_SOFTMAX_USE_FAST_MATH "" ON)
option(TREAT_WARNINGS_AS_ERRORS "" ON)
set(RPC_BACKEND "GRPC,LOCAL" CACHE STRING "")
set(THIRD_PARTY_MIRROR "" CACHE STRING "")
set(PIP_INDEX_MIRROR "" CACHE STRING "")
Expand Down Expand Up @@ -126,10 +127,7 @@ if(WIN32)
#set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS} /DEBUG:FASTLINK")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /D_ITERATOR_DEBUG_LEVEL=0")
else()
set(EXTRA_CXX_FLAGS "-std=c++11 -Wall -Wno-sign-compare -Wno-unused-function -fPIC -Werror=return-type")
if (APPLE)
set(EXTRA_CXX_FLAGS "${EXTRA_CXX_FLAGS} -Wno-deprecated-declarations -Wno-mismatched-tags")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这里 -Wno-deprecated-declarations 现在被换成了 -Wno-error=deprecated-declarations?是不是行为不一样

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

是的,-Wno-deprecated-declarations 是不报出 deprecated-declarations 类型的警告,-Wno-error=deprecated-declarations 是报出 deprecated-declarations 类型的警告但不将其视为错误,我感觉这里报出来但不视为错误更合理一些

endif()
set(EXTRA_CXX_FLAGS "-std=c++11 -Wall -Wno-sign-compare -Wno-unused-function -fPIC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${EXTRA_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${EXTRA_CXX_FLAGS}")
Expand Down
30 changes: 30 additions & 0 deletions cmake/oneflow.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,8 @@ if(BUILD_CUDA)
add_dependencies(of_cudaobj of_protoobj of_cfgobj prepare_oneflow_third_party)
target_link_libraries(of_cudaobj ${oneflow_third_party_libs})
set(ONEFLOW_CUDA_LIBS of_cudaobj)

target_compile_options(of_cudaobj PRIVATE -Werror=return-type)
endif()

# cc obj lib
Expand All @@ -254,6 +256,33 @@ if (BUILD_SHARED_LIBS)
target_link_libraries(of_ccobj of_protoobj of_cfgobj ${ONEFLOW_CUDA_LIBS} glog_imported)
endif()

target_compile_options(of_ccobj PRIVATE -Werror=return-type)

if (TREAT_WARNINGS_AS_ERRORS)
target_compile_options(of_ccobj PRIVATE -Werror)

# TODO: remove it while fixing all deprecated call
target_compile_options(of_ccobj PRIVATE -Wno-error=deprecated-declarations)

# disable unused-* for different compile mode (maybe unused in cpu.cmake, but used in cuda.cmake)
target_compile_options(of_ccobj PRIVATE -Wno-error=unused-const-variable)
target_compile_options(of_ccobj PRIVATE -Wno-error=unused-variable)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
target_compile_options(of_ccobj PRIVATE -Wno-error=unused-private-field)
target_compile_options(of_ccobj PRIVATE -Wno-error=unused-local-typedef)
target_compile_options(of_ccobj PRIVATE -Wno-error=unused-lambda-capture)
target_compile_options(of_ccobj PRIVATE -Wno-error=instantiation-after-specialization)
endif()

if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
# the mangled name between `struct X` and `class X` is different in MSVC ABI, remove it while windows is supported (in MSVC/cl or clang-cl)
target_compile_options(of_ccobj PRIVATE -Wno-error=mismatched-tags)

# TODO: remove it while `oneflow/user/kernels/upsample_kernel.h:141:9: error: implicit conversion from 'double' to 'int' changes value from -0.75 to 0 [-Wliteral-conversion]` is fixed
target_compile_options(of_ccobj PRIVATE -Wno-error=literal-conversion)
endif()
endif()

# py ext lib
add_library(of_pyext_obj ${of_pyext_obj_cc})
target_include_directories(of_pyext_obj PRIVATE ${Python_INCLUDE_DIRS} ${Python_NumPy_INCLUDE_DIRS})
Expand All @@ -262,6 +291,7 @@ if(BUILD_SHARED_LIBS AND APPLE)
target_link_libraries(of_pyext_obj ${Python3_LIBRARIES})
endif()
add_dependencies(of_pyext_obj of_ccobj)
target_compile_options(of_pyext_obj PRIVATE -Werror=return-type)

if(APPLE)
set(of_libs -Wl,-force_load ${ONEFLOW_CUDA_LIBS} of_ccobj of_protoobj of_cfgobj)
Expand Down
2 changes: 2 additions & 0 deletions oneflow/core/autograd/gradient_funcs/adaptive_pool.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ struct AdaptivePoolInterpState : public OpExprInterpState {

class AdaptivePoolNdGrad : public OpExprGradFunction<AdaptivePoolInterpState> {
public:
using OpExprGradFunction<AdaptivePoolInterpState>::Init;

Maybe<void> Init(const OpExpr& op, std::string mode, const int& ndims);
Maybe<void> Capture(AdaptivePoolInterpState* ctx, const TensorTuple& inputs,
const TensorTuple& outputs, const AttrMap& attrs) const override;
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/autograd/gradient_funcs/avg_pooling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ struct AvgPoolingInterpState : public OpExprInterpState {
class AvgPoolingNdGrad : public OpExprGradFunction<AvgPoolingInterpState> {
public:
virtual ~AvgPoolingNdGrad() = default;
Maybe<void> Init(const OpExpr& op);
Maybe<void> Init(const OpExpr& op) override;
Maybe<void> Capture(AvgPoolingInterpState* ctx, const TensorTuple& inputs,
const TensorTuple& outputs, const AttrMap& attrs) const override;
Maybe<void> Apply(const AvgPoolingInterpState* ctx, const TensorTuple& out_grads,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ class BroadcastBinaryGrad : public OpExprGradFunction<OpExprInterpState> {
BroadcastBinaryGrad() = default;
virtual ~BroadcastBinaryGrad() = default;

virtual Maybe<void> Init(const OpExpr& op) { return Maybe<void>::Ok(); }
virtual Maybe<void> Init(const OpExpr& op) override { return Maybe<void>::Ok(); }

Maybe<void> Capture(OpExprInterpState* ctx, const TensorTuple& inputs, const TensorTuple& outputs,
const AttrMap& attrs) const override {
Expand Down
3 changes: 3 additions & 0 deletions oneflow/core/autograd/gradient_funcs/pool.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,9 @@ struct PoolInterpState : public OpExprInterpState {
class PoolNdGrad : public OpExprGradFunction<PoolInterpState> {
public:
virtual ~PoolNdGrad() = default;

using OpExprGradFunction<PoolInterpState>::Init;

Maybe<void> Init(const OpExpr& op, const std::string& mode);
Maybe<void> Capture(PoolInterpState* ctx, const TensorTuple& inputs, const TensorTuple& outputs,
const AttrMap& attrs) const override;
Expand Down
3 changes: 3 additions & 0 deletions oneflow/core/autograd/gradient_funcs/pooling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,9 @@ struct PoolingInterpState : public OpExprInterpState {
class PoolingNdGrad : public OpExprGradFunction<PoolingInterpState> {
public:
virtual ~PoolingNdGrad() = default;

using OpExprGradFunction<PoolingInterpState>::Init;

Maybe<void> Init(const OpExpr& op, const std::string& mode);
Maybe<void> Capture(PoolingInterpState* ctx, const TensorTuple& inputs,
const TensorTuple& outputs, const AttrMap& attrs) const override;
Expand Down
4 changes: 2 additions & 2 deletions oneflow/core/common/cached_object_msg_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ limitations under the License.

namespace oneflow {

class ObjMsgChunk;
struct ObjMsgChunk;

struct ObjMsgMemBlock final {
public:
Expand All @@ -41,7 +41,7 @@ struct ObjMsgMemBlock final {
char mem_ptr_[0];
};

class ObjMsgSizedMemPool;
struct ObjMsgSizedMemPool;

// clang-format off
OBJECT_MSG_BEGIN(ObjMsgChunk);
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/common/maybe.h
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,7 @@ inline bool MaybeIsOk(Maybe<void>&& maybe) {
})(__FUNCTION__) \
.Data_YouAreNotAllowedToCallThisFuncOutsideThisFile()

#define CHECK_OK(...) CHECK(MaybeIsOk(std::move(__VA_ARGS__)))
#define CHECK_OK(...) CHECK(MaybeIsOk(__VA_ARGS__))

#define OF_RETURN_IF_ERROR(...) \
for (MAYBE_CONST_AUTO_REF maybe_##__LINE__ = __MaybeErrorStackCheckWrapper__(__VA_ARGS__); \
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/common/symbol.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ limitations under the License.
namespace oneflow {

template<typename T>
class SymbolUtil;
struct SymbolUtil;

template<typename T>
class Symbol final {
Expand Down
7 changes: 7 additions & 0 deletions oneflow/core/eager/opkernel_instruction_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ limitations under the License.
#define ONEFLOW_CORE_EAGER_CALL_OPKERNEL_INSTRUCTION_H_

#include "oneflow/core/eager/opkernel_instruction.msg.h"
#include "oneflow/core/vm/instr_type_id.h"
#include "oneflow/core/vm/instruction.msg.h"
#include "oneflow/core/vm/instruction_type.h"
#include "oneflow/core/memory/memory_case.pb.h"
Expand Down Expand Up @@ -60,6 +61,9 @@ class UserStatelessCallOpKernelInstructionType : public vm::InstructionType {
void Infer(vm::Instruction* instruction) const override;
void Compute(vm::Instruction* instruction) const override;

using vm::InstructionType::Compute;
using vm::InstructionType::Infer;

protected:
UserStatelessCallOpKernelInstructionType() = default;
virtual ~UserStatelessCallOpKernelInstructionType() = default;
Expand All @@ -80,6 +84,9 @@ class SystemStatelessCallOpKernelInstructionType : public vm::InstructionType {
virtual std::shared_ptr<MemoryCase> GetOutBlobMemCase(const DeviceType device_type,
const int64_t device_id) const;

using vm::InstructionType::Compute;
using vm::InstructionType::Infer;

protected:
SystemStatelessCallOpKernelInstructionType() = default;
virtual ~SystemStatelessCallOpKernelInstructionType() = default;
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/eager/opkernel_object.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ limitations under the License.

namespace oneflow {

class KernelCtx;
struct KernelCtx;
class Blob;
class ParallelContext;

Expand Down
8 changes: 4 additions & 4 deletions oneflow/core/framework/config_def.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -81,9 +81,9 @@ const ConfigDefBuidler<config_def_type>& ConfigDefBuidler<config_def_type>::List
return *this;
}

template class ConfigDefBuidler<kEnvConfigDefType>;
template class ConfigDefBuidler<kSessionConfigDefType>;
template class ConfigDefBuidler<kFunctionConfigDefType>;
template class ConfigDefBuidler<kScopeConfigDefType>;
template struct ConfigDefBuidler<kEnvConfigDefType>;
template struct ConfigDefBuidler<kSessionConfigDefType>;
template struct ConfigDefBuidler<kFunctionConfigDefType>;
template struct ConfigDefBuidler<kScopeConfigDefType>;

} // namespace oneflow
2 changes: 1 addition & 1 deletion oneflow/core/framework/device.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ class Device final {
Device(const Device&) = default;
Device(Device&&) = default;
~Device() = default;
Device& operator=(const Device&) = default;
Device& operator=(const Device&) = delete;
const std::string& type() const { return type_; }
Maybe<const std::string&> of_type() const;
int64_t device_id() const { return device_id_; }
Expand Down
6 changes: 3 additions & 3 deletions oneflow/core/framework/instructions_builder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1166,7 +1166,7 @@ Maybe<void> InstructionsBuilder::StatefulCall(
};

const auto GetDelegateBlobObject =
[this, &FetchDelegateBlobObject](
[&FetchDelegateBlobObject](
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这里是什么原因需要捕获this的

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

不是很清楚,可能之前有用到后来删掉了?不过这个 lambda 应该确实不需要捕获 this

const std::shared_ptr<compatible_py::BlobObject>& blob_object,
const std::shared_ptr<compatible_py::OpArgParallelAttribute>& op_arg_parallel_attr)
-> Maybe<compatible_py::BlobObject> {
Expand Down Expand Up @@ -1199,7 +1199,7 @@ Maybe<void> InstructionsBuilder::StatelessCall(
};

const auto GetDelegateBlobObject =
[this, &FetchDelegateBlobObject](
[&FetchDelegateBlobObject](
const std::shared_ptr<compatible_py::BlobObject>& blob_object,
const std::shared_ptr<compatible_py::OpArgParallelAttribute>& op_arg_parallel_attr)
-> Maybe<compatible_py::BlobObject> {
Expand Down Expand Up @@ -1242,7 +1242,7 @@ Maybe<void> InstructionsBuilder::NoBoxingStatelessCall(
};

const auto GetDirectOr121BlobObject =
[this, &FetchDelegateBlobObject](
[&FetchDelegateBlobObject](
const std::shared_ptr<compatible_py::BlobObject>& blob_object,
const std::shared_ptr<compatible_py::OpArgParallelAttribute>& op_arg_parallel_attr)
-> Maybe<compatible_py::BlobObject> {
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/framework/op_expr.h
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ class ConsistentTensorInferCache;

class UserOpExpr final : public BuiltinOpExprImpl<UserOpConf> {
public:
UserOpExpr() = default;
UserOpExpr() = delete;
virtual ~UserOpExpr() = default;

static Maybe<UserOpExpr> New(const std::string& op_name, UserOpConf&& op_proto,
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/framework/tensor_rpc_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ limitations under the License.

namespace oneflow {

class FlatTensorConsistency;
struct FlatTensorConsistency;

class CheckConsistencyAsyncTransportCtx : public AsyncTransportCtx {
public:
Expand Down
3 changes: 1 addition & 2 deletions oneflow/core/framework/user_op_conf.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -249,8 +249,7 @@ UserOpConfWrapper& BackwardOpConfContext::GetOp(const std::string& op_name) {
CHECK(fn_it != op_builder_fns_.end()) << " op_name " << op_name << " has no builder function.";
CHECK(fn_it->second != nullptr) << " op_name " << op_name << " builder function is null.";
UserOpConfWrapperBuilder builder(op_name);
auto ret =
op_builder_results_.emplace(std::make_pair(op_name, std::move(fn_it->second(builder))));
auto ret = op_builder_results_.emplace(std::make_pair(op_name, fn_it->second(builder)));
CHECK(ret.second == true) << " op_name " << op_name << " build result insert failed.";

// add new op conf
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/framework/vm_local_dep_object.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ ObjectMsgPtr<LocalDepObject> GetRecycledLocalDepObject(const ParallelDesc& paral
ObjectMsgPtr<LocalDepObject> object = thread_local_free_list->Begin();
thread_local_free_list->Erase(object.Mutable());
CHECK_EQ(object->ref_cnt(), 1); // hold by `object` only
return std::move(object);
return object;
}

void MoveLocalDepObjectToZombieList(ObjectMsgPtr<LocalDepObject>&& local_dep_object) {
Expand Down
1 change: 1 addition & 0 deletions oneflow/core/functional/packed_functor.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ using remove_cvref_t = oneflow::detail::remove_cvref_t<T>;

struct FunctionBody {
virtual operator void*() = 0;
virtual ~FunctionBody() = default;
};

template<typename T>
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/job/job_build_and_infer_ctx_mgr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ limitations under the License.
namespace oneflow {

Maybe<void> JobBuildAndInferCtxMgr::OpenJobBuildAndInferCtx(const std::string& job_name) {
CHECK_OR_RETURN(!has_cur_job_) << Error::UnknownJobBuildAndInferError
CHECK_OR_RETURN(!has_cur_job_) << Error::UnknownJobBuildAndInferError()
<< "cur job not leave before you enter this job_name:" << job_name;
CHECK_OR_RETURN(!job_name.empty()) << Error::JobNameEmptyError();
CHECK_OR_RETURN(job_name2infer_ctx_.find(job_name) == job_name2infer_ctx_.end())
Expand Down
4 changes: 2 additions & 2 deletions oneflow/core/job/job_build_and_infer_ctx_mgr.h
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ class LazyJobBuildAndInferCtxMgr : public JobBuildAndInferCtxMgr {
friend class Global<LazyJobBuildAndInferCtxMgr>;

Maybe<void> VirtualCloseJob() override;
JobBuildAndInferCtx* NewJobBuildAndInferCtx(Job* job, int64_t job_id) const;
JobBuildAndInferCtx* NewJobBuildAndInferCtx(Job* job, int64_t job_id) const override;
};

class EagerJobBuildAndInferCtxMgr : public JobBuildAndInferCtxMgr {
Expand All @@ -76,7 +76,7 @@ class EagerJobBuildAndInferCtxMgr : public JobBuildAndInferCtxMgr {
friend class Global<EagerJobBuildAndInferCtxMgr>;

Maybe<void> VirtualCloseJob() override;
JobBuildAndInferCtx* NewJobBuildAndInferCtx(Job* job, int64_t job_id) const;
JobBuildAndInferCtx* NewJobBuildAndInferCtx(Job* job, int64_t job_id) const override;
};

bool EagerExecutionEnabled();
Expand Down
2 changes: 1 addition & 1 deletion oneflow/core/job/session_global_objects_scope.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ Maybe<void> SessionGlobalObjectsScope::EagerInit(const ConfigProto& config_proto
Global<ResourceDesc, ForSession>::Delete();
DumpVersionInfo();
Global<ResourceDesc, ForSession>::New(config_proto.resource());
for (const std::string lib_path : config_proto.load_lib_path()) { JUST(LoadLibrary(lib_path)); }
for (const std::string& lib_path : config_proto.load_lib_path()) { JUST(LoadLibrary(lib_path)); }
return Maybe<void>::Ok();
}

Expand Down
1 change: 1 addition & 0 deletions oneflow/core/job_rewriter/autotick.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ class MutOpConTickInputHelper {
virtual bool VirtualIsTickInputBound() const = 0;
virtual OperatorConf NewTickInputBoundOpConf(const std::string& lbn) const = 0;
void InitFromOpConf(const OperatorConf& op_conf) { op_conf_ = &op_conf; }
virtual ~MutOpConTickInputHelper() = default;

protected:
MutOpConTickInputHelper() : op_conf_(nullptr) {}
Expand Down
6 changes: 4 additions & 2 deletions oneflow/core/job_rewriter/job_pass.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,8 @@ class JobPassCtx {
const auto& iter = key2state_.find(key);
CHECK_OR_RETURN(iter != key2state_.end());
const T* ptr = dynamic_cast<T*>(iter->second.get());
CHECK_NOTNULL_OR_RETURN(ptr) << typeid(*iter->second).name();
const auto& origin_obj = *iter->second;
CHECK_NOTNULL_OR_RETURN(ptr) << typeid(origin_obj).name();
return *ptr;
}

Expand All @@ -64,7 +65,8 @@ class JobPassCtx {
const auto& iter = key2state_.find(key);
CHECK_OR_RETURN(iter != key2state_.end());
T* ptr = dynamic_cast<T*>(iter->second.get());
CHECK_NOTNULL_OR_RETURN(ptr) << typeid(*iter->second).name();
const auto& origin_obj = *iter->second;
CHECK_NOTNULL_OR_RETURN(ptr) << typeid(origin_obj).name();
return ptr;
}

Expand Down
4 changes: 2 additions & 2 deletions oneflow/core/kernel/slice_boxing_kernel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ class SliceBoxingCopyKernel final : public SliceBoxingKernel<device_type, T> {
~SliceBoxingCopyKernel() override = default;

private:
virtual const SliceBoxingConf& GetCustomizedBoxingConf() const;
virtual const SliceBoxingConf& GetCustomizedBoxingConf() const override;
void ForwardDataContent(const KernelCtx&,
std::function<Blob*(const std::string&)>) const override;
};
Expand All @@ -61,7 +61,7 @@ class SliceBoxingAddKernel final : public SliceBoxingKernel<device_type, T> {
~SliceBoxingAddKernel() override = default;

private:
virtual const SliceBoxingConf& GetCustomizedBoxingConf() const;
virtual const SliceBoxingConf& GetCustomizedBoxingConf() const override;
void ForwardDataContent(const KernelCtx&,
std::function<Blob*(const std::string&)>) const override;
};
Expand Down
Loading