[clang-tidy] NO.32 clang-analyzer-deadcode.DeadStores (#57483)
enkilee authored Sep 20, 2023
1 parent 0d33b16 commit 2df18c3
Showing 18 changed files with 21 additions and 37 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -75,7 +75,7 @@ clang-analyzer-cplusplus.InnerPointer,
-clang-analyzer-cplusplus.SelfAssignment,
-clang-analyzer-cplusplus.SmartPtr,
-clang-analyzer-cplusplus.VirtualCallModeling,
-clang-analyzer-deadcode.DeadStores,
clang-analyzer-deadcode.DeadStores,
-clang-analyzer-fuchsia.HandleChecker,
-clang-analyzer-nullability.NullPassedToNonnull,
-clang-analyzer-nullability.NullReturnedFromNonnull,
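
Enabling the check is simply a matter of dropping the leading "-" that had kept clang-analyzer-deadcode.DeadStores disabled in .clang-tidy; the remaining files then either remove the reported dead stores or suppress configuration-dependent ones with // NOLINT. For reference, the checker reports a store whose value is never read before being overwritten or going out of scope. A minimal, self-contained example of what it flags (illustrative code, not from this commit):

#include <cstdio>

int scale(int x) {
  int result = x * 2;  // deadcode.DeadStores: value stored to 'result' is never read
  result = x * 4;      // only this store reaches the return below
  return result;
}

int main() {
  std::printf("%d\n", scale(3));  // prints 12; the first store had no effect
  return 0;
}
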
3 changes: 2 additions & 1 deletion paddle/fluid/distributed/collective/reducer.cc
@@ -1109,7 +1109,8 @@ void EagerReducer::AllReduceSparse(EagerGroup *group,

VLOG(3) << "sparse_group [" << curr_group_index << "] start allreduce.";

auto *dev_ctx = platform::DeviceContextPool::Instance().Get(inner_place_);
auto *dev_ctx =
platform::DeviceContextPool::Instance().Get(inner_place_); // NOLINT
if (platform::is_gpu_place(inner_place_)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
dev_ctx = static_cast<phi::GPUContext *>(
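
The initial dev_ctx assignment is reported here because, in the configurations the analyzer sees, the pointer is overwritten before it is read; deleting the initialization, though, would leave dev_ctx without a fallback on builds where those branches compile out, so the commit keeps it and adds // NOLINT. The data_set.cc and buddy_allocator.cc hunks below suppress the same build-configuration-dependent pattern. A hedged sketch of the shape (illustrative names, not Paddle's API):

const char* pick_context(bool use_gpu) {
  const char* ctx = "cpu";  // NOLINT: dead only when the block below is compiled in
  (void)use_gpu;            // keeps the parameter used on non-GPU builds
#if defined(WITH_GPU)       // assumption: stand-in for PADDLE_WITH_NCCL/RCCL
  ctx = use_gpu ? "gpu" : "cpu";
#endif
  return ctx;
}
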
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_set.cc
@@ -338,7 +338,7 @@ static int compute_thread_batch_nccl(
auto& offset = (*nccl_offsets);
// split data avg by thread num
compute_batch_num(total_instance_num, minibatch_size, thr_num, &offset);
thread_avg_batch_num = static_cast<int>(offset.size() / thr_num);
thread_avg_batch_num = static_cast<int>(offset.size() / thr_num); // NOLINT
#ifdef PADDLE_WITH_GLOO
auto gloo_wrapper = paddle::framework::GlooWrapper::GetInstance();
if (gloo_wrapper->Size() > 1) {
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fuse_adamw_op_pass.cc
@@ -246,7 +246,7 @@ void FuseAdamWPass::ApplyImpl(ir::Graph *graph) const {
graph = FuseAdamWFun(graph, true, true);
graph = FuseAdamWFun(graph, true, false);
graph = FuseAdamWFun(graph, false, true);
graph = FuseAdamWFun(graph, false, false);
graph = FuseAdamWFun(graph, false, false); // NOLINT
}

ir::Graph *FuseAdamWPass::FuseAdamWFun(ir::Graph *graph,
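
In these pass ApplyImpl bodies the final graph = FuseXxx(graph, ...) call writes into a parameter that is never read afterwards, which is exactly what DeadStores reports; the // NOLINT keeps the chain of calls uniform instead of special-casing the last one. The same suppression appears in fuse_bn_act_pass.cc, fuse_bn_add_act_pass.cc, fuse_gemm_epilogue_pass.cc, fuse_relu_depthwise_conv_pass.cc, and fused_attention_pass.cc below. A reduced sketch with a hypothetical pass (not Paddle's API):

struct Graph {};

Graph* Transform(Graph* g, bool forward) {
  (void)forward;
  return g;
}

void ApplyImpl(Graph* graph) {
  graph = Transform(graph, true);
  graph = Transform(graph, false);  // NOLINT: 'graph' is a local parameter copy that
                                    // is never read after this final assignment
}
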
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fuse_bn_act_pass.cc
@@ -41,7 +41,7 @@ void FuseBatchNormActPass::ApplyImpl(ir::Graph *graph) const {
graph = FuseBatchNormAct(graph, act_types);
// backward
std::unordered_set<std::string> act_grad_types = {"relu_grad"};
graph = FuseBatchNormActGrad(graph, act_grad_types);
graph = FuseBatchNormActGrad(graph, act_grad_types); // NOLINT
#endif
#endif
}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fuse_bn_add_act_pass.cc
@@ -32,7 +32,7 @@ void FuseBatchNormAddActPass::ApplyImpl(ir::Graph *graph) const {
graph = FuseBatchNormAddAct(graph, act_types);
// backward
std::unordered_set<std::string> act_grad_types = {"relu_grad"};
graph = FuseBatchNormAddActGrad(graph, act_grad_types);
graph = FuseBatchNormAddActGrad(graph, act_grad_types); // NOLINT
#endif
#endif
}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fuse_gemm_epilogue_pass.cc
@@ -42,7 +42,7 @@ void FuseGemmEpiloguePass::ApplyImpl(ir::Graph *graph) const {
graph = FuseLinearActBwd(graph, {"relu_grad"}, true, &cache);
graph = FuseLinearActBwd(graph, {"gelu_grad"}, false, &cache);
graph = FuseLinearBwd(graph, false);
graph = FuseLinearBwd(graph, true);
graph = FuseLinearBwd(graph, true); // NOLINT
}

ir::Graph *FuseGemmEpiloguePass::FuseLinearFwd(ir::Graph *graph,
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc
@@ -28,7 +28,7 @@ namespace ir {

void FuseReluDepthwiseConvPass::ApplyImpl(ir::Graph *graph) const {
graph = FuseReluDepthwiseConv(graph, true);
graph = FuseReluDepthwiseConv(graph, false);
graph = FuseReluDepthwiseConv(graph, false); // NOLINT
}

ir::Graph *FuseReluDepthwiseConvPass::FuseReluDepthwiseConv(
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fused_attention_pass.cc
@@ -847,7 +847,7 @@ void FusedAttentionsPass::ApplyImpl(Graph* graph) const {
cache.ResetCache();

graph = PreMaskDropResMPFwd(graph, &cache);
graph = PreMaskDropResMPBwd(graph, &cache);
graph = PreMaskDropResMPBwd(graph, &cache); // NOLINT
cache.ResetCache();
}

2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
@@ -148,7 +148,7 @@ bool TransferLayoutElimPass::AllInputIsTransferlayout(
std::set<int> dst_layouts;
std::set<int> src_layouts;

auto *scope = param_scope();
auto *scope = param_scope(); // NOLINT

for (auto var : op_node->inputs) {
// If this input is a 1D persistable tensor, we allow transfer_layout not
4 changes: 2 additions & 2 deletions paddle/fluid/framework/parallel_executor.cc
@@ -1512,7 +1512,7 @@ std::vector<ir::Graph *> ParallelExecutor::CompileGraphWithBuildStrategy(
device_count,
graphs.size()));
VLOG(3) << "use local async mode";
graph = member_->build_strategy_.Apply(graph,
graph = member_->build_strategy_.Apply(graph, // NOLINT
{member_->places_[0]},
loss_var_name,
{member_->local_scopes_[0]},
@@ -1530,7 +1530,7 @@ std::vector<ir::Graph *> ParallelExecutor::CompileGraphWithBuildStrategy(
async_graphs[i] = graphs[i];
}
} else {
graph = member_->build_strategy_.Apply(graph,
graph = member_->build_strategy_.Apply(graph, // NOLINT
member_->places_,
loss_var_name,
member_->local_scopes_,
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/layout_autotune.cc
@@ -63,7 +63,7 @@ LayoutAutoTune::LayoutAutoTune() {
layout_agnostic == true) {
VLOG(4) << "Heavily layout sensitive OP: " << info.first;
heavily_layout_sensitive_ops_.emplace(info.first);
layout_agnostic = false;
layout_agnostic = false; // NOLINT
continue;
}
}
2 changes: 1 addition & 1 deletion paddle/fluid/memory/allocation/buddy_allocator.cc
@@ -242,7 +242,7 @@ void* BuddyAllocator::SystemAlloc(size_t size) {

BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
size_t request_bytes) {
size_t allocate_bytes = max_chunk_size_;
size_t allocate_bytes = max_chunk_size_; // NOLINT
size_t index = 0;

#ifdef PADDLE_WITH_CUSTOM_DEVICE
2 changes: 1 addition & 1 deletion paddle/fluid/platform/gen_comm_id_helper.cc
@@ -321,7 +321,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
VLOG(3) << "socket read failed with ret_val=" << ret_val;
CloseSocket(sock);
}
sock = -1;
sock = -1; // NOLINT
CHECK_SYS_CALL_VAL(socket(AF_INET, SOCK_STREAM, 0), "socket", sock);
// unmatched link, retry after 80ms
std::this_thread::sleep_for(std::chrono::milliseconds(80));
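
Here the explicit sock = -1 reset is immediately followed by a call that obtains a fresh descriptor into sock (CHECK_SYS_CALL_VAL appears to store its result in the last argument), so the analyzer sees the -1 as never read; the reset is kept for readability and the warning is suppressed. A reduced POSIX sketch of the reset-then-reopen shape (illustrative, not the real macro):

#include <sys/socket.h>
#include <unistd.h>

int reopen(int fd) {
  close(fd);
  fd = -1;                               // NOLINT: overwritten before any read
  fd = socket(AF_INET, SOCK_STREAM, 0);  // the only store that is actually read
  return fd;
}
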
18 changes: 0 additions & 18 deletions paddle/phi/api/lib/tensor_method.cc
@@ -128,24 +128,6 @@ void Tensor::copy_(const Tensor &src,
auto *dev_ctx = pool.GetMutable(
place.GetType() == target_place.GetType() ? target_place : place);

Backend kernel_backend = Backend::UNDEFINED;
DataLayout kernel_layout = DataLayout::UNDEFINED;
DataType kernel_data_type = DataType::UNDEFINED;

if (kernel_backend == Backend::UNDEFINED ||
kernel_layout == DataLayout::UNDEFINED ||
kernel_data_type == DataType::UNDEFINED) {
if (kernel_backend == Backend::UNDEFINED) {
kernel_backend = kernel_key.backend();
}
if (kernel_layout == DataLayout::UNDEFINED) {
kernel_layout = kernel_key.layout();
}
if (kernel_data_type == DataType::UNDEFINED) {
kernel_data_type = kernel_key.dtype();
}
}

if (kernel_type == KernelType::DENSE_TENSOR_KENREL) {
SetKernelOutput(this);
phi::MetaTensor meta_out(impl_.get());
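
This is the one place where the warning is fixed by deletion rather than suppression: kernel_backend, kernel_layout, and kernel_data_type are initialized to UNDEFINED, immediately re-derived from kernel_key, and then never read again, so every store in the block is dead and the whole block can simply be removed (hence 18 deletions and no additions).
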
2 changes: 1 addition & 1 deletion paddle/phi/core/distributed/store/tcp_store.cc
@@ -272,7 +272,7 @@ void MasterDaemon::run() {
}
VLOG(0)
<< "receive shutdown event and so quit from MasterDaemon run loop";
finished = true;
finished = true; // NOLINT
break;
}
#endif
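
The finished = true just before break (and layout_agnostic = false before continue in layout_autotune.cc above) look like the store-before-jump shape the checker reports when the flag is not read again on that path; the assignments are kept because they state intent, and the warning is silenced with // NOLINT. A reduced example of the shape:

int find_first_even(const int* data, int n) {
  int found = -1;
  bool done = false;  // read by the loop condition below
  for (int i = 0; i < n && !done; ++i) {
    if (data[i] % 2 == 0) {
      found = data[i];
      done = true;  // NOLINT: dead store; 'break' exits before the loop
      break;        // condition can read 'done' again
    }
  }
  return found;
}
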
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/gpc.cc
@@ -1048,7 +1048,7 @@ void gpc_polygon_clip(gpc_op op,
px = -DBL_MAX;
/* Create bundles within AET */
e0 = aet;
e1 = aet;
e1 = aet; // NOLINT
/* Set up bundle fields of first edge */
PADDLE_ENFORCE_NOT_NULL(
aet, phi::errors::InvalidArgument("Edge node AET is nullptr."));
@@ -1689,7 +1689,7 @@ void gpc_tristrip_clip(gpc_op op,
/* Create bundles within AET */
px = -DBL_MAX;
e0 = aet;
e1 = aet;
e1 = aet; // NOLINT

/* Set up bundle fields of first edge */
PADDLE_ENFORCE_NOT_NULL(
3 changes: 2 additions & 1 deletion paddle/pir/core/utils.cc
@@ -16,7 +16,8 @@

namespace pir {
std::size_t hash_combine(std::size_t lhs, std::size_t rhs) {
return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
return lhs;
}

void *aligned_malloc(size_t size, size_t alignment) {
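
The original return lhs ^= ...; is reported because the compound assignment stores back into the by-value parameter lhs, and that stored value is never read again; only the value of the expression is returned. Splitting it into an assignment followed by return lhs keeps the behavior identical while giving the store a visible read. Both forms as a self-contained sketch:

#include <cstddef>

// The form the analyzer flags: the store into 'lhs' is used only through the
// enclosing expression and is never read from the parameter again.
std::size_t hash_combine_flagged(std::size_t lhs, std::size_t rhs) {
  return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
}

// The committed fix: store first, then return by reading the variable.
std::size_t hash_combine_fixed(std::size_t lhs, std::size_t rhs) {
  lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
  return lhs;
}
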
