[CodeStyle] trim trailing whitespace in .h .cc .cu .cuh .cpp .cmake CMakeLists.txt

restore files in patches/
SigureMo committed Sep 13, 2022
1 parent 9f5b083 commit 5ed27b0
Showing 157 changed files with 627 additions and 622 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -32,7 +32,7 @@ repos:
description: Format files with ClangFormat.
entry: bash ./tools/codestyle/clang_format.hook -i
language: system
-files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|xpu|kps)$
+files: \.(c|cc|cxx|cpp|cu|cuh|h|hpp|hxx|xpu|kps)$
exclude: |
(?x)^(
paddle/fluid/distributed/ps/thirdparty/round_robin.h
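The functional change in this hook is the addition of `cuh` to the extension filter, so `.cuh` CUDA headers now go through ClangFormat as well. A minimal standalone sketch (mine, not part of the commit) of how this extension regex behaves, using C++'s ECMAScript-flavoured `std::regex`:

```cpp
// Sketch (not from the commit): which filenames the updated filter matches.
#include <iostream>
#include <regex>
#include <string>
#include <vector>

int main() {
  // Pattern after this commit; previously "cuh" was absent from the list.
  const std::regex filter(R"(\.(c|cc|cxx|cpp|cu|cuh|h|hpp|hxx|xpu|kps)$)");
  const std::vector<std::string> files = {
      "hash_functions.cuh", "custom_operator.cc", "README.md"};
  for (const auto& f : files) {
    std::cout << f << (std::regex_search(f, filter) ? ": formatted\n"
                                                    : ": skipped\n");
  }
  return 0;  // .cuh and .cc are formatted; .md is skipped
}
```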
6 changes: 3 additions & 3 deletions paddle/fluid/framework/custom_operator.cc
@@ -511,9 +511,9 @@ class CustomOpMaker : public OpProtoAndCheckerMaker {
AddComment(R"DOC(
Custom Operator.
-According to the Tensor operation function implemented by the user
-independently of the framework, it is encapsulated into a framework
-operator to adapt to various execution scenarios such as dynamic graph,
+According to the Tensor operation function implemented by the user
+independently of the framework, it is encapsulated into a framework
+operator to adapt to various execution scenarios such as dynamic graph,
mode static graph mode, and inference mode.
)DOC");
26 changes: 15 additions & 11 deletions paddle/fluid/framework/fleet/heter_ps/cudf/hash_functions.cuh
@@ -18,8 +18,8 @@
* in the root directory of this source tree.
*/

-#ifndef HASH_FUNCTIONS_CUH
-#define HASH_FUNCTIONS_CUH
+#ifndef PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_HASH_FUNCTIONS_CUH_
+#define PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_HASH_FUNCTIONS_CUH_

using hash_value_type = uint32_t;

@@ -39,7 +39,8 @@ struct MurmurHash3_32 {

__forceinline__ __host__ __device__ MurmurHash3_32() : m_seed(0) {}

-__forceinline__ __host__ __device__ uint32_t rotl32(uint32_t x, int8_t r) const {
+__forceinline__ __host__ __device__ uint32_t rotl32(uint32_t x,
+                                                    int8_t r) const {
return (x << r) | (x >> (32 - r));
}

@@ -52,7 +53,7 @@ struct MurmurHash3_32 {
return h;
}

-/* --------------------------------------------------------------------------*/
+/* ------------------------------------------------------------------------*/
/**
* @Synopsis Combines two hash values into a new single hash value. Called
* repeatedly to create a hash value from several variables.
@@ -62,18 +63,21 @@ struct MurmurHash3_32 {
* @Param lhs The first hash value to combine
* @Param rhs The second hash value to combine
*
-* @Returns A hash value that intelligently combines the lhs and rhs hash values
+* @Returns A hash value that intelligently combines the lhs and rhs hash
+* values
*/
-/* ----------------------------------------------------------------------------*/
-__host__ __device__ result_type hash_combine(result_type lhs, result_type rhs) {
+/* ------------------------------------------------------------------------*/
+__host__ __device__ result_type hash_combine(result_type lhs,
+                                             result_type rhs) {
result_type combined{lhs};

combined ^= rhs + 0x9e3779b9 + (combined << 6) + (combined >> 2);

-return combined;
+return combined;
}

-__forceinline__ __host__ __device__ result_type operator()(const Key& key) const {
+__forceinline__ __host__ __device__ result_type
+operator()(const Key& key) const {
constexpr int len = sizeof(argument_type);
const uint8_t* const data = (const uint8_t*)&key;
constexpr int nblocks = len / 4;
@@ -107,7 +111,7 @@ struct MurmurHash3_32 {
k1 = rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
-};
+}
//----------
// finalization
h1 ^= len;
@@ -122,4 +126,4 @@ struct MurmurHash3_32 {
template <typename Key>
using default_hash = MurmurHash3_32<Key>;

-#endif // HASH_FUNCTIONS_CUH
+#endif // PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_HASH_FUNCTIONS_CUH_
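For readers skimming this hunk: `hash_combine` is the Boost-style mixer, with 0x9e3779b9 as the 32-bit golden-ratio constant. A host-only sketch (my illustration, not code from this header) of the pattern it enables, folding several field hashes into one key hash:

```cpp
// Host-only illustration of the hash_combine pattern used above.
#include <cstdint>
#include <iostream>

uint32_t hash_combine(uint32_t lhs, uint32_t rhs) {
  // Same mixing step as hash_functions.cuh.
  lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
  return lhs;
}

struct PairKey {  // hypothetical two-field key
  uint32_t a;
  uint32_t b;
};

int main() {
  // In the real header each field would first go through MurmurHash3_32;
  // identity "hashes" keep this sketch short.
  PairKey k{1u, 2u};
  std::cout << hash_combine(k.a, k.b) << "\n";  // deterministic combined hash
  return 0;
}
```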
6 changes: 3 additions & 3 deletions paddle/fluid/framework/fleet/heter_ps/cudf/managed.cuh
@@ -18,8 +18,8 @@
* in the root directory of this source tree.
*/

-#ifndef MANAGED_CUH
-#define MANAGED_CUH
+#ifndef PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_MANAGED_CUH_
+#define PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_MANAGED_CUH_

#include <new>

@@ -34,4 +34,4 @@ struct managed {
static void operator delete(void *ptr) noexcept { cudaFree(ptr); }
};

-#endif // MANAGED_CUH
+#endif // PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_MANAGED_CUH_
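`managed` overloads `operator new`/`operator delete` with `cudaMallocManaged`/`cudaFree`, so any type derived from it lives in CUDA unified memory, addressable from host and device alike. A hedged usage sketch (the `Node` type is hypothetical, not from the diff; compile with nvcc):

```cpp
// Usage sketch for the managed base class above; Node is hypothetical.
#include <cuda_runtime.h>

#include <cstddef>

struct managed {
  static void* operator new(std::size_t n) {
    void* ptr = nullptr;
    cudaMallocManaged(&ptr, n);  // error handling omitted in this sketch
    return ptr;
  }
  static void operator delete(void* ptr) noexcept { cudaFree(ptr); }
};

struct Node : public managed {  // hypothetical example type
  int value;
};

int main() {
  Node* n = new Node;  // unified memory: the pointer is valid in kernels too
  n->value = 42;
  delete n;
  return 0;
}
```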
11 changes: 6 additions & 5 deletions paddle/fluid/framework/fleet/heter_ps/cudf/managed_allocator.cuh
@@ -18,8 +18,8 @@
* in the root directory of this source tree.
*/

-#ifndef MANAGED_ALLOCATOR_CUH
-#define MANAGED_ALLOCATOR_CUH
+#ifndef PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_MANAGED_ALLOCATOR_CUH_
+#define PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_MANAGED_ALLOCATOR_CUH_

#include <new>

@@ -36,8 +36,9 @@ struct managed_allocator {
T* ptr = 0;
cudaError_t result = cudaMallocManaged(&ptr, n * sizeof(T));
if (cudaSuccess != result || nullptr == ptr) {
std::cerr << "ERROR: CUDA Runtime call in line " << __LINE__ << "of file " << __FILE__
<< " failed with " << cudaGetErrorString(result) << " (" << result << ") "
std::cerr << "ERROR: CUDA Runtime call in line " << __LINE__ << "of file "
<< __FILE__ << " failed with " << cudaGetErrorString(result)
<< " (" << result << ") "
<< " Attempted to allocate: " << n * sizeof(T) << " bytes.\n";
throw std::bad_alloc();
}
@@ -55,4 +56,4 @@
return false;
}

-#endif
+#endif // PADDLE_FLUID_FRAMEWORK_FLEET_HETER_PS_CUDF_MANAGED_ALLOCATOR_CUH_
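`managed_allocator` wraps the same idea into the standard Allocator interface, so STL containers can own unified-memory buffers. A usage sketch (the container choice is mine, not from the commit; compile with nvcc):

```cpp
// Sketch: std::vector storage in CUDA unified memory via managed_allocator.
#include <cuda_runtime.h>

#include <cstddef>
#include <vector>

template <typename T>
struct managed_allocator {
  using value_type = T;
  T* allocate(std::size_t n) {
    T* ptr = nullptr;
    cudaMallocManaged(&ptr, n * sizeof(T));  // error checks omitted here
    return ptr;
  }
  void deallocate(T* ptr, std::size_t) { cudaFree(ptr); }
};

template <typename T, typename U>
bool operator==(const managed_allocator<T>&, const managed_allocator<U>&) {
  return true;  // stateless, mirrors the operator!= in the header above
}
template <typename T, typename U>
bool operator!=(const managed_allocator<T>& a, const managed_allocator<U>& b) {
  return !(a == b);
}

int main() {
  std::vector<float, managed_allocator<float>> buf(1024, 0.f);
  // buf.data() can be handed straight to a kernel launch.
  return 0;
}
```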
6 changes: 3 additions & 3 deletions paddle/fluid/framework/new_executor/executor_statistics.cc
@@ -600,9 +600,9 @@ void StatisticsEngine::Log(const std::string& filepath) {
for (size_t idx = 0; idx < statistics_.size(); ++idx) {
const auto& evt_stat = statistics_[idx];
ofs << platform::string_format(std::string(R"JSON(
-{
-"statistical item" : "%s",
-"total time(ns)" : %llu,
+{
+"statistical item" : "%s",
+"total time(ns)" : %llu,
"total number of times" : %llu,
"normalization time(ns)" : %llu
},)JSON"),
2 changes: 1 addition & 1 deletion paddle/fluid/operators/activation_op.cc
@@ -654,7 +654,7 @@ class LogitOpMaker : public framework::OpProtoAndCheckerMaker {
"(float, default 1e-6f) the epsilon for input clamp bound")
.SetDefault(1e-6f);
AddComment(R"DOC(
-Logit Operator.
+Logit Operator.
this function is defined as follow:
$ logit=ln\left ( {\frac {x} {1-x}} \right ) $
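The docstring defines logit(x) = ln(x / (1 − x)), with `epsilon` clamping the input away from 0 and 1. A scalar reference sketch of that formula (standalone; not Paddle's kernel, and the clamp-to-[eps, 1 − eps] reading of the attribute is my assumption):

```cpp
// Scalar sketch of the logit formula above (not Paddle's kernel).
#include <algorithm>
#include <cmath>
#include <iostream>

float logit(float x, float eps = 1e-6f) {
  // Assumed clamp into [eps, 1 - eps] so the log stays finite.
  x = std::min(std::max(x, eps), 1.0f - eps);
  return std::log(x / (1.0f - x));
}

int main() {
  std::cout << logit(0.5f) << "\n";  // 0, since ln(1) = 0
  std::cout << logit(0.9f) << "\n";  // ln(9) ≈ 2.197
  return 0;
}
```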
2 changes: 1 addition & 1 deletion paddle/fluid/operators/add_position_encoding_op.cc
@@ -87,7 +87,7 @@ class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker {
});
AddComment(R"DOC(
Add Position Encoding Operator.
The add position encoding calculates the output based on the input, alpha, beta.
The size of each dimension of the parameters checked in the infer-shape.
)DOC");
2 changes: 1 addition & 1 deletion paddle/fluid/operators/addmm_op.cc
@@ -77,7 +77,7 @@ class AddMMOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
AddMM Operator.
This operator is used to perform matrix multiplication for input $x$ and $y$ with coefficient $alpha$.
-$input$ with coefficient $beta$ is added to the final result.
+$input$ with coefficient $beta$ is added to the final result.
The equation is:
$$Out = alpha * x * y + beta * input$$
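A naive reference for the equation above with row-major matrices (illustration only, not Paddle's implementation):

```cpp
// Naive reference for Out = alpha * x * y + beta * input (see above),
// with row-major x: [m x k], y: [k x n], input/out: [m x n].
#include <iostream>
#include <vector>

std::vector<float> addmm(const std::vector<float>& input,
                         const std::vector<float>& x,
                         const std::vector<float>& y, int m, int k, int n,
                         float alpha, float beta) {
  std::vector<float> out(m * n);
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      float acc = 0.f;
      for (int p = 0; p < k; ++p) acc += x[i * k + p] * y[p * n + j];
      out[i * n + j] = alpha * acc + beta * input[i * n + j];
    }
  }
  return out;
}

int main() {
  // 1x1 matrices: 2 * (3 * 4) + 0.5 * 10 = 29
  std::cout << addmm({10.f}, {3.f}, {4.f}, 1, 1, 1, 2.f, 0.5f)[0] << "\n";
  return 0;
}
```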
8 changes: 4 additions & 4 deletions paddle/fluid/operators/affine_grid_op.cc
@@ -177,28 +177,28 @@ class AffineGridOpMaker : public framework::OpProtoAndCheckerMaker {
[x_14, x_15, x_16]]
[[x_21, x_22, x_23]
[x_24, x_25, x_26]]]
OutputShape = [2, 3, 5, 5]
Step 1:
Generate relative coordinates according to OutputShape.
The values of relative coordinates are in the interval between -1 and 1.
The shape of the relative coordinates is [2, H, W] as below:
C = [[[-1. -1. -1. -1. -1. ]
[-0.5 -0.5 -0.5 -0.5 -0.5]
[ 0. 0. 0. 0. 0. ]
[ 0.5 0.5 0.5 0.5 0.5]
-[ 1. 1. 1. 1. 1. ]]
+[ 1. 1. 1. 1. 1. ]]
[[-1. -0.5 0. 0.5 1. ]
[-1. -0.5 0. 0.5 1. ]
[-1. -0.5 0. 0.5 1. ]
[-1. -0.5 0. 0.5 1. ]
[-1. -0.5 0. 0.5 1. ]]]
C[0] is the coordinates in height axis and C[1] is the coordinates in
width axis.
Step2:
Tanspose and reshape C to shape [H * W, 2] and append ones to last
dimension. The we get:
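Returning to Step 1 of the example: the relative coordinates along each axis are evenly spaced over [−1, 1]. A standalone sketch that reproduces the 5-element rows of the matrix C shown above (assuming the endpoints-included spacing the example uses):

```cpp
// Sketch of Step 1 above: coordinates evenly spaced in [-1, 1].
// For size 5 this prints -1 -0.5 0 0.5 1, matching the rows of C.
#include <iostream>
#include <vector>

std::vector<float> relative_coords(int size) {
  std::vector<float> c(size);
  for (int i = 0; i < size; ++i)
    c[i] = size > 1 ? -1.f + 2.f * i / (size - 1) : 0.f;
  return c;
}

int main() {
  for (float v : relative_coords(5)) std::cout << v << " ";
  std::cout << "\n";
  return 0;
}
```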
4 changes: 2 additions & 2 deletions paddle/fluid/operators/allclose_op.cc
@@ -47,7 +47,7 @@ class AllcloseOpMaker : public framework::OpProtoAndCheckerMaker {
"compared as equal. Default: :math:`False` .")
.SetDefault(false);

AddComment(R"DOC(
AddComment(R"DOC(
This operator checks if all :math:`x` and :math:`y` satisfy the condition:
.. math::
@@ -110,7 +110,7 @@ REGISTER_OP_VERSION(allclose)
"The added input 'Atol' is not"
"dispensable."))
.AddCheckpoint(
R"ROC(Delete two float attributes [rtol] and [atol],
R"ROC(Delete two float attributes [rtol] and [atol],
then add 2 string attributes [atol, rtol]. Don't be surprised.
This is because float cannot represent hight-precision
floating-point values, and our framework doesn't support
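The math block above is cut off, so as an assumption I take the condition to be the standard elementwise test |x − y| ≤ atol + rtol·|y|, which matches the attribute names. A scalar sketch (not Paddle's kernel):

```cpp
// Scalar sketch of the assumed allclose test: |x - y| <= atol + rtol * |y|.
#include <cmath>
#include <iostream>

bool close(double x, double y, double rtol = 1e-5, double atol = 1e-8,
           bool equal_nan = false) {
  if (std::isnan(x) || std::isnan(y))
    return equal_nan && std::isnan(x) && std::isnan(y);
  return std::fabs(x - y) <= atol + rtol * std::fabs(y);
}

int main() {
  std::cout << close(1.0, 1.0 + 1e-9) << "\n";  // 1: within tolerance
  std::cout << close(1.0, 1.1) << "\n";         // 0: too far apart
  return 0;
}
```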
4 changes: 2 additions & 2 deletions paddle/fluid/operators/amp/check_finite_and_unscale_op.cc
@@ -69,8 +69,8 @@ Check if input X contains all finite data, if yes, scale it by input Scale.
$$Out = X / scale$$
If any tensor in X contains Inf or Nan, the Out will generate a indicator.
-FoundInfinite will be 1 (True), and Out will not be scaled. In this case, the data of
-Out should not be used, and its data may not be deterministic.
+FoundInfinite will be 1 (True), and Out will not be scaled. In this case, the data of
+Out should not be used, and its data may not be deterministic.
Otherwise, FoundInfinite will be 0 (False).
)DOC");
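A CPU sketch of the contract described in that comment: divide X by scale, and flag FoundInfinite when any element is Inf/NaN, in which case Out is to be ignored (illustration only, not the actual CUDA kernel):

```cpp
// CPU sketch of check_finite_and_unscale's documented behavior.
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

bool check_finite_and_unscale(const std::vector<float>& x, float scale,
                              std::vector<float>* out) {
  bool found_infinite = false;
  out->resize(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    if (!std::isfinite(x[i])) found_infinite = true;
    (*out)[i] = x[i] / scale;  // per the doc, ignore out when flagged
  }
  return found_infinite;
}

int main() {
  std::vector<float> out;
  std::cout << check_finite_and_unscale({2.f, 4.f}, 2.f, &out) << "\n";       // 0
  std::cout << check_finite_and_unscale({2.f, INFINITY}, 2.f, &out) << "\n";  // 1
  return 0;
}
```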
4 changes: 2 additions & 2 deletions paddle/fluid/operators/amp/update_loss_scaling_op.cc
@@ -111,8 +111,8 @@ class UpdateLossScalingOpMaker : public framework::OpProtoAndCheckerMaker {
"Stop updating loss scaling, and just zero inputs.")
.SetDefault(false);
AddComment(R"DOC(
-Update loss scaling according to overall gradients. If all gradients is
-finite after incr_every_n_steps, loss scaling will increase by incr_ratio.
+Update loss scaling according to overall gradients. If all gradients is
+finite after incr_every_n_steps, loss scaling will increase by incr_ratio.
Otherwise, loss scaling will decrease by decr_ratio after
decr_every_n_nan_or_inf steps and each step some gradients are infinite.
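Spelled out, the rule is a small state machine: after `incr_every_n_steps` consecutive all-finite steps multiply the scale by `incr_ratio`; after `decr_every_n_nan_or_inf` steps that each saw non-finite gradients, multiply by `decr_ratio`. A compact sketch paraphrasing that description (not the operator's code; the default values are mine):

```cpp
// Sketch of the dynamic loss-scaling rule described above (paraphrase).
#include <iostream>

struct LossScaler {
  float scale = 32768.f;  // hypothetical starting scale
  int good_steps = 0, bad_steps = 0;
  int incr_every_n_steps = 1000, decr_every_n_nan_or_inf = 2;
  float incr_ratio = 2.f, decr_ratio = 0.5f;

  void update(bool found_inf) {
    if (found_inf) {
      good_steps = 0;
      if (++bad_steps == decr_every_n_nan_or_inf) {
        scale *= decr_ratio;  // gradients keep overflowing: back off
        bad_steps = 0;
      }
    } else {
      bad_steps = 0;
      if (++good_steps == incr_every_n_steps) {
        scale *= incr_ratio;  // long finite streak: grow the scale
        good_steps = 0;
      }
    }
  }
};

int main() {
  LossScaler s;
  s.update(true);
  s.update(true);                // two overflowing steps in a row
  std::cout << s.scale << "\n";  // 16384
  return 0;
}
```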
6 changes: 3 additions & 3 deletions paddle/fluid/operators/argsort_op.cc
@@ -58,9 +58,9 @@ class ArgsortOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
Argsort operator
-Performs sorting on the input tensor along the given axis and outputs two
-tensors, Output(Out) and Output(Indices). They reserve the same shape
-with Input(X), and Output(Out) represents the sorted tensor while
+Performs sorting on the input tensor along the given axis and outputs two
+tensors, Output(Out) and Output(Indices). They reserve the same shape
+with Input(X), and Output(Out) represents the sorted tensor while
Output(Indices) gives the sorted order along the given axis Attr(axis).
)DOC");
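A 1-D sketch of what the comment describes, using `std::sort` over an index array (illustration only; the real op sorts along an arbitrary axis):

```cpp
// 1-D argsort sketch: Indices orders the input, Out is the sorted tensor.
#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  const std::vector<float> x = {3.f, 1.f, 2.f};
  std::vector<int> indices(x.size());
  std::iota(indices.begin(), indices.end(), 0);
  std::sort(indices.begin(), indices.end(),
            [&](int a, int b) { return x[a] < x[b]; });
  for (int i : indices) std::cout << i << " ";     // 1 2 0   (Indices)
  std::cout << "\n";
  for (int i : indices) std::cout << x[i] << " ";  // 1 2 3   (Out)
  std::cout << "\n";
  return 0;
}
```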
6 changes: 3 additions & 3 deletions paddle/fluid/operators/array_to_lod_tensor_op.cc
@@ -223,10 +223,10 @@ class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
"'paddle/framework/lod_rank_table.h' for more details.");
AddOutput("Out", "(LoDTensor) The LoDTensor formed by input tensor array.");
AddComment(
R"DOC(This Op build a big LoDTensor from a std::vector<LoDTensor>
R"DOC(This Op build a big LoDTensor from a std::vector<LoDTensor>
and a LoDRankTable. It is supposed to be used in getting dynamic RNN's
-outputs back to a normal LoDTensor. The std::vector<LoDTensor>
-would be the output of RNN Op and the LoDRankTable would be build
+outputs back to a normal LoDTensor. The std::vector<LoDTensor>
+would be the output of RNN Op and the LoDRankTable would be build
with RNN's input.)DOC");
}
};
2 changes: 1 addition & 1 deletion paddle/fluid/operators/assign_pos_op.cc
@@ -62,7 +62,7 @@ class AssignPosOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
assign_pos_op Operator.
-Assign pos decides which tokens should be fetched belong to
+Assign pos decides which tokens should be fetched belong to
specially counter orderingly.
)DOC");
2 changes: 1 addition & 1 deletion paddle/fluid/operators/attention_lstm_op.cc
@@ -297,7 +297,7 @@ tmp(seqlen*(M+D)) * fc((M+D)*1) => fcout(seqlen*1) with bias, relu
fcout(seqlen*1) * scalar => fcout(seqlen*1) with bias, relu
-dotmul and sum pool ( fcout(seqlen*1), x(seqlen * M) ) => lstm_x_t(1, M)
+dotmul and sum pool ( fcout(seqlen*1), x(seqlen * M) ) => lstm_x_t(1, M)
LSTM part:
use lstm_x_t as input and compute as standard LSTM.
4 changes: 2 additions & 2 deletions paddle/fluid/operators/bmm_op.cc
@@ -44,8 +44,8 @@ class BmmOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("Out", "(Tensor), The output tensor of Bmm op.");
AddComment(R"DOC(
The Bmm operator is used to perform batched matrix multiplication
-over the last two dimensions of the input tensors `X` and `Y`
-which are both 3-dimentionsal.
+over the last two dimensions of the input tensors `X` and `Y`
+which are both 3-dimentionsal.
Examples:
- X: [B, M, K], Y: [B, K, N] => Out: [B, M, N]
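A flat-array sketch of those shapes, multiplying each batch's [M, K] slice of X by the matching [K, N] slice of Y (illustration, not the operator's kernel):

```cpp
// Batched matmul per the Bmm docstring: [B, M, K] x [B, K, N] -> [B, M, N].
#include <vector>

std::vector<float> bmm(const std::vector<float>& x,
                       const std::vector<float>& y, int B, int M, int K,
                       int N) {
  std::vector<float> out(B * M * N, 0.f);
  for (int b = 0; b < B; ++b)
    for (int m = 0; m < M; ++m)
      for (int n = 0; n < N; ++n)
        for (int k = 0; k < K; ++k)
          out[(b * M + m) * N + n] +=
              x[(b * M + m) * K + k] * y[(b * K + k) * N + n];
  return out;
}

int main() {
  // B=1, M=1, K=2, N=1: out = 1*3 + 2*4 = 11
  auto out = bmm({1.f, 2.f}, {3.f, 4.f}, 1, 1, 2, 1);
  return out[0] == 11.f ? 0 : 1;  // exit code 0 on the expected result
}
```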
2 changes: 1 addition & 1 deletion paddle/fluid/operators/broadcast_tensors_op.cc
@@ -54,7 +54,7 @@ class BroadcastTensorsOpMaker : public framework::OpProtoAndCheckerMaker {
"consistent with :code:`x`.")
.AsDuplicable();
AddComment(
R"DOC(This OP is used to broadcast a vector of inputs
R"DOC(This OP is used to broadcast a vector of inputs
with Tensor or LoDTensor type, following broadcast semantics.)DOC");
}
};
4 changes: 2 additions & 2 deletions paddle/fluid/operators/center_loss_op.cc
@@ -80,10 +80,10 @@ class CenterLossOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<bool>("need_update", "whether need to update center info.");
AddComment(R"DOC(
**CenterLoss operator**
-implemention of the center loss function in the papper<<A Discriminative
+implemention of the center loss function in the papper<<A Discriminative
Feature Learning Approach for Deep Face Recognition>>, equations in this implement
is:loss = 1/2 * (x-y)^2 ,where x(X) means the deep feature(output of last hidden layer )
-and y(Label) the target label
+and y(Label) the target label
)DOC");
}
};
4 changes: 2 additions & 2 deletions paddle/fluid/operators/channel_shuffle_op.cc
@@ -52,9 +52,9 @@ class ChannelShuffleOpMaker : public framework::OpProtoAndCheckerMaker {
while keeping the original tensor shape.
Please refer to the paper:
-`ShuffleNet: An Extremely Efficient Convolutional Neural Network for
+`ShuffleNet: An Extremely Efficient Convolutional Neural Network for
Mobile Devices <https://arxiv.org/abs/1707.01083>`_
-by Zhang et. al (2017) for more details.
+by Zhang et. al (2017) for more details.
)DOC");
}
6 changes: 3 additions & 3 deletions paddle/fluid/operators/chunk_eval_op.cc
@@ -145,7 +145,7 @@ For some basics of chunking, please refer to
ChunkEvalOp computes the precision, recall, and F1-score of chunk detection,
and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
Here is a NER example of labeling for these tagging schemes:
Li Ming works at Agricultural Bank of China in Beijing.
IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC
IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC
@@ -158,13 +158,13 @@ and LOC(LOCATION), and we can see that the labels have the form <tag type>-<chun
Since the calculations actually use label ids rather than labels, extra attention
should be paid when mapping labels to ids to make CheckEvalOp work. The key point
is that the listed equations are satisfied by ids.
tag_type = label % num_tag_type
chunk_type = label / num_tag_type
where `num_tag_type` is the num of tag types in the tagging scheme, `num_chunk_type`
is the num of chunk types, and `tag_type` get its value from the following table.
Scheme Begin Inside End Single
plain 0 - - -
IOB 0 1 - -
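The two equations above decompose a label id into its tag and chunk parts. A tiny sketch of that decode, using the IOB row of the table (Begin = 0, Inside = 1, so num_tag_type is 2):

```cpp
// Sketch of the label-id decomposition above:
// tag_type = label % num_tag_type, chunk_type = label / num_tag_type.
#include <iostream>

int main() {
  const int num_tag_type = 2;  // IOB: Begin = 0, Inside = 1
  for (int label = 0; label < 4; ++label) {
    const int tag_type = label % num_tag_type;
    const int chunk_type = label / num_tag_type;
    // label 0 -> B of chunk 0, 1 -> I of chunk 0, 2 -> B of chunk 1, ...
    std::cout << "label " << label << ": tag " << tag_type << ", chunk "
              << chunk_type << "\n";
  }
  return 0;
}
```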
2 changes: 1 addition & 1 deletion paddle/fluid/operators/cinn/cinn_instruction_run_op.cc
@@ -94,7 +94,7 @@ CINN(https://github.com/PaddlePaddle/CINN/blob/develop/README.md) instruction ex
Both the input and output of this operator are a set of variables
which are the input and output arguments of the bound cinn instruction respectively.
In addition, there is an attribute named 'cached_index' should be
-set necessarily to get the CinnCompiledObject where the instruction is included
+set necessarily to get the CinnCompiledObject where the instruction is included
and 'instruction_index' is fetch the instruction object from complied runtime prograrm.
It accomplishes the execution of the instruction according to the following steps:
4 changes: 2 additions & 2 deletions paddle/fluid/operators/class_center_sample_op.cc
@@ -75,8 +75,8 @@ class ClassCenterSampleOpMaker : public framework::OpProtoAndCheckerMaker {
The process of sampling subset class centers is straightforward: 1) First select the positive class centers;
2) Randomly sample negative class centers. Specifically, given a Label tensor, shape [batch_size], select all
the positive class centers and randomly sample negative class centers, then remap the input label tensor using
-the sampled class centers. Note that if the number of the positive class centers is greater than the input
-num_samples, it keeps all the positive class centers and the shape of SampledLocalClassCenter will be
+the sampled class centers. Note that if the number of the positive class centers is greater than the input
+num_samples, it keeps all the positive class centers and the shape of SampledLocalClassCenter will be
[num_positive_class_centers]. The op supports CPU, single GPU and multi GPU.
For more information, Partial FC: Training 10 Million Identities on a Single Machine
4 changes: 2 additions & 2 deletions paddle/fluid/operators/collective/global_scatter_op.cc
@@ -80,8 +80,8 @@ class GlobalScatterOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("Out", "(Tensor) the result of global_scatter.");
AddComment(R"DOC(
Global Scatter Operator
-Scatter data in X which has been put together belong to one expert
-to n_expert * world_size exeperts according to local_count
+Scatter data in X which has been put together belong to one expert
+to n_expert * world_size exeperts according to local_count
and receive tensors from n_expert * world_size experts according
to global_count.
)DOC");