[CodeStyle][Typos][A-[21-27],A-[29-30]] Fix typos (alloctor,almostly,alog,Alread,Allways,alway,ane,adn,expaned,annotaions,annotaion,architecure,architecuture,aer) #69644

Merged
merged 1 commit on Nov 23, 2024
11 changes: 1 addition & 10 deletions _typos.toml
@@ -19,6 +19,7 @@ pash = 'pash'
astroid = 'astroid'
eles = 'eles'
builer = 'builer'
+anc = 'anc'

# These words need to be fixed
ontext = 'ontext'
@@ -71,7 +72,6 @@ bootom = 'bootom'
Vetical = 'Vetical'
mantain = 'mantain'
patition = 'patition'
-almostly = 'almostly'
Dowloading = 'Dowloading'
Prepar = 'Prepar'
precent = 'precent'
@@ -122,7 +122,6 @@ decalared = 'decalared'
coalesc = 'coalesc'
graident = 'graident'
infered = 'infered'
-Allways = 'Allways'
substitue = 'substitue'
Ouput = 'Ouput'
witk = 'witk'
@@ -132,7 +131,6 @@ staticly = 'staticly'
emited = 'emited'
repalce = 'repalce'
GARD = 'GARD'
-annotaions = 'annotaions'
gloabl = 'gloabl'
devide = 'devide'
zerp = 'zerp'
@@ -225,8 +223,6 @@ Rto = 'Rto'
tunning = 'tunning'
kerenl = 'kerenl'
Temperarily = 'Temperarily'
-alway = 'alway'
-ane = 'ane'
cliping = 'cliping'
DEIVCE = 'DEIVCE'
neeed = 'neeed'
@@ -541,7 +537,6 @@ instrinsics = 'instrinsics'
outputing = 'outputing'
hadler = 'hadler'
qucik = 'qucik'
-alog = 'alog'
exsit = 'exsit'
deciamls = 'deciamls'
uncorrectly = 'uncorrectly'
@@ -644,7 +639,6 @@ insid = 'insid'
coodinate = 'coodinate'
usefull = 'usefull'
sqaure = 'sqaure'
-adn = 'adn'
intialize = 'intialize'
Taget = 'Taget'
parm = 'parm'
@@ -730,7 +724,6 @@ compitable = 'compitable'
comple = 'comple'
dealed = 'dealed'
ser = 'ser'
-anc = 'anc'
contraints = 'contraints'
propogated = 'propogated'
beacuse = 'beacuse'
@@ -795,7 +788,6 @@ imformation = 'imformation'
kernerl = 'kernerl'
Boardcast = 'Boardcast'
Greate = 'Greate'
-Alread = 'Alread'
unkown = 'unkown'
recevied = 'recevied'
Normlized = 'Normlized'
@@ -804,7 +796,6 @@ orginal = 'orginal'
Stati = 'Stati'
Destory = 'Destory'
seperately = 'seperately'
-alloctor = 'alloctor'
fullfill = 'fullfill'
Substitude = 'Substitude'
producted = 'producted'

@@ -140,8 +140,8 @@ struct SearchAlgorithm<ReverseTopoNodePairPattern,
template <typename Kind, typename GraphMatcher, typename GraphOperation>
void GraphTransformer(PatternGraph* graph) {
VLOG(4) << "Start GraphTransformer...";
-auto alog = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);
-alog();
+auto algo = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);
+algo();
}

} // namespace cinn::fusion
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fused/fused_attention_op.cc
@@ -389,8 +389,8 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
"The qkv_w shape is (h, 3h), do transpose to it.")
.SetDefault(false);
AddAttr<bool>("pre_layer_norm",
"if true, the attention op uses pre_layer_norm architecure, "
"else, uses post_layer_norm architecuture. "
"if true, the attention op uses pre_layer_norm architecture, "
"else, uses post_layer_norm architecture. "
"[default false].")
.SetDefault(false);
AddAttr<float>("epsilon",
2 changes: 1 addition & 1 deletion paddle/fluid/platform/init.cc
@@ -294,7 +294,7 @@ void SignalHandle(const char *data, int size) {
// NOTE1: The glog FailureSignalHandler dumped messages
// are deal with line by line
auto signal_msg_dumper_ptr = SignalMessageDumper::Instance().Get();
-// NOTE2: we only deal with the time info ane signal info,
+// NOTE2: we only deal with the time info and signal info,
// the stack trace will generated by paddle self
if (StartsWith(data, "*** Aborted at")) {
*signal_msg_dumper_ptr << "\n [TimeInfo: " << std::string(data, size - 1)
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/pybind.cc
@@ -2476,7 +2476,7 @@ All parameter, weight, gradient are variables in Paddle.

m.def("_is_program_version_supported", IsProgramVersionSupported);
#if defined(PADDLE_WITH_CUDA)
m.def("alloctor_dump", [](const phi::GPUPlace &place) {
m.def("allocator_dump", [](const phi::GPUPlace &place) {
auto allocator = std::dynamic_pointer_cast<
paddle::memory::allocation::AutoGrowthBestFitAllocator>(
paddle::memory::allocation::AllocatorFacade::Instance()
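For readers who want to try the renamed binding, a minimal usage sketch follows. Only the function name allocator_dump comes from this diff; the module path (paddle.base.core) and the call pattern are assumptions for illustration.

# Assumption: the pybind module is importable as paddle.base.core and the
# renamed function takes the single GPU place shown in the diff above.
import paddle
from paddle.base import core

if paddle.is_compiled_with_cuda():
    place = paddle.CUDAPlace(0)
    core.allocator_dump(place)  # dump the auto-growth allocator state for GPU 0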

@@ -189,7 +189,7 @@ SpmdInfo CrossEntropyWithSoftmaxInferSpmdBase(const DistMetaTensor& x,
// todo if softmax_normalize axis is sharded, notify downstream phi api to
// select c_softmax_with_entropy_kernel.

-// according to the phi api implementation, the softmax_out tensor will alway
+// according to the phi api implementation, the softmax_out tensor will always
// be generated not matter the value of use_softmax.
return {{x_dist_attr_dst, label_dist_attr_dst},
{softmax_out_dist_attr_dst, loss_dist_attr_dst}};
@@ -363,7 +363,7 @@ SpmdInfo CrossEntropyWithSoftmaxInferSpmdReverse(
<< str_join(x_dims_mapping) << "]\nLabel dims_mapping: ["
<< str_join(label_dims_mapping) << "]\n\n";

-// according to the phi api implementation, the softmax_out tensor will alway
+// according to the phi api implementation, the softmax_out tensor will always
// be generated not matter the value of use_softmax.
return {{x_dist_attr, label_dist_attr},
{s_out_dist_attr_dst, loss_dist_attr_dst}};
2 changes: 1 addition & 1 deletion python/paddle/distributed/passes/auto_parallel_fp16.py
@@ -124,7 +124,7 @@ def _keep_fp32_input(op, in_name):

# TODO check if bf16 and fp16 still share the same logic
def _keep_fp32_output(op, out_name):
-# TODO(lizhiyu02): Support 'custom_white_list' adn 'custom_black_list' in amp_options
+# TODO(lizhiyu02): Support 'custom_white_list' and 'custom_black_list' in amp_options
if not op.amp_options.enable:
return True
op_type = op.type
2 changes: 1 addition & 1 deletion python/paddle/incubate/asp/supported_layer_list.py
@@ -72,7 +72,7 @@ def _default_pruning(
# In sparse training, layer weight matrices is viewed sparse matrix A, so
# the math formula should be 'Act(WX + b)'. However, default formula in PaddlePaddle
# is 'Act(XW + b)'. For enabling SPMMA, weights and inputs should be transposed
-# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune alog k dimension
+# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune along k dimension
# of W^T, which is m dimension of W. Moreover, all mask generating functions in
# asp/utils is row-major pruning. That is the reason we have to transpose weight
# matrices before invoking create_mask. Then we transpose the result mask to make
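The comment above describes a transpose, prune along k, transpose back pattern. Below is a minimal NumPy sketch of that idea; the helper is a hypothetical stand-in, not Paddle's actual asp/utils mask generator.

import numpy as np

def create_mask_rowwise(w, n=2, m=4):
    # Hypothetical row-major n:m mask: in each group of m consecutive
    # elements along a row, keep the n entries with the largest magnitude.
    flat = w.reshape(-1, m)
    keep = np.argsort(-np.abs(flat), axis=1)[:, :n]
    mask = np.zeros_like(flat)
    np.put_along_axis(mask, keep, 1.0, axis=1)
    return mask.reshape(w.shape)

w = np.random.randn(16, 8)          # W used as Act(XW + b); axis 0 is the k dimension
mask = create_mask_rowwise(w.T).T   # mask W^T row-wise (i.e. along k), then transpose back
w_pruned = w * mask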
2 changes: 1 addition & 1 deletion python/paddle/nn/utils/transform_parameters.py
@@ -33,7 +33,7 @@
from paddle._typing import ShapeLike


-# input==output, inplace strategy of reshape has no cost almostly
+# input==output, inplace strategy of reshape has no cost almost
def _inplace_reshape_dygraph(x: Tensor, shape: ShapeLike) -> None:
x_shape = _create_tensor(dtype='int64')
if in_dygraph_mode():
2 changes: 1 addition & 1 deletion python/paddle/optimizer/optimizer.py
@@ -1157,7 +1157,7 @@ def _create_optimization_pass(
# _create_accumulators method if it needs to create accumulators
# for parameters and extend _finish_update method to add custom ops.

-# Allways called under program_guard use global block as loss block
+# Always called under program_guard use global block as loss block
# But if current block is in control flow, append optimize op in the
# grad block of current block

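For context on the comment above, here is a small static-graph sketch (illustrative only, not taken from this PR) in which minimize() runs under program_guard and appends its optimize ops to the global (loss) block.

import paddle

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[None, 8], dtype="float32")
    loss = paddle.mean(paddle.nn.functional.relu(x))
    # minimize() reaches _create_optimization_pass, which the comment above
    # notes is always called under program_guard like this.
    paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)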

@@ -354,7 +354,7 @@ def __init__(
self._quantized_weight_var_name = set()
self._quantized_act_var_name = set()
self._weight_op_pairs = {}
-# The vars for alog = KL or hist
+# The vars for algo = KL or hist
self._sampling_act_abs_min_max = {}
self._sampling_act_histogram = {}
self._sampling_data = {}
2 changes: 1 addition & 1 deletion python/paddle/tensor/tensor.prototype.pyi
@@ -13,7 +13,7 @@
# limitations under the License.

# The `Tensor` template `tensor.prototype.pyi` for `tools/gen_tensor_stub.py` to generate the stub file `tensor.pyi`.
-# Add docstring, attributes, methods and alias with type annotaions for `Tensor` in `tensor.prototype.pyi`
+# Add docstring, attributes, methods and alias with type annotations for `Tensor` in `tensor.prototype.pyi`
# if not conveniently coding in original place (like c++ source file).

# Import common typings for generated methods
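As an illustration of the kind of annotated stub that tensor.prototype.pyi feeds to tools/gen_tensor_stub.py, a short sketch follows; the class body and signatures are hypothetical placeholders, not copied from the real file.

# Illustrative stub style only; real entries live in tensor.prototype.pyi.
from __future__ import annotations
from typing import Optional

class Tensor:
    shape: list[int]

    def abs(self, name: Optional[str] = None) -> Tensor: ...
    def astype(self, dtype: str) -> Tensor: ...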
2 changes: 1 addition & 1 deletion test/cpp/fluid/platform/errors_test.cc
@@ -92,7 +92,7 @@ TEST(Errors, NotFound) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(NotFound); }

TEST(Errors, OutOfRange) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(OutOfRange); }

-TEST(Errors, AlreadExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }
+TEST(Errors, AlreadyExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }

TEST(Errors, ResourceExhausted) {
CHECK_ALL_PADDLE_EXCEPTION_MACRO(ResourceExhausted);
8 changes: 4 additions & 4 deletions test/cpp/phi/kernels/test_gpu_timer.cu
@@ -86,19 +86,19 @@ TEST(GpuTimer, Sum) {
#endif

using Functor = std::function<void(float *, float *, size_t)>;
-Functor alog0 = Algo<4, 256, 1024>;
+Functor algo0 = Algo<4, 256, 1024>;
Functor algo1 = Algo<1, 256, 1024>;
-Functor alog2 = Algo<1, 256, 8>;
+Functor algo2 = Algo<1, 256, 8>;

-std::vector<Functor> algos = {alog0, algo1, alog2};
+std::vector<Functor> algos = {algo0, algo1, algo2};

for (int j = 0; j < algos.size(); ++j) {
auto algo = algos[j];
phi::GpuTimer timer;
timer.Start(0);
algo(d_in1, d_in2, N);
timer.Stop(0);
VLOG(3) << "alog: " << j << " cost: " << timer.ElapsedTime() << "ms";
VLOG(3) << "algo: " << j << " cost: " << timer.ElapsedTime() << "ms";
}

#ifdef __HIPCC__