[CodeStyle][Typos][D-[37-44]] Fix typos (dito,devide,documention,doens,doen,dobule,doubel,dowloading,downsteram) #70642

Merged
8 commits merged on Jan 8, 2025
9 changes: 0 additions & 9 deletions _typos.toml
@@ -36,15 +36,6 @@ unpacket = "unpacket"
 # These words need to be fixed
 Creater = 'Creater'
 creater = 'creater'
-dito = 'dito'
-devide = 'devide'
-documention = 'documention'
-doens = 'doens'
-doen = 'doen'
-doubel = 'doubel'
-dobule = 'dobule'
-Dowloading = 'Dowloading'
-downsteram = 'downsteram'
 fetchs = 'fetchs'
 Flattend = 'Flattend'
 flattend = 'flattend'
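Note on the _typos.toml change: the nine entries deleted above lived in the "These words need to be fixed" allowlist, so removing them lets the typos checker flag those spellings again, which is why the rest of this PR fixes every remaining occurrence. A standalone sketch (illustrative only; the authoritative check is the typos tool that reads _typos.toml) of how one could confirm none of the de-allowlisted spellings are left in a source tree:

# check_removed_typos.py -- illustrative helper, not part of this PR.
import re
import sys
from pathlib import Path

# The nine spellings whose allowlist entries were removed above.
REMOVED_WORDS = [
    "dito", "devide", "documention", "doens", "doen",
    "doubel", "dobule", "Dowloading", "downsteram",
]
PATTERN = re.compile(r"\b(" + "|".join(REMOVED_WORDS) + r")\b")

def scan(root: str = ".") -> int:
    """Print every remaining occurrence under `root` and return the hit count."""
    hits = 0
    for path in Path(root).rglob("*"):
        if not path.is_file():
            continue
        try:
            text = path.read_text(errors="ignore")
        except OSError:
            continue
        for lineno, line in enumerate(text.splitlines(), start=1):
            if PATTERN.search(line):
                hits += 1
                print(f"{path}:{lineno}: {line.strip()}")
    return hits

if __name__ == "__main__":
    sys.exit(1 if scan() else 0)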
6 changes: 3 additions & 3 deletions paddle/cinn/operator_fusion/graph_transformer/matcher.h
@@ -285,9 +285,9 @@ struct LeafReshapeConnectionMatcher {
 struct NotAllElementWiseDownstreamMatcher {
   bool operator()(const PatternGraph& graph, const PatternNodePtr& node) {
     size_t count = 0;
-    for (const auto& downsteram : node->downstream()) {
-      if (StmtPatternGraphMatcher<TrivialPattern>()(graph, downsteram)) {
-        auto ops = std::get<TrivialPattern>(downsteram->stmt_pattern()).ops();
+    for (const auto& downstream : node->downstream()) {
+      if (StmtPatternGraphMatcher<TrivialPattern>()(graph, downstream)) {
+        auto ops = std::get<TrivialPattern>(downstream->stmt_pattern()).ops();
         bool is_elementwise =
             std::all_of(ops.begin(), ops.end(), [](pir::Operation* op) {
               return GetOpPatternKind(op) == hlir::framework::kElementWise;
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/op_teller.cc
@@ -2383,7 +2383,7 @@ struct SimpleOpTypeSetTeller : public Teller {
 
     // conv3d_transpose
     if (op_type == "conv3d_transpose") {
-      // trt doen't support output_padding when < 8406
+      // trt doesn't support output_padding when < 8406
       // output_padding is usually set when stride > 1
 #if !IS_TRT_VERSION_GE(8400)
       if (desc.HasAttr("output_padding")) {
@@ -121,7 +121,7 @@ static void AssignZeroToParentScope(
       PADDLE_ENFORCE_EQ(input_tensors.size(),
                         outside_tensors->size(),
                         common::errors::InvalidArgument(
-                            "DenseTensorArray outside_var %s doen't have same "
+                            "DenseTensorArray outside_var %s doesn't have same "
                             "size as input_var %s.",
                             outside_grad_name,
                             input_name));
2 changes: 1 addition & 1 deletion paddle/fluid/operators/elementwise/elementwise_mul_op.cc
@@ -139,7 +139,7 @@ class ElementwiseMulCompositeDoubleGradOpMaker
         -1,
         common::errors::InvalidArgument(
             "We only support axis = -1 in composite "
-            "add_doubel_grad but we got: ",
+            "add_double_grad but we got: ",
             axis));
 
     // get output
2 changes: 1 addition & 1 deletion paddle/fluid/operators/elementwise/elementwise_sub_op.cc
@@ -125,7 +125,7 @@ class ElementwiseSubCompositeDoubleGradOpMaker
         -1,
         common::errors::InvalidArgument(
             "We only support axis = -1 in composite "
-            "subtract_doubel_grad but we got: ",
+            "subtract_double_grad but we got: ",
             axis));
 
     paddle::Tensor* grad_out_grad = this->GetOutputPtr(&grad_out_grad_t);
2 changes: 1 addition & 1 deletion paddle/fluid/pir/dialect/op_generator/op_gen.py
@@ -342,7 +342,7 @@ class {TEST_API} {op_name} : public pir::Op<{op_name}{interfaces}{traits}> {{
     'int': 'pir::Int32Attribute',
     'int64_t': 'pir::Int64Attribute',
     'float': 'pir::FloatAttribute',
-    'dobule': 'pir::DoubleAttribute',
+    'double': 'pir::DoubleAttribute',
     'bool': 'pir::BoolAttribute',
 }
 
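The op_gen.py hunk above is more than a comment fix: the dictionary maps C++ attribute type names to PIR attribute classes, so a key spelled 'dobule' presumably never matched an op whose attribute type is 'double'. A minimal sketch of that failure mode (the map excerpt mirrors the diff; the lookup helper is illustrative, not the generator's actual code path):

# Why the key spelling matters; helper name and error handling are illustrative.
ATTR_TYPE_MAP = {
    'int': 'pir::Int32Attribute',
    'int64_t': 'pir::Int64Attribute',
    'float': 'pir::FloatAttribute',
    'double': 'pir::DoubleAttribute',  # was keyed as 'dobule' before this PR
    'bool': 'pir::BoolAttribute',
}

def to_pir_attribute(cpp_type: str) -> str:
    # With the old 'dobule' key, looking up 'double' fell into this KeyError branch.
    try:
        return ATTR_TYPE_MAP[cpp_type]
    except KeyError as err:
        raise KeyError(f"unsupported attribute type: {cpp_type}") from err

assert to_pir_attribute('double') == 'pir::DoubleAttribute'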
1 change: 0 additions & 1 deletion paddle/phi/kernels/funcs/values_vectors_functor.h
@@ -467,7 +467,6 @@ struct MatrixEighFunctor<GPUContext, T> {
               "When has_vectors is true,"
               "the eigenvectors needs to be calculated,"
               "so the eigenvectors must be provided."));
-      // input_trans = dito.Transpose(input_trans);
       input_trans = phi::TransposeLast2Dim<T>(dev_ctx, input_trans);
       eigen_vectors->ShareDataWith(input_trans);
     }
2 changes: 1 addition & 1 deletion paddle/scripts/paddle_build.sh
@@ -3674,7 +3674,7 @@ function distribute_test() {
     parallel_fa_unit
     echo "End FA tests"
 
-    echo "Dowloading ...."
+    echo "Downloading ...."
     cd ${work_dir}
     wget https://paddlenlp.bj.bcebos.com/wheels/PaddleNLP_stable_paddle.tar.gz --no-proxy
     tar -zvxf PaddleNLP_stable_paddle.tar.gz
2 changes: 1 addition & 1 deletion paddle/utils/string/printf.h
@@ -54,7 +54,7 @@
 //                      weekday, month, day, hour, min);
 //
 // 2. High-performance -- most printed strings are not too long and
-//    doens't need dynamic memory allocation. Many StringPrintf
+//    doesn't need dynamic memory allocation. Many StringPrintf
 //    implementations doesn't enforce type-safe, but are
 //    high-performance, including
 //
@@ -563,7 +563,7 @@ def apply(
         key_states = paddle.transpose(key_states, [0, 2, 1, 3])
         value_states = paddle.transpose(value_states, [0, 2, 1, 3])
 
-        # matmul and devide by sqrt(head_dim)
+        # matmul and divide by sqrt(head_dim)
         attn_weights = paddle.matmul(
             query_states / math.sqrt(head_dim),
             key_states.transpose([0, 1, 3, 2]),
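The comment corrected here (and in the two similar hunks further down) describes standard scaled dot-product attention: scale Q·Kᵀ by 1/sqrt(head_dim), apply softmax, then multiply by V. For readers skimming the diff, a minimal, self-contained sketch of that computation using public Paddle ops (tensor names and shapes are illustrative, not the test models' actual code):

# Scaled dot-product attention sketch; shapes and names are illustrative only.
import math
import paddle

batch, num_heads, seq_len, head_dim = 2, 4, 8, 16
query_states = paddle.randn([batch, num_heads, seq_len, head_dim])
key_states = paddle.randn([batch, num_heads, seq_len, head_dim])
value_states = paddle.randn([batch, num_heads, seq_len, head_dim])

# matmul and divide by sqrt(head_dim): scores are [batch, num_heads, seq_len, seq_len]
attn_weights = paddle.matmul(
    query_states / math.sqrt(head_dim),
    key_states.transpose([0, 1, 3, 2]),
)
attn_weights = paddle.nn.functional.softmax(attn_weights, axis=-1)

# Weighted sum over the values: [batch, num_heads, seq_len, head_dim]
attn_output = paddle.matmul(attn_weights, value_states)
print(attn_output.shape)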
2 changes: 1 addition & 1 deletion python/paddle/nn/clip.py
@@ -1407,7 +1407,7 @@ def set_gradient_clip(clip, param_list=None, program=None):
         "We recommend a new strategy: set 'grad_clip' "
         "when initializing the 'optimizer'. "
         "This method can reduce the mistakes, please "
-        "refer to documention of 'optimizer'."
+        "refer to documentation of 'optimizer'."
     )
 
     if not isinstance(clip, ClipGradBase):
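For context, the deprecation message fixed above steers users toward passing grad_clip when constructing the optimizer instead of calling set_gradient_clip. A small sketch of that recommended pattern (the model and hyperparameters are placeholders, not taken from this PR):

# Recommended usage: attach gradient clipping via the optimizer's grad_clip argument.
import paddle

model = paddle.nn.Linear(10, 1)  # placeholder model
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
opt = paddle.optimizer.SGD(
    learning_rate=0.01,
    parameters=model.parameters(),
    grad_clip=clip,
)

x = paddle.randn([4, 10])
loss = model(x).mean()
loss.backward()   # clipping is applied when the optimizer updates the parameters
opt.step()
opt.clear_grad()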
@@ -117,7 +117,7 @@ def scaled_dot_product_attention(
     key_states = paddle.transpose(key_states, [0, 2, 1, 3])
     value_states = paddle.transpose(value_states, [0, 2, 1, 3])
 
-    # matmul and devide by sqrt(head_dim)
+    # matmul and divide by sqrt(head_dim)
     attn_weights = paddle.matmul(
         query_states / math.sqrt(head_dim), key_states.transpose([0, 1, 3, 2])
     )
2 changes: 1 addition & 1 deletion test/ir/pir/cinn/llama_test_model.py
@@ -187,7 +187,7 @@ def scaled_dot_product_attention(
     key_states = paddle.transpose(key_states, [0, 2, 1, 3])
     value_states = paddle.transpose(value_states, [0, 2, 1, 3])
 
-    # matmul and devide by sqrt(head_dim)
+    # matmul and divide by sqrt(head_dim)
     attn_weights = paddle.matmul(
         query_states / math.sqrt(head_dim), key_states.transpose([0, 1, 3, 2])
     )
2 changes: 1 addition & 1 deletion test/legacy_test/test_cond.py
@@ -471,7 +471,7 @@ def test_extremely_simple_net_with_op_in_condition(self):
             main_program, fetch_list=[out, b, a.grad_name, b.grad_name]
         )
         # Note: fill_constant has loss of precision, you have to assertEqual
-        # with values doens't lose precision in float-point number.
+        # with values doesn't lose precision in float-point number.
         self.assertEqual(ret[0][0], ret[1][0])
         self.assertEqual(ret[2][0], 0.0)
         self.assertEqual(ret[3][0], 1.0)
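The corrected comment above explains why the test asserts against 0.0 and 1.0: those values are exactly representable in binary floating point, so assertEqual is safe, while a value such as 0.1 can lose precision once stored as float32. A tiny, standalone illustration of that point (independent of the test; NumPy is used only for the float32 cast):

# 0.0 and 1.0 survive a float32 round-trip exactly; 0.1 does not.
import numpy as np

assert np.float32(0.0) == 0.0
assert np.float32(1.0) == 1.0
assert np.float32(0.1) != 0.1   # 0.1 has no exact binary representation
print(float(np.float32(0.1)))   # 0.10000000149011612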