Commit

Fix
co63oc committed Mar 4, 2025
1 parent 6e62a0c commit 879a73e
Showing 9 changed files with 16 additions and 16 deletions.
4 changes: 2 additions & 2 deletions paddle/cinn/common/simplify_special_pattern.cc
@@ -262,8 +262,8 @@ std::optional<ir::IndexExpr> DivMulAddModDivCase(const ir::IndexExpr& lhs,
     // Check if the pattern is matched
     if (CheckPattern(cand, pattern, &map) &&
         map.at("c") == map.at("a") * map.at("b")) {
-      ir::IndexExpr simplied = map.at("f") / map.at("b");
-      res = res.defined() ? res + simplied : simplied;
+      ir::IndexExpr simplified = map.at("f") / map.at("b");
+      res = res.defined() ? res + simplified : simplified;
       find = true;
       continue;
     }
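The guard map.at("c") == map.at("a") * map.at("b") hints at the arithmetic behind this rewrite. A minimal sketch in plain Python (my reading of the pattern's shape, not CINN's ir::IndexExpr API) of the floor-division identity that would make f / b a valid replacement:

```python
# Assumed pattern (hypothetical): f / c * a + f % c / b, with c == a * b.
# Under floor division this always collapses to f / b for f >= 0.
a, b = 3, 4
c = a * b  # the condition checked by map.at("c") == map.at("a") * map.at("b")
for f in range(1000):
    assert (f // c) * a + (f % c) // b == f // b
```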
10 changes: 5 additions & 5 deletions paddle/cinn/runtime/cuda/cuda_util.cc
@@ -1667,11 +1667,11 @@ void GemmStridedBatched(const cublasHandle_t &cublas,
   PADDLE_ENFORCE_EQ(
       lhs_bs,
       rhs_bs,
-      ::common::errors::InvalidArgument("bs of lhs and rhs dismatch."));
+      ::common::errors::InvalidArgument("bs of lhs and rhs mismatch."));
   PADDLE_ENFORCE_EQ(
       lhs_bs,
       output_bs,
-      ::common::errors::InvalidArgument("bs of lhs and output dismatch."));
+      ::common::errors::InvalidArgument("bs of lhs and output mismatch."));
 
   // copy values of bias_data to the output_data
   if (bias_data != nullptr) {
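These two checks enforce the batch-size contract of a strided-batched GEMM: lhs, rhs, and the output must share one leading batch dimension. A minimal NumPy sketch (an illustrative stand-in, not the cuBLAS call itself):

```python
import numpy as np

lhs = np.zeros((8, 4, 5))  # batch 8, each matrix 4x5
rhs = np.zeros((8, 5, 6))  # batch 8, each matrix 5x6
out = np.matmul(lhs, rhs)  # one GEMM per batch entry
# the analogue of lhs_bs == rhs_bs and lhs_bs == output_bs above
assert lhs.shape[0] == rhs.shape[0] == out.shape[0] == 8
assert out.shape == (8, 4, 6)
```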
@@ -1858,7 +1858,7 @@ void cinn_call_triangular_solve_nvgpu(void *v_args,
   PADDLE_ENFORCE_EQ(input1->type.bits,
                     input2->type.bits,
                     ::common::errors::InvalidArgument(
-                        "input1 and ipnput2's type bits is dismatch."));
+                        "input1 and input2's type bits is mismatch."));
   uint8_t bits = input1->type.bits;
   uint8_t bytes = bits / 8;
   PADDLE_ENFORCE_EQ(
@@ -2035,11 +2035,11 @@ void cinn_gpu_cublas_gemm(const std::vector<int> &attrs,
   PADDLE_ENFORCE_EQ(lhs_dim_size,
                     rhs_dim_size,
                     ::common::errors::InvalidArgument(
-                        "dimension dismatch between lhs and rhs."));
+                        "dimension mismatch between lhs and rhs."));
   PADDLE_ENFORCE_EQ(lhs_dim_size,
                     out_dim_size,
                     ::common::errors::InvalidArgument(
-                        "dimension dismatch between lhs and out."));
+                        "dimension mismatch between lhs and out."));
   PADDLE_ENFORCE_EQ(
       (lhs_dim_size == 2 || lhs_dim_size == 3),
       true,
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
@@ -902,7 +902,7 @@ void AutoMixedPrecisionPass::SetVarPrecision() const {
     }
   }
 
-  // This code used to precess vars with the same name. Vars with the same
+  // This code used to process vars with the same name. Vars with the same
   // name should have the same data type.
   for (auto* subgraph : subgraphes_) {
     for (auto* var_node : subgraph->Nodes()) {
@@ -333,7 +333,7 @@ void CinnJitInstruction::Run() {
       dev_ctx_->Alloc(tensor_args_[i], tensor_args_[i]->dtype());
   }
 
-  // 2. exexute kernel
+  // 2. execute kernel
   fn_ptr_impl_->Run(tensor_args_, running_stream, is_gpu);
 
   // 3. release resource
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/engine.h
@@ -359,7 +359,7 @@ class TensorRTEngine {
     }
   }
 
-  // NOTE: The func bellow was modified to adapt the dynamic shape.
+  // NOTE: The func below was modified to adapt the dynamic shape.
   // Initialize the inference network, so that TensorRT layers can add to this
   // network.
   void InitNetwork();
@@ -156,7 +156,7 @@ class OneDNNBf16PlacementPattern : public pir::RewritePattern {
          idx++) {
       auto input_type =
           vector_type[idx].isa<paddle::dialect::DenseTensorType>();
-      // We don't precess nested VectorType
+      // We don't process nested VectorType
       if (!input_type) return false;
       pir::Type input_dtype =
           vector_type[idx]
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/edit_distance_kernel.cu
@@ -163,7 +163,7 @@ void EditDistanceKernel(const Context& ctx,
                  0,
                  stream>>>(dist, n);
 
-    // Compute the elements of distance matrix in the anti-diagonal diretion
+    // Compute the elements of distance matrix in the anti-diagonal direction
     for (int64_t slice = 2; slice < m + n + 1; ++slice) {
       int z_m = slice < m + 1 ? 0 : slice - m;
       int z_n = slice < n + 1 ? 0 : slice - n;
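The corrected comment describes the kernel's traversal order: cells of the (m + 1) x (n + 1) DP matrix that share an anti-diagonal (i + j == slice) depend only on earlier diagonals, so each slice can be computed in parallel. A minimal Python sketch of that order (dimensions assumed for illustration; not the CUDA kernel):

```python
m, n = 3, 4  # assumed string lengths; DP matrix is (m + 1) x (n + 1)
for s in range(2, m + n + 1):  # mirrors: for (slice = 2; slice < m + n + 1; ++slice)
    lo = max(1, s - n)         # first valid row on this anti-diagonal
    hi = min(m, s - 1)         # last valid row
    cells = [(i, s - i) for i in range(lo, hi + 1)]
    print(s, cells)            # every cell in `cells` is one parallel step
```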
6 changes: 3 additions & 3 deletions python/paddle/jit/pir_dy2static/parameter_recorder.py
@@ -110,11 +110,11 @@ def restore_checkpoint(self, checkpoint):
         self.params_dict = checkpoint
 
     def save_checkpoint(self):
-        ckeckpoint = {}
+        checkpoint = {}
         for program_id, params in self.params_dict.items():
             new_params = dict(params.items())
-            ckeckpoint[program_id] = new_params
-        return ckeckpoint
+            checkpoint[program_id] = new_params
+        return checkpoint
 
 
 _global_parameter_recorder = ParametersRecorder()
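The copy via dict(params.items()) is what keeps a saved checkpoint frozen while params_dict keeps changing. A minimal sketch of the intended round-trip (hypothetical usage based only on the methods shown above; requires paddle installed):

```python
from paddle.jit.pir_dy2static.parameter_recorder import ParametersRecorder

recorder = ParametersRecorder()
recorder.params_dict = {0: {"w": 1}}   # assumed per-program parameter maps

ckpt = recorder.save_checkpoint()      # copies each per-program dict
recorder.params_dict[0]["w"] = 2       # a later mutation...
assert ckpt[0]["w"] == 1               # ...does not leak into the checkpoint

recorder.restore_checkpoint(ckpt)      # roll back to the saved state
assert recorder.params_dict[0]["w"] == 1
```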
@@ -1667,7 +1667,7 @@ def UNPACK_EX(self, instr: Instruction):
 
         if instr.argval >= 256:
             # NOTE: If the number of unpacked variables exceeds 256, python will report an error like:
-            # SyntaxError: too many expressions in star-unpacking assignmen,
+            # SyntaxError: too many expressions in star-unpacking assignment,
             # so if the number of unpacked variables exceeds 256, it will be treated as the following case.
             # a, b, *c, d = e
             front_nums = instr.arg & 0xFF
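For context on the `& 0xFF` in the hunk above: CPython's UNPACK_EX packs two counts into one oparg, with the low byte counting targets before the starred name and the high byte counting targets after it. A small sketch with hypothetical values:

```python
# `a, b, *c, d = e` has 2 targets before the star and 1 after,
# so the corresponding oparg is (1 << 8) | 2.
arg = (1 << 8) | 2
front_nums = arg & 0xFF  # targets before the starred expression -> 2
back_nums = arg >> 8     # targets after the starred expression  -> 1
assert (front_nums, back_nums) == (2, 1)

a, b, *c, d = range(5)   # the same unpacking at source level
assert (a, b, c, d) == (0, 1, [2, 3], 4)
```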
