diff --git a/be/src/common/daemon.cpp b/be/src/common/daemon.cpp index e126baa764b370..5b045c06860f22 100644 --- a/be/src/common/daemon.cpp +++ b/be/src/common/daemon.cpp @@ -180,12 +180,12 @@ static void init_doris_metrics(const std::vector& store_paths) { if (init_system_metrics) { auto st = DiskInfo::get_disk_devices(paths, &disk_devices); if (!st.ok()) { - LOG(WARNING) << "get disk devices failed, stauts=" << st.get_error_msg(); + LOG(WARNING) << "get disk devices failed, status=" << st.get_error_msg(); return; } st = get_inet_interfaces(&network_interfaces); if (!st.ok()) { - LOG(WARNING) << "get inet interfaces failed, stauts=" << st.get_error_msg(); + LOG(WARNING) << "get inet interfaces failed, status=" << st.get_error_msg(); return; } } diff --git a/be/src/env/env.h b/be/src/env/env.h index 8cbaaf7ad8c3d4..18a1cc4d98b46c 100644 --- a/be/src/env/env.h +++ b/be/src/env/env.h @@ -123,7 +123,7 @@ class Env { // // The function call extra cost is acceptable. Compared with returning all children // into a given vector, the performance of this method is 5% worse. However this - // approach is more flexiable and efficient in fulfilling other requirements. + // approach is more flexible and efficient in fulfilling other requirements. // // Returns OK if "dir" exists. // NotFound if "dir" does not exist, the calling process does not have diff --git a/be/src/exec/broker_writer.cpp b/be/src/exec/broker_writer.cpp index e7305cceeb03d1..4474cd06ae9206 100644 --- a/be/src/exec/broker_writer.cpp +++ b/be/src/exec/broker_writer.cpp @@ -147,7 +147,7 @@ Status BrokerWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_l return status; } - // we do not re-try simplely, because broker server may already write data + // we do not simply re-try, because the broker server may have already written data try { client->pwrite(response, request); } catch (apache::thrift::transport::TTransportException& e) { diff --git a/be/src/exec/decompressor.cpp b/be/src/exec/decompressor.cpp index aeb28a2ef8c6de..c64a0174a7da3a 100644 --- a/be/src/exec/decompressor.cpp +++ b/be/src/exec/decompressor.cpp @@ -131,7 +131,7 @@ Status GzipDecompressor::decompress( ret = inflateReset(&_z_strm); if (ret != Z_OK) { std::stringstream ss; - ss << "Failed to inflateRset. return code: " << ret; + ss << "Failed to inflateReset. 
return code: " << ret; return Status::InternalError(ss.str()); } } else if (ret != Z_OK) { diff --git a/be/src/exec/es/es_predicate.cpp b/be/src/exec/es/es_predicate.cpp index c38fab9ebfe925..6c6b609c996ef8 100644 --- a/be/src/exec/es/es_predicate.cpp +++ b/be/src/exec/es/es_predicate.cpp @@ -229,7 +229,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { // process binary predicate if (TExprNodeType::BINARY_PRED == conjunct->node_type()) { if (conjunct->children().size() != 2) { - return Status::InternalError("build disjuncts failed: number of childs is not 2"); + return Status::InternalError("build disjuncts failed: number of children is not 2"); } SlotRef* slot_ref = nullptr; TExprOpcode::type op; @@ -238,7 +238,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { // doris on es should ignore this doris native cast transformation, we push down this `cast` to elasticsearch // conjunct->get_child(0)->node_type() return CAST_EXPR // conjunct->get_child(1)->node_type()return FLOAT_LITERAL - // the left child is literal and right child is SlotRef maybe not happend, but here we just process + // the left child is literal and right child is SlotRef maybe not happened, but here we just process // this situation regardless of the rewrite logic from the FE's Query Engine if (TExprNodeType::SLOT_REF == conjunct->get_child(0)->node_type() || TExprNodeType::CAST_EXPR == conjunct->get_child(0)->node_type()) { @@ -288,7 +288,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { std::string fname = conjunct->fn().name.function_name; if (fname == "esquery") { if (conjunct->children().size() != 2) { - return Status::InternalError("build disjuncts failed: number of childs is not 2"); + return Status::InternalError("build disjuncts failed: number of children is not 2"); } Expr* expr = conjunct->get_child(1); ExtLiteral literal(expr->type().type, _context->get_value(expr, NULL)); @@ -310,7 +310,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { _disjuncts.push_back(predicate); } else if (fname == "is_null_pred" || fname == "is_not_null_pred") { if (conjunct->children().size() != 1) { - return Status::InternalError("build disjuncts failed: number of childs is not 1"); + return Status::InternalError("build disjuncts failed: number of children is not 1"); } // such as sub-query: select * from (select split_part(k, "_", 1) as new_field from table) t where t.new_field > 1; // conjunct->get_child(0)->node_type() == TExprNodeType::FUNCTION_CALL, at present doris on es can not support push down function @@ -330,7 +330,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { _disjuncts.push_back(predicate); } else if (fname == "like") { if (conjunct->children().size() != 2) { - return Status::InternalError("build disjuncts failed: number of childs is not 2"); + return Status::InternalError("build disjuncts failed: number of children is not 2"); } SlotRef* slot_ref = nullptr; Expr* expr = nullptr; @@ -426,7 +426,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { return Status::OK(); } if (TExprNodeType::COMPOUND_PRED == conjunct->node_type()) { - // processe COMPOUND_AND, such as: + // process COMPOUND_AND, such as: // k = 1 or (k1 = 7 and (k2 in (6,7) or k3 = 12)) // k1 = 7 and (k2 in (6,7) or k3 = 12) is compound pred, we should rebuild this sub tree if (conjunct->op() == TExprOpcode::COMPOUND_AND) { diff --git a/be/src/exec/es/es_query_builder.cpp b/be/src/exec/es/es_query_builder.cpp index 
64f3d173d96c9f..a448ecc7c03738 100644 --- a/be/src/exec/es/es_query_builder.cpp +++ b/be/src/exec/es/es_query_builder.cpp @@ -243,9 +243,9 @@ BooleanQueryBuilder::BooleanQueryBuilder(const std::vector& predi break; } case TExprNodeType::IS_NULL_PRED: { - ExtIsNullPredicate* is_null_preidicate = (ExtIsNullPredicate *)predicate; - ExistsQueryBuilder* exists_query = new ExistsQueryBuilder(*is_null_preidicate); - if (is_null_preidicate->is_not_null) { + ExtIsNullPredicate* is_null_predicate = (ExtIsNullPredicate *)predicate; + ExistsQueryBuilder* exists_query = new ExistsQueryBuilder(*is_null_predicate); + if (is_null_predicate->is_not_null) { _should_clauses.push_back(exists_query); } else { BooleanQueryBuilder* bool_query = new BooleanQueryBuilder(); diff --git a/be/src/exec/es/es_query_builder.h b/be/src/exec/es/es_query_builder.h index 2a9da61dee7549..52222efbab9e20 100644 --- a/be/src/exec/es/es_query_builder.h +++ b/be/src/exec/es/es_query_builder.h @@ -90,7 +90,7 @@ class WildCardQueryBuilder : public QueryBuilder { std::string _field; }; -// no predicates: all doccument match +// no predicates: all documents match class MatchAllQueryBuilder : public QueryBuilder { public: @@ -109,7 +109,7 @@ class ExistsQueryBuilder : public QueryBuilder { std::string _field; }; -// proccess bool compound query, and play the role of a bridge for transferring predicates to es native query +// process bool compound query, and play the role of a bridge for transferring predicates to es native query class BooleanQueryBuilder : public QueryBuilder { public: diff --git a/be/src/exec/es/es_scan_reader.h b/be/src/exec/es/es_scan_reader.h index 52b936caf9e93d..c428770e02a699 100644 --- a/be/src/exec/es/es_scan_reader.h +++ b/be/src/exec/es/es_scan_reader.h @@ -61,7 +61,7 @@ class ESScanReader { std::string _type; // push down filter std::string _query; - // elaticsearch shards to fetch document + // Elasticsearch shards to fetch documents std::string _shards; // distinguish the first scroll phase and the following scroll bool _is_first; @@ -81,7 +81,7 @@ class ESScanReader { // Each call to the scroll API returns the next batch of results until there are no more results left to return std::string _next_scroll_url; - // _search_url used to exeucte just only one search request to Elasticsearch + // _search_url is used to execute only one search request to Elasticsearch // _search_url would go into effect when `limit` specified: // select * from es_table limit 10 -> /es_table/doc/_search?terminate_after=10 std::string _search_url; @@ -91,7 +91,7 @@ class ESScanReader { std::string _cached_response; // keep-alive for es scroll std::string _scroll_keep_alive; - // timeout for es http connetion + // timeout for es http connection int _http_timeout_ms; bool _exactly_once; diff --git a/be/src/exec/es/es_scroll_parser.cpp b/be/src/exec/es/es_scroll_parser.cpp index 2fcc5ddc1ff2ce..4e0ccfe5174fd7 100644 --- a/be/src/exec/es/es_scroll_parser.cpp +++ b/be/src/exec/es/es_scroll_parser.cpp @@ -86,7 +86,7 @@ static const string ERROR_COL_DATA_IS_ARRAY = "Data source returned an array for ss << "Expected value of type: " \ << type_to_string(type) \ << "; but found type: " << json_type_to_string(col.GetType()) \ - << "; Docuemnt slice is : " << json_value_to_string(col); \ + << "; Document slice is : " << json_value_to_string(col); \ return Status::RuntimeError(ss.str()); \ } \ } while (false) @@ -99,7 +99,7 @@ static const string ERROR_COL_DATA_IS_ARRAY = "Data source returned an array for ss << "Expected value of type: " \ << 
type_to_string(type) \ << "; but found type: " << json_type_to_string(col.GetType()) \ - << "; Docuemnt source slice is : " << json_value_to_string(col); \ + << "; Document source slice is : " << json_value_to_string(col); \ return Status::RuntimeError(ss.str()); \ } \ } while (false) @@ -123,7 +123,7 @@ static const string ERROR_COL_DATA_IS_ARRAY = "Data source returned an array for ss << "Expected value of type: " \ << type_to_string(type) \ << "; but found type: " << json_type_to_string(col.GetType()) \ - << "; Docuemnt source slice is : " << json_value_to_string(col); \ + << "; Document source slice is : " << json_value_to_string(col); \ return Status::RuntimeError(ss.str()); \ } \ } while (false) @@ -134,7 +134,7 @@ static const string ERROR_COL_DATA_IS_ARRAY = "Data source returned an array for ss << "Expected value of type: " \ << type_to_string(type) \ << "; but found type: " << json_type_to_string(col.GetType()) \ - << "; Docuemnt slice is : " << json_value_to_string(col); \ + << "; Document slice is : " << json_value_to_string(col); \ return Status::RuntimeError(ss.str()); \ } while (false) @@ -219,7 +219,7 @@ Status ScrollParser::parse(const std::string& scroll_result, bool exactly_once) } if (!exactly_once && !_document_node.HasMember(FIELD_SCROLL_ID)) { - LOG(WARNING) << "Document has not a scroll id field scroll reponse:" << scroll_result; + LOG(WARNING) << "Document has no scroll id field, scroll response:" << scroll_result; return Status::InternalError("Document has not a scroll id field"); } @@ -454,7 +454,7 @@ Status ScrollParser::fill_tuple(const TupleDescriptor* tuple_desc, // Doris On ES needs to be consistent with ES, so just divided by 1000 because the unit for from_unixtime is seconds RETURN_IF_ERROR(fill_date_slot_with_timestamp(slot, col, type)); } else if (col.IsArray() && pure_doc_value) { - // this would happend just only when `enable_docvalue_scan = true` + // this would happen only when `enable_docvalue_scan = true` // ES add default format for all field after ES 6.4, if we not provided format for `date` field ES would impose // a standard date-format for date field as `2020-06-16T00:00:00.000Z` // At present, we just process this string format date. 
After some PR were merged into Doris, we would impose `epoch_mills` for @@ -466,7 +466,7 @@ Status ScrollParser::fill_tuple(const TupleDescriptor* tuple_desc, // ES would return millisecond timestamp for date field, divided by 1000 because the unit for from_unixtime is seconds RETURN_IF_ERROR(fill_date_slot_with_timestamp(slot, col[0], type)); } else { - // this would happend just only when `enable_docvalue_scan = false`, and field has string format date from _source + // this would happen only when `enable_docvalue_scan = false`, and the field has a string format date from _source RETURN_ERROR_IF_COL_IS_ARRAY(col, type); RETURN_ERROR_IF_COL_IS_NOT_STRING(col, type); RETURN_IF_ERROR(fill_date_slot_with_strval(slot, col, type)); diff --git a/be/src/exec/es/es_scroll_query.cpp b/be/src/exec/es/es_scroll_query.cpp index 90d68f05f7d1be..0f42bf2c328dd1 100644 --- a/be/src/exec/es/es_scroll_query.cpp +++ b/be/src/exec/es/es_scroll_query.cpp @@ -68,7 +68,7 @@ std::string ESScrollQueryBuilder::build(const std::map rapidjson::Document es_query_dsl; rapidjson::Document::AllocatorType &allocator = es_query_dsl.GetAllocator(); es_query_dsl.SetObject(); - // generate the filter caluse + // generate the filter clause rapidjson::Document scratch_document; rapidjson::Value query_node(rapidjson::kObjectType); query_node.SetObject(); @@ -128,7 +128,7 @@ std::string ESScrollQueryBuilder::build(const std::map rapidjson::Value field("_doc", allocator); sort_node.PushBack(field, allocator); es_query_dsl.AddMember("sort", sort_node, allocator); - // number of docuements returned + // number of documents returned es_query_dsl.AddMember("size", size, allocator); rapidjson::StringBuffer buffer; rapidjson::Writer writer(buffer); diff --git a/be/src/exec/es/es_scroll_query.h b/be/src/exec/es/es_scroll_query.h index ed5e7e0dd7cab6..f57df642ae824e 100644 --- a/be/src/exec/es/es_scroll_query.h +++ b/be/src/exec/es/es_scroll_query.h @@ -33,7 +33,7 @@ class ESScrollQueryBuilder { static std::string build_next_scroll_body(const std::string& scroll_id, const std::string& scroll); static std::string build_clear_scroll_body(const std::string& scroll_id); // @note: predicates should processed before pass it to this method, - // tie breaker for predicate wheather can push down es can reference the push-down filters + // the tie breaker for whether a predicate can be pushed down to ES can reference the push-down filters static std::string build(const std::map& properties, const std::vector& fields, std::vector& predicates, const std::map& docvalue_context, bool* doc_value_mode); diff --git a/be/src/exec/es_http_scan_node.cpp b/be/src/exec/es_http_scan_node.cpp index 9cc9b25f32453a..e86202a6131bdd 100644 --- a/be/src/exec/es_http_scan_node.cpp +++ b/be/src/exec/es_http_scan_node.cpp @@ -385,7 +385,7 @@ Status EsHttpScanNode::scanner_scan( if (_runtime_state->is_cancelled()) { return Status::Cancelled("Cancelled"); } - // Queue size Must be samller than _max_buffered_batches + // Queue size must be smaller than _max_buffered_batches _batch_queue.push_back(row_batch); // Notify reader to @@ -430,7 +430,7 @@ void EsHttpScanNode::scanner_worker(int start_idx, int length, std::promise properties(_properties); properties[ESScanReader::KEY_INDEX] = es_scan_range.index; if (es_scan_range.__isset.type) { diff --git a/be/src/exec/es_http_scan_node.h b/be/src/exec/es_http_scan_node.h index 89cca57ec694e0..7c33f34ee7a54c 100644 --- a/be/src/exec/es_http_scan_node.h +++ b/be/src/exec/es_http_scan_node.h @@ -73,7 +73,7 @@ class EsHttpScanNode : public 
ScanNode { // Collect all scanners 's status Status collect_scanners_status(); - // One scanner worker, This scanner will hanle 'length' ranges start from start_idx + // One scanner worker. This scanner will handle 'length' ranges starting from start_idx void scanner_worker(int start_idx, int length, std::promise& p_status); // Scan one range diff --git a/be/src/exec/es_scan_node.cpp b/be/src/exec/es_scan_node.cpp index d4d68d3940ae95..30c00fd1e58a84 100644 --- a/be/src/exec/es_scan_node.cpp +++ b/be/src/exec/es_scan_node.cpp @@ -400,7 +400,7 @@ bool EsScanNode::get_disjuncts(ExprContext* context, Expr* conjunct, vector& disjuncts) { if (TExprNodeType::BINARY_PRED == conjunct->node_type()) { if (conjunct->children().size() != 2) { - VLOG(1) << "get disjuncts fail: number of childs is not 2"; + VLOG(1) << "get disjuncts fail: number of children is not 2"; return false; } SlotRef* slotRef; diff --git a/be/src/exec/except_node.cpp b/be/src/exec/except_node.cpp index 237b9b2c89b3f3..18db629881a045 100644 --- a/be/src/exec/except_node.cpp +++ b/be/src/exec/except_node.cpp @@ -49,7 +49,7 @@ Status ExceptNode::open(RuntimeState* state) { bool eos = false; for (int i = 1; i < _children.size(); ++i) { - // rebuid hash table, for first time will rebuild with the no duplicated _hash_tbl, + // rebuild hash table; the first time it will rebuild with the non-duplicated _hash_tbl, if (i > 1) { SCOPED_TIMER(_build_timer); std::unique_ptr temp_tbl( diff --git a/be/src/exec/except_node.h b/be/src/exec/except_node.h index 30a987b25b005f..9ea163289c0b9b 100644 --- a/be/src/exec/except_node.h +++ b/be/src/exec/except_node.h @@ -25,7 +25,7 @@ class MemPool; class RowBatch; class TupleRow; -// Node that calulate the except results of its children by either materializing their +// Node that calculates the except results of its children by either materializing their // evaluated expressions into row batches or passing through (forwarding) the // batches if the input tuple layout is identical to the output tuple layout // and expressions don't need to be evaluated. The except node pulls from its diff --git a/be/src/exec/hash_join_node_ir.cpp b/be/src/exec/hash_join_node_ir.cpp index 518eb767faa460..d301201e2e0c9a 100644 --- a/be/src/exec/hash_join_node_ir.cpp +++ b/be/src/exec/hash_join_node_ir.cpp @@ -27,7 +27,7 @@ namespace doris { // This lets us distinguish between the join conjuncts vs. non-join conjuncts // for codegen. // Note: don't declare this static. LLVM will pick the fastcc calling convention and -// we will not be able to replace the funcitons with codegen'd versions. +// we will not be able to replace the functions with codegen'd versions. // TODO: explicitly set the calling convention? // TODO: investigate using fastcc for all codegen internal functions? bool IR_NO_INLINE eval_other_join_conjuncts(ExprContext* const* ctxs, int num_ctxs, TupleRow* row) { diff --git a/be/src/exec/hash_table.h b/be/src/exec/hash_table.h index 010c9b7d9644ec..ddf7f36bed7f58 100644 --- a/be/src/exec/hash_table.h +++ b/be/src/exec/hash_table.h @@ -113,7 +113,7 @@ class HashTable { insert_impl(row); } - // Insert row into the hash table. if the row is alread exist will not insert + // Insert row into the hash table. 
if the row already exists it will not be inserted void IR_ALWAYS_INLINE insert_unique(TupleRow* row) { if (find(row, false) == end()) { insert(row); @@ -379,7 +379,7 @@ class HashTable { const int _num_build_tuples; // outer join || has null equal join should be true const bool _stores_nulls; - // true: the null-safe equal '<=>' is true. The row with null shoud be judged. + // true: the null-safe equal '<=>' is true. The row with null should be judged. // false: the equal '=' is false. The row with null should be filtered. const std::vector _finds_nulls; diff --git a/be/src/exec/intersect_node.h b/be/src/exec/intersect_node.h index c7290630360b29..554a9e4e4339e4 100644 --- a/be/src/exec/intersect_node.h +++ b/be/src/exec/intersect_node.h @@ -25,12 +25,12 @@ class MemPool; class RowBatch; class TupleRow; -// Node that calulate the intersect results of its children by either materializing their +// Node that calculates the intersect results of its children by either materializing their // evaluated expressions into row batches or passing through (forwarding) the // batches if the input tuple layout is identical to the output tuple layout // and expressions don't need to be evaluated. The children should be ordered // such that all passthrough children come before the children that need -// materialization. The interscet node pulls from its children sequentially, i.e. +// materialization. The intersect node pulls from its children sequentially, i.e. // it exhausts one child completely before moving on to the next one. class IntersectNode : public SetOperationNode { public: diff --git a/be/src/exec/json_scanner.cpp b/be/src/exec/json_scanner.cpp index de96866b44d83a..aa0633b0337e10 100644 --- a/be/src/exec/json_scanner.cpp +++ b/be/src/exec/json_scanner.cpp @@ -445,9 +445,9 @@ void JsonReader::_set_tuple_value(rapidjson::Value& objectValue, Tuple* tuple, c * handle input a simple json. * A json is a simple json only when user not specifying the json path. * For example: - * case 1. [{"colunm1":"value1", "colunm2":10}, {"colunm1":" -", "colunm2":30}] - * case 2. {"colunm1":"value1", "colunm2":10} + * case 1. [{"column1":"value1", "column2":10}, {"column1":" +", "column2":30}] + * case 2. 
{"column1":"value1", "column2":10} */ Status JsonReader::_handle_simple_json(Tuple* tuple, const std::vector& slot_descs, MemPool* tuple_pool, bool* eof) { do { @@ -570,9 +570,9 @@ Status JsonReader::_handle_nested_complex_json(Tuple* tuple, const std::vector _left_child_ctx; diff --git a/be/src/exec/odbc_scanner.cpp b/be/src/exec/odbc_scanner.cpp index fbaccc62d44395..a83b8cc14cdf98 100644 --- a/be/src/exec/odbc_scanner.cpp +++ b/be/src/exec/odbc_scanner.cpp @@ -30,7 +30,7 @@ } \ if (rc == SQL_ERROR) \ { \ - auto err_msg = std::string("Errro in") + std::string(op); \ + auto err_msg = std::string("Error in") + std::string(op); \ return Status::InternalError(err_msg.c_str()); \ } \ } \ @@ -105,12 +105,12 @@ Status ODBCScanner::query() { // Allocate a statement handle ODBC_DISPOSE(_dbc, SQL_HANDLE_DBC, SQLAllocHandle(SQL_HANDLE_STMT, _dbc, &_stmt), "alloc statement"); - // Translate utf8 string to utf16 to use unicode codeing + // Translate utf8 string to utf16 to use unicode code auto wquery = utf8_to_wstring(_sql_str); ODBC_DISPOSE(_stmt, SQL_HANDLE_STMT, SQLExecDirectW(_stmt, (SQLWCHAR*)(wquery.c_str()), SQL_NTS), "exec direct"); // How many columns are there */ - ODBC_DISPOSE(_stmt, SQL_HANDLE_STMT, SQLNumResultCols(_stmt, &_field_num), "count num colomn"); + ODBC_DISPOSE(_stmt, SQL_HANDLE_STMT, SQLNumResultCols(_stmt, &_field_num), "count num column"); LOG(INFO) << "execute success:" << _sql_str << " column count:" << _field_num; diff --git a/be/src/exec/olap_common.h b/be/src/exec/olap_common.h index 6cba3ef36983ef..8eda6266d9a15d 100644 --- a/be/src/exec/olap_common.h +++ b/be/src/exec/olap_common.h @@ -429,7 +429,7 @@ Status ColumnValueRange::add_range(SQLFilterOp op, T value) { } default: { - return Status::InternalError("AddRangefail! Unsupport SQLFilterOp."); + return Status::InternalError("Add Range fail! Unsupported SQLFilterOp."); } } @@ -476,7 +476,7 @@ Status ColumnValueRange::add_range(SQLFilterOp op, T value) { } default: { - return Status::InternalError("AddRangefail! Unsupport SQLFilterOp."); + return Status::InternalError("Add Range fail! Unsupported SQLFilterOp."); } } } diff --git a/be/src/exec/olap_rewrite_node.cpp b/be/src/exec/olap_rewrite_node.cpp index d113fe7265cf70..d592c5fe2af554 100644 --- a/be/src/exec/olap_rewrite_node.cpp +++ b/be/src/exec/olap_rewrite_node.cpp @@ -124,10 +124,10 @@ bool OlapRewriteNode::copy_one_row(TupleRow* src_row, Tuple* tuple, for (int i = 0; i < _columns.size(); ++i) { void* src_value = _columns[i]->get_value(src_row); SlotDescriptor* slot_desc = _output_tuple_desc->slots()[i]; - // support null for insert into statment + // support null for insert into statement if (!slot_desc->is_nullable()) { if (src_value == nullptr) { - //column in target table satify not null constraint + //column in target table satisfy not null constraint (*ss) << "column(" << slot_desc->col_name() << ")'s value is null"; return false; } diff --git a/be/src/exec/olap_scan_node.cpp b/be/src/exec/olap_scan_node.cpp index 43acfa82cb9b6a..831304ff4eb375 100644 --- a/be/src/exec/olap_scan_node.cpp +++ b/be/src/exec/olap_scan_node.cpp @@ -368,11 +368,11 @@ Status OlapScanNode::start_scan(RuntimeState* state) { RETURN_IF_ERROR(build_olap_filters()); VLOG(1) << "BuildScanKey"; - // 4. Using `Key Column`'s ColumnValueRange to split ScanRange to serval `Sub ScanRange` + // 4. Using `Key Column`'s ColumnValueRange to split ScanRange to several `Sub ScanRange` RETURN_IF_ERROR(build_scan_key()); VLOG(1) << "StartScanThread"; - // 6. 
Start multi thread to read serval `Sub Sub ScanRange` + // 6. Start multiple threads to read several `Sub Sub ScanRange` RETURN_IF_ERROR(start_scan_thread(state)); return Status::OK(); @@ -489,7 +489,7 @@ Status OlapScanNode::normalize_conjuncts() { } default: { - VLOG(2) << "Unsupport Normalize Slot [ColName=" + VLOG(2) << "Unsupported Normalize Slot [ColName=" << slots[slot_idx]->col_name() << "]"; break; } @@ -916,9 +916,9 @@ Status OlapScanNode::normalize_in_and_eq_predicate(SlotDescriptor* slot, ColumnV break; } default: { - LOG(WARNING) << "Normalize filter fail, Unsupport Primitive type. [type=" + LOG(WARNING) << "Normalize filter fail, Unsupported Primitive type. [type=" << expr->type() << "]"; - return Status::InternalError("Normalize filter fail, Unsupport Primitive type"); + return Status::InternalError("Normalize filter fail, Unsupported Primitive type"); } } @@ -1064,9 +1064,9 @@ Status OlapScanNode::normalize_noneq_binary_predicate(SlotDescriptor* slot, Colu } default: { - LOG(WARNING) << "Normalize filter fail, Unsupport Primitive type. [type=" + LOG(WARNING) << "Normalize filter fail, Unsupported Primitive type. [type=" << expr->type() << "]"; - return Status::InternalError("Normalize filter fail, Unsupport Primitive type"); + return Status::InternalError("Normalize filter fail, Unsupported Primitive type"); } } @@ -1175,7 +1175,7 @@ void OlapScanNode::transfer_thread(RuntimeState* state) { RowBatchInterface* scan_batch = NULL; { - // 1 scanner idle task not empty, assign new sanner task + // 1 scanner idle task not empty, assign new scanner task std::unique_lock l(_scan_batches_lock); // scanner_row_num = 16k @@ -1198,7 +1198,7 @@ void OlapScanNode::transfer_thread(RuntimeState* state) { scan_batch = _scan_row_batches.front(); _scan_row_batches.pop_front(); - // delete scan_batch if transfer thread should be stoped + // delete scan_batch if transfer thread should be stopped // because scan_batch wouldn't be useful anymore if (UNLIKELY(_transfer_done)) { delete scan_batch; diff --git a/be/src/exec/olap_scan_node.h b/be/src/exec/olap_scan_node.h index 959b3c5adde5eb..f15217dfc95260 100644 --- a/be/src/exec/olap_scan_node.h +++ b/be/src/exec/olap_scan_node.h @@ -39,7 +39,7 @@ enum TransferStatus { INIT_HEAP = 2, BUILD_ROWBATCH = 3, MERGE = 4, - FININSH = 5, + FINISH = 5, ADD_ROWBATCH = 6, ERROR = 7 }; @@ -292,7 +292,7 @@ class OlapScanNode : public ScanNode { // number of created olap scanners RuntimeProfile::Counter* _num_scanners = nullptr; - // number of segment filted by column stat when creating seg iterator + // number of segments filtered by column stat when creating seg iterator RuntimeProfile::Counter* _filtered_segment_counter = nullptr; // total number of segment related to this scan node RuntimeProfile::Counter* _total_segment_counter = nullptr; diff --git a/be/src/exec/olap_scanner.cpp b/be/src/exec/olap_scanner.cpp index bdf2c2fa060cff..6044c924a9235e 100644 --- a/be/src/exec/olap_scanner.cpp +++ b/be/src/exec/olap_scanner.cpp @@ -217,7 +217,7 @@ Status OlapScanner::_init_return_columns() { int32_t index = _tablet->field_index(slot->col_name()); if (index < 0) { std::stringstream ss; - ss << "field name is invalied. field=" << slot->col_name(); + ss << "field name is invalid. 
field=" << slot->col_name(); LOG(WARNING) << ss.str(); return Status::InternalError(ss.str()); } @@ -285,7 +285,7 @@ Status OlapScanner::get_batch( VLOG_ROW << "OlapScanner input row: " << Tuple::to_string(tuple, *_tuple_desc); } - // 3.4 Set tuple to RowBatch(not commited) + // 3.4 Set tuple to RowBatch(not committed) int row_idx = batch->add_row(); TupleRow* row = batch->get_row(row_idx); row->set_tuple(_tuple_idx, tuple); diff --git a/be/src/exec/olap_scanner.h b/be/src/exec/olap_scanner.h index 0aed6ea3f84d9c..b6ca6d84bb90b1 100644 --- a/be/src/exec/olap_scanner.h +++ b/be/src/exec/olap_scanner.h @@ -104,7 +104,7 @@ class OlapScanner { RuntimeState* _runtime_state; OlapScanNode* _parent; - const TupleDescriptor* _tuple_desc; /**< tuple descripter */ + const TupleDescriptor* _tuple_desc; /**< tuple descriptor */ RuntimeProfile* _profile; const std::vector& _string_slots; diff --git a/be/src/exec/olap_utils.h b/be/src/exec/olap_utils.h index 4925e895b35aad..8a73c5b26b3f24 100644 --- a/be/src/exec/olap_utils.h +++ b/be/src/exec/olap_utils.h @@ -76,7 +76,7 @@ inline CompareLargeFunc get_compare_func(PrimitiveType type) { return compare_large; default: - DCHECK(false) << "Unsupport Compare type"; + DCHECK(false) << "Unsupported Compare type"; } } diff --git a/be/src/exec/partitioned_aggregation_node.h b/be/src/exec/partitioned_aggregation_node.h index e83b05fe713a70..bedadb171a3f01 100644 --- a/be/src/exec/partitioned_aggregation_node.h +++ b/be/src/exec/partitioned_aggregation_node.h @@ -68,7 +68,7 @@ class SlotDescriptor; /// In the case where the aggregate function does not return a string (meaning the /// size of all the slots is known when the row is constructed), this stream contains /// all the memory for the result rows and the MemPool (2) is not used. -/// 4) Unaggregated tuple stream. Stream to spill unaggregated rows. +/// 4) Aggregated tuple stream. Stream to spill aggregated rows. /// Rows in this stream always have child(0)'s layout. /// /// Buffering: Each stream and hash table needs to maintain at least one buffer for diff --git a/be/src/exec/partitioned_hash_table.h b/be/src/exec/partitioned_hash_table.h index 13209d37c85720..aec9a31cb86308 100644 --- a/be/src/exec/partitioned_hash_table.h +++ b/be/src/exec/partitioned_hash_table.h @@ -528,7 +528,7 @@ class PartitionedHashTable { }; struct Bucket { - /// Whether this bucket contains a vaild entry, or it is empty. + /// Whether this bucket contains a valid entry, or it is empty. bool filled; /// Used for full outer and right {outer, anti, semi} joins. Indicates whether the @@ -639,13 +639,13 @@ class PartitionedHashTable { /// Returns the number of bucket_with_duplicates int64_t num_duplicates_nodes() const { return num_duplicate_nodes_; } - /// Returns the number of probe opertions + /// Returns the number of probe operations int64_t num_probe() const { return num_probes_; } - /// Returns the number of failed probe opertions + /// Returns the number of failed probe operations int64_t num_failed_probe() const { return num_failed_probes_; } - /// Returns the number of travel_length of probe opertions + /// Returns the number of travel_length of probe operations int64_t travel_length() const { return travel_length_; } /// Returns the load factor (the number of non-empty buckets) @@ -929,7 +929,7 @@ class PartitionedHashTable { /// Byte size of all buffers in data_pages_. int64_t total_data_page_size_; - /// Next duplicate node to insert. Vaild when node_remaining_current_page_ > 0. + /// Next duplicate node to insert. 
Valid when node_remaining_current_page_ > 0. DuplicateNode* next_node_; /// Number of nodes left in the current page. diff --git a/be/src/exec/plain_text_line_reader.cpp b/be/src/exec/plain_text_line_reader.cpp index 6f7198ca1e9cdc..634640cc3659dc 100644 --- a/be/src/exec/plain_text_line_reader.cpp +++ b/be/src/exec/plain_text_line_reader.cpp @@ -28,7 +28,7 @@ // #define INPUT_CHUNK (34) #define OUTPUT_CHUNK (8 * 1024 * 1024) // #define OUTPUT_CHUNK (32) -// leave these 2 size small for debuging +// leave these 2 sizes small for debugging namespace doris { @@ -114,7 +114,7 @@ void PlainTextLineReader::extend_input_buf() { capacity = capacity + _input_buf_pos; if (capacity >= _more_input_bytes) { - // move the read remainings to the begining of the current input buf, + // move the remaining read data to the beginning of the current input buf, memmove(_input_buf, _input_buf + _input_buf_pos, input_buf_read_remaining()); _input_buf_limit -= _input_buf_pos; _input_buf_pos = 0; @@ -155,7 +155,7 @@ void PlainTextLineReader::extend_output_buf() { // 2. try reuse buf capacity = capacity + _output_buf_pos; if (capacity >= target) { - // move the read remainings to the begining of the current output buf, + // move the remaining read data to the beginning of the current output buf, memmove(_output_buf, _output_buf + _output_buf_pos, output_buf_read_remaining()); _output_buf_limit -= _output_buf_pos; _output_buf_pos = 0; @@ -256,7 +256,7 @@ Status PlainTextLineReader::read_line(const uint8_t** ptr, size_t* size, bool* e _output_buf_limit += read_len; _stream_end = true; } else { - // only update inpub limit. + // only update input limit. // input pos is set at MARK step _input_buf_limit += read_len; } diff --git a/be/src/exec/repeat_node.h b/be/src/exec/repeat_node.h index 9f734f713c8fab..7e70acb3e84d51 100644 --- a/be/src/exec/repeat_node.h +++ b/be/src/exec/repeat_node.h @@ -50,7 +50,7 @@ class RepeatNode : public ExecNode { // An integer bitmap list, it indicates the bit position of the exprs not null. std::vector _repeat_id_list; std::vector> _grouping_list; - // Tulple id used for output, it has new slots. + // Tuple id used for output, it has new slots. 
TupleId _output_tuple_id; const TupleDescriptor* _tuple_desc; diff --git a/be/src/exec/schema_scanner/schema_columns_scanner.cpp b/be/src/exec/schema_scanner/schema_columns_scanner.cpp index f18fc526b0523b..affeac34382e50 100644 --- a/be/src/exec/schema_scanner/schema_columns_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_columns_scanner.cpp @@ -87,7 +87,7 @@ Status SchemaColumnsScanner::start(RuntimeState *state) { RETURN_IF_ERROR(SchemaHelper::get_db_names(*(_param->ip), _param->port, db_params, &_db_result)); } else { - return Status::InternalError("IP or port dosn't exists"); + return Status::InternalError("IP or port doesn't exist"); } return Status::OK(); @@ -107,7 +107,7 @@ std::string SchemaColumnsScanner::to_mysql_data_type_string(TColumnDesc &desc) { case TPrimitiveType::BIGINT: return "bigint"; case TPrimitiveType::LARGEINT: - return "bigint unsinged"; + return "bigint unsigned"; case TPrimitiveType::FLOAT: return "float"; case TPrimitiveType::DOUBLE: @@ -142,7 +142,7 @@ std::string SchemaColumnsScanner::type_to_string(TColumnDesc &desc) { case TPrimitiveType::BIGINT: return "bigint(20)"; case TPrimitiveType::LARGEINT: - return "bigint(20) unsinged"; + return "bigint(20) unsigned"; case TPrimitiveType::FLOAT: return "float"; case TPrimitiveType::DOUBLE: @@ -393,7 +393,7 @@ Status SchemaColumnsScanner::get_new_desc() { RETURN_IF_ERROR(SchemaHelper::describe_table(*(_param->ip), _param->port, desc_params, &_desc_result)); } else { - return Status::InternalError("IP or port dosn't exists"); + return Status::InternalError("IP or port doesn't exist"); } _column_index = 0; @@ -421,7 +421,7 @@ Status SchemaColumnsScanner::get_new_table() { RETURN_IF_ERROR(SchemaHelper::get_table_names(*(_param->ip), _param->port, table_params, &_table_result)); } else { - return Status::InternalError("IP or port dosn't exists"); + return Status::InternalError("IP or port doesn't exist"); } _table_index = 0; return Status::OK(); diff --git a/be/src/exec/schema_scanner/schema_helper.cpp b/be/src/exec/schema_scanner/schema_helper.cpp index 2efa61a7d4112d..edd03d3746a3ab 100644 --- a/be/src/exec/schema_scanner/schema_helper.cpp +++ b/be/src/exec/schema_scanner/schema_helper.cpp @@ -87,7 +87,7 @@ Status SchemaHelper::describe_table( }); } -Status SchemaHelper::show_varialbes( +Status SchemaHelper::show_variables( const std::string& ip, const int32_t port, const TShowVariableRequest &request, diff --git a/be/src/exec/schema_scanner/schema_helper.h b/be/src/exec/schema_scanner/schema_helper.h index 5b261f174718a5..964329dd0a5d1a 100644 --- a/be/src/exec/schema_scanner/schema_helper.h +++ b/be/src/exec/schema_scanner/schema_helper.h @@ -49,7 +49,7 @@ class SchemaHelper { const TDescribeTableParams &desc_params, TDescribeTableResult *desc_result); - static Status show_varialbes( + static Status show_variables( const std::string& ip, const int32_t port, const TShowVariableRequest &var_params, diff --git a/be/src/exec/schema_scanner/schema_variables_scanner.cpp b/be/src/exec/schema_scanner/schema_variables_scanner.cpp index 7fe508954eacce..d117475c48b0f3 100644 --- a/be/src/exec/schema_scanner/schema_variables_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_variables_scanner.cpp @@ -53,7 +53,7 @@ Status SchemaVariablesScanner::start(RuntimeState *state) { var_params.__set_threadId(_param->thread_id); if (NULL != _param->ip && 0 != _param->port) { - RETURN_IF_ERROR(SchemaHelper::show_varialbes(*(_param->ip), + RETURN_IF_ERROR(SchemaHelper::show_variables(*(_param->ip), _param->port, var_params, 
&_var_result)); } else { return Status::InternalError("IP or port doesn't exist"); diff --git a/be/src/exec/tablet_info.cpp b/be/src/exec/tablet_info.cpp index 703d941d993716..54628e47557ab6 100644 --- a/be/src/exec/tablet_info.cpp +++ b/be/src/exec/tablet_info.cpp @@ -300,7 +300,7 @@ Status OlapTablePartitionParam::_create_partition_key(const TExprNode& t_expr, T *reinterpret_cast(slot) = t_expr.int_literal.value; break; default: - DCHECK(false) << "unsupport int literal type, type=" << t_expr.type.types[0].type; + DCHECK(false) << "unsupported int literal type, type=" << t_expr.type.types[0].type; break; } break; diff --git a/be/src/exec/tablet_sink.cpp b/be/src/exec/tablet_sink.cpp index 1787fff778d5de..5fa6d17e7fabfb 100644 --- a/be/src/exec/tablet_sink.cpp +++ b/be/src/exec/tablet_sink.cpp @@ -422,7 +422,7 @@ Status IndexChannel::add_row(Tuple* tuple, int64_t tablet_id) { } bool IndexChannel::has_intolerable_failure() { - return _failed_channels.size() >= ((_parent->_num_repicas + 1) / 2); + return _failed_channels.size() >= ((_parent->_num_replicas + 1) / 2); } OlapTableSink::OlapTableSink(ObjectPool* pool, const RowDescriptor& row_desc, @@ -451,7 +451,7 @@ Status OlapTableSink::init(const TDataSink& t_sink) { _txn_id = table_sink.txn_id; _db_id = table_sink.db_id; _table_id = table_sink.table_id; - _num_repicas = table_sink.num_replicas; + _num_replicas = table_sink.num_replicas; _need_gen_rollup = table_sink.need_gen_rollup; _db_name = table_sink.db_name; _table_name = table_sink.table_name; @@ -859,7 +859,7 @@ int OlapTableSink::_validate_data(RuntimeState* state, RowBatch* batch, Bitmap* } } if (*dec_val > _max_decimal_val[i] || *dec_val < _min_decimal_val[i]) { - ss << "decimal value is not valid for defination, column=" << desc->col_name() + ss << "decimal value is not valid for definition, column=" << desc->col_name() << ", value=" << dec_val->to_string() << ", precision=" << desc->type().precision << ", scale=" << desc->type().scale; @@ -880,7 +880,7 @@ int OlapTableSink::_validate_data(RuntimeState* state, RowBatch* batch, Bitmap* } } if (dec_val > _max_decimalv2_val[i] || dec_val < _min_decimalv2_val[i]) { - ss << "decimal value is not valid for defination, column=" << desc->col_name() + ss << "decimal value is not valid for definition, column=" << desc->col_name() << ", value=" << dec_val.to_string() << ", precision=" << desc->type().precision << ", scale=" << desc->type().scale; diff --git a/be/src/exec/tablet_sink.h b/be/src/exec/tablet_sink.h index e41c4827cd8d52..09d7d3cf44f4ab 100644 --- a/be/src/exec/tablet_sink.h +++ b/be/src/exec/tablet_sink.h @@ -145,7 +145,7 @@ class NodeChannel { NodeChannel(OlapTableSink* parent, int64_t index_id, int64_t node_id, int32_t schema_hash); ~NodeChannel() noexcept; - // called before open, used to add tablet loacted in this backend + // called before open, used to add tablet located in this backend void add_tablet(const TTabletWithPartition& tablet) { _all_tablets.emplace_back(tablet); } Status init(RuntimeState* state); @@ -214,7 +214,7 @@ class NodeChannel { // send finished means the consumer thread which send the rpc can exit std::atomic _send_finished{false}; - // add batches finished means the last rpc has be responsed, used to check whether this channel can be closed + // add batches finished means the last rpc has been responded to, used to check whether this channel can be closed std::atomic _add_batches_finished{false}; bool _eos_is_produced{false}; // only for restricting producer behaviors @@ -278,7 +278,7 @@ class 
IndexChannel { // Write data to Olap Table. // When OlapTableSink::open() called, there will be a consumer thread running in the background. -// When you call OlapTableSink::send(), you will be the productor who products pending batches. +// When you call OlapTableSink::send(), you will be the producer who produces pending batches. // Join the consumer thread in close(). class OlapTableSink : public DataSink { public: @@ -313,7 +313,7 @@ class OlapTableSink : public DataSink { // the consumer func of sending pending batches in every NodeChannel. // use polling & NodeChannel::try_send_and_fetch_status() to achieve nonblocking sending. - // only focus on pending batches and channel status, the internal errors of NodeChannels will be handled by the productor + // only focus on pending batches and channel status, the internal errors of NodeChannels will be handled by the producer void _send_batch_process(); private: @@ -330,7 +330,7 @@ class OlapTableSink : public DataSink { int64_t _txn_id = -1; int64_t _db_id = -1; int64_t _table_id = -1; - int _num_repicas = -1; + int _num_replicas = -1; bool _need_gen_rollup = false; std::string _db_name; std::string _table_name; diff --git a/be/src/exprs/case_expr.cpp b/be/src/exprs/case_expr.cpp index b1fde7c203bad5..95a5b9e65b1784 100644 --- a/be/src/exprs/case_expr.cpp +++ b/be/src/exprs/case_expr.cpp @@ -220,20 +220,20 @@ bool CaseExpr::any_val_eq(const TypeDescriptor& type, const AnyVal* v1, const An return THEN_TYPE::null(); \ } -#define CASE_COMPUTE_FN_WAPPER(TYPE, TYPE_NAME) \ +#define CASE_COMPUTE_FN_WRAPPER(TYPE, TYPE_NAME) \ CASE_COMPUTE_FN(TYPE, TYPE_NAME) -CASE_COMPUTE_FN_WAPPER(BooleanVal, boolean_val) -CASE_COMPUTE_FN_WAPPER(TinyIntVal, tiny_int_val) -CASE_COMPUTE_FN_WAPPER(SmallIntVal, small_int_val) -CASE_COMPUTE_FN_WAPPER(IntVal, int_val) -CASE_COMPUTE_FN_WAPPER(BigIntVal, big_int_val) -CASE_COMPUTE_FN_WAPPER(FloatVal, float_val) -CASE_COMPUTE_FN_WAPPER(DoubleVal, double_val) -CASE_COMPUTE_FN_WAPPER(StringVal, string_val) -CASE_COMPUTE_FN_WAPPER(DateTimeVal, datetime_val) -CASE_COMPUTE_FN_WAPPER(DecimalVal, decimal_val) -CASE_COMPUTE_FN_WAPPER(DecimalV2Val, decimalv2_val) +CASE_COMPUTE_FN_WRAPPER(BooleanVal, boolean_val) +CASE_COMPUTE_FN_WRAPPER(TinyIntVal, tiny_int_val) +CASE_COMPUTE_FN_WRAPPER(SmallIntVal, small_int_val) +CASE_COMPUTE_FN_WRAPPER(IntVal, int_val) +CASE_COMPUTE_FN_WRAPPER(BigIntVal, big_int_val) +CASE_COMPUTE_FN_WRAPPER(FloatVal, float_val) +CASE_COMPUTE_FN_WRAPPER(DoubleVal, double_val) +CASE_COMPUTE_FN_WRAPPER(StringVal, string_val) +CASE_COMPUTE_FN_WRAPPER(DateTimeVal, datetime_val) +CASE_COMPUTE_FN_WRAPPER(DecimalVal, decimal_val) +CASE_COMPUTE_FN_WRAPPER(DecimalV2Val, decimalv2_val) } diff --git a/be/src/exprs/decimalv2_operators.cpp b/be/src/exprs/decimalv2_operators.cpp index ef836165666120..730171562862cb 100644 --- a/be/src/exprs/decimalv2_operators.cpp +++ b/be/src/exprs/decimalv2_operators.cpp @@ -153,7 +153,7 @@ DateTimeVal DecimalV2Operators::cast_to_date_val( return DateTimeVal::null(); } - // convert from DecimalV2Val to DecimalV2Value for caculation + // convert from DecimalV2Val to DecimalV2Value for calculation const DecimalV2Value& dv = DecimalV2Value::from_decimal_val(val); DateTimeValue dt; if (!dt.from_date_int64(dv)) { diff --git a/be/src/exprs/json_functions.h b/be/src/exprs/json_functions.h index 657a9c75f3b682..b0b86d15bb943e 100644 --- a/be/src/exprs/json_functions.h +++ b/be/src/exprs/json_functions.h @@ -28,7 +28,7 @@ enum JsonFunctionType { JSON_FUN_DOUBLE, JSON_FUN_STRING, - JSON_FUN_UNKOWN 
//The last + JSON_FUN_UNKNOWN //The last }; class Expr; diff --git a/be/src/exprs/like_predicate.h b/be/src/exprs/like_predicate.h index e4542a7fe15020..5ff519253949f5 100644 --- a/be/src/exprs/like_predicate.h +++ b/be/src/exprs/like_predicate.h @@ -45,7 +45,7 @@ class LikePredicate { /// and whether the pattern has any constant substrings. If the pattern is not a /// constant argument, none of the following fields can be set because we cannot know /// the format of the pattern in the prepare function and must deal with each pattern - /// seperately. + /// separately. LikePredicateFunction function; /// Holds the string the StringValue points to and is set any time StringValue is diff --git a/be/src/geo/geo_types.cpp b/be/src/geo/geo_types.cpp index f14b1f3266edcc..1c1de72954a771 100644 --- a/be/src/geo/geo_types.cpp +++ b/be/src/geo/geo_types.cpp @@ -82,7 +82,7 @@ static void remove_duplicate_points(std::vector* points) { static GeoParseStatus to_s2loop(const GeoCoordinateList& coords, std::unique_ptr* loop) { - // 1. covnert all coordinates to points + // 1. convert all coordinates to points std::vector points(coords.list.size()); for (int i = 0; i < coords.list.size(); ++i) { auto res = to_s2point(coords.list[i], &points[i]); @@ -112,7 +112,7 @@ static GeoParseStatus to_s2loop(const GeoCoordinateList& coords, static GeoParseStatus to_s2polyline(const GeoCoordinateList& coords, std::unique_ptr* polyline) { - // 1. covnert all coordinates to points + // 1. convert all coordinates to points std::vector points(coords.list.size()); for (int i = 0; i < coords.list.size(); ++i) { auto res = to_s2point(coords.list[i], &points[i]); diff --git a/be/src/geo/wkt_parse.h b/be/src/geo/wkt_parse.h index 235609cbdc649a..891c3e73c0145b 100644 --- a/be/src/geo/wkt_parse.h +++ b/be/src/geo/wkt_parse.h @@ -27,7 +27,7 @@ class GeoShape; class WktParse { public: // Parse WKT(Well Known Text) to a GeoShape. - // Return a valid GeoShape if input WKT is supppored. + // Return a valid GeoShape if input WKT is supported. // Return null if WKT is not supported or invalid static GeoParseStatus parse_wkt(const char* str, size_t len, GeoShape** shape); }; diff --git a/be/src/geo/wkt_yacc.y b/be/src/geo/wkt_yacc.y index d60a54f28bb42e..b43f75d4b58e68 100644 --- a/be/src/geo/wkt_yacc.y +++ b/be/src/geo/wkt_yacc.y @@ -25,7 +25,7 @@ class WktParseContext; void wkt_error(WktParseContext* ctx, const char* msg) { } -/* forword declare this class for wkt_parse declaration in yacc.y.cpp */ +/* forward declare this class for wkt_parse declaration in yacc.y.cpp */ %} %union { @@ -38,7 +38,7 @@ void wkt_error(WktParseContext* ctx, const char* msg) { %code { /* we need yyscan_t in WktParseContext, so we include lex.h here, - * and we shoud include this header after union define, because it + * and we should include this header after union define, because it * need YYSTYPE */ #include "geo/wkt_lex.l.h" diff --git a/be/src/gutil/casts.h b/be/src/gutil/casts.h index 8f5966a8d1d4bd..4c132ae47a0308 100644 --- a/be/src/gutil/casts.h +++ b/be/src/gutil/casts.h @@ -299,7 +299,7 @@ inline bool loose_enum_test(int e_val) { // Find the binary bounding negative of both e_min and e_max. b_min &= e_min; - // However, if e_min is postive, the result will be positive. + // However, if e_min is positive, the result will be positive. // Now clear all bits right of the most significant clear bit, // which is a negative saturation for negative numbers. // In the case of positive numbers, this is flush to zero. 
@@ -315,13 +315,13 @@ inline bool loose_enum_test(int e_val) { // Find the unary bounding positive number of e_max. int b_max = e_max_sign ^ e_max; - // Find the binary bounding postive number of that + // Find the binary bounding positive number of that // and the unary bounding positive number of e_min. int e_min_sign = e_min >> (sizeof(e_val)*8 - 1); b_max |= e_min_sign ^ e_min; // Now set all bits right of the most significant set bit, - // which is a postive saturation for positive numbers. + // which is a positive saturation for positive numbers. b_max |= b_max >> 1; b_max |= b_max >> 2; b_max |= b_max >> 4; diff --git a/be/src/gutil/hash/jenkins_lookup2.h b/be/src/gutil/hash/jenkins_lookup2.h index 679380967c671f..209af9f3e0026a 100644 --- a/be/src/gutil/hash/jenkins_lookup2.h +++ b/be/src/gutil/hash/jenkins_lookup2.h @@ -149,7 +149,7 @@ static inline uint32 Google1At(const char *ptr2) { // Historically, WORD_HASH has always been defined as we always run on // machines that don't NEED_ALIGNED_LOADS and which IS_LITTLE_ENDIAN. // -// TODO(user): find occurences of WORD_HASH and adjust the code to +// TODO(user): find occurrences of WORD_HASH and adjust the code to // use more meaningful concepts. # define WORD_HASH diff --git a/be/src/gutil/hash/legacy_hash.h b/be/src/gutil/hash/legacy_hash.h index 5a93599a4b0998..8872356be1d3bd 100644 --- a/be/src/gutil/hash/legacy_hash.h +++ b/be/src/gutil/hash/legacy_hash.h @@ -32,7 +32,7 @@ static const uint64 MIX64 = GG_ULONGLONG(0x2b992ddfa23249d6); // more of pi // overloading) and return 32 or 16 bit quantities, respectively. // The basic rule of our hashing is: always mix(). Thus, even for // char outputs we cast to a uint32 and mix with two arbitrary numbers. -// HashTo32 never returns kIllegalHash32, and similary, +// HashTo32 never returns kIllegalHash32, and similarly, // HashTo16 never returns kIllegalHash16. // // Note that these methods avoid returning certain reserved values, while diff --git a/be/src/gutil/move.h b/be/src/gutil/move.h index 17651086ba182a..d94ebf6fddf74c 100644 --- a/be/src/gutil/move.h +++ b/be/src/gutil/move.h @@ -118,7 +118,7 @@ // It is tempting to want to use the RValue type in function parameters, but // excluding the limited usage here for the move constructor and move // operator=, doing so would mean that the function could take both r-values -// and l-values equially which is unexpected. See COMPARED To Boost.Move for +// and l-values equally which is unexpected. See COMPARED To Boost.Move for // more details. // // An alternate, and incorrect, implementation of the RValue class used by diff --git a/be/src/gutil/spinlock_internal.h b/be/src/gutil/spinlock_internal.h index a7f5150ce7badf..1af38abf292604 100644 --- a/be/src/gutil/spinlock_internal.h +++ b/be/src/gutil/spinlock_internal.h @@ -52,7 +52,7 @@ struct SpinLockWaitTransition { // Wait until *w can transition from trans[i].from to trans[i].to for some i // satisfying 0<=i& components, // ---------------------------------------------------------------------- // JoinUsingToBuffer() // This merges a vector of string components with delim inserted -// as separaters between components. +// as separators between components. // User supplies the result buffer with specified buffer size. // The result is also returned for convenience. 
// @@ -84,7 +84,7 @@ char* JoinUsingToBuffer(const vector& components, // ---------------------------------------------------------------------- // JoinStrings() // This merges a vector of string components with delim inserted -// as separaters between components. +// as separators between components. // This is essentially the same as JoinUsingToBuffer except // it uses strings instead of char *s. // @@ -155,7 +155,7 @@ void JoinVectorKeysAndValues(const vector< pair>& components, // gratuitous spacing and quoting. 'output' must point to an empty string. // // Example: -// [Google], [x], [Buchheit, Paul], [string with " quoite in it], [ space ] +// [Google], [x], [Buchheit, Paul], [string with " quote in it], [ space ] // ---> [Google,x,"Buchheit, Paul","string with "" quote in it"," space "] // ---------------------------------------------------------------------- void JoinCSVLineWithDelimiter(const vector& cols, char delimiter, diff --git a/be/src/gutil/strings/join.h b/be/src/gutil/strings/join.h index d6f2b54daa5d82..097cf7aa42b957 100644 --- a/be/src/gutil/strings/join.h +++ b/be/src/gutil/strings/join.h @@ -329,7 +329,7 @@ void JoinCSVLineWithDelimiter(const vector& original_cols, // JoinElements() // This merges a container of any type supported by StrAppend() with delim // inserted as separators between components. This is essentially a -// templatized version of JoinUsingToBuffer(). +// templated version of JoinUsingToBuffer(). // // JoinElementsIterator() // Same as JoinElements(), except that the input elements are specified diff --git a/be/src/gutil/strings/split.cc b/be/src/gutil/strings/split.cc index 904a4aea66d225..4d8cee523db7cf 100644 --- a/be/src/gutil/strings/split.cc +++ b/be/src/gutil/strings/split.cc @@ -479,7 +479,7 @@ void SplitToVector(char* full, const char* delim, vector* vec, if (omit_empty_strings && next[0] == '\0') continue; vec->push_back(next); } - // Add last element (or full string if no delimeter found): + // Add last element (or full string if no delimiter found): if (full != nullptr) { vec->push_back(full); } @@ -492,7 +492,7 @@ void SplitToVector(char* full, const char* delim, vector* vec, if (omit_empty_strings && next[0] == '\0') continue; vec->push_back(next); } - // Add last element (or full string if no delimeter found): + // Add last element (or full string if no delimiter found): if (full != nullptr) { vec->push_back(full); } diff --git a/be/src/gutil/template_util.h b/be/src/gutil/template_util.h index a16a991c829868..007f84d2290e66 100644 --- a/be/src/gutil/template_util.h +++ b/be/src/gutil/template_util.h @@ -103,7 +103,7 @@ template struct is_const : true_type {}; template struct is_void : false_type {}; template <> struct is_void : true_type {}; -// if_ is a templatized conditional statement. +// if_ is a templated conditional statement. // if_ is a compile time evaluation of cond. // if_<>::type contains A if cond is true, B otherwise. template diff --git a/be/src/http/action/mini_load.cpp b/be/src/http/action/mini_load.cpp index 49de508d4449d0..943f8dbe35d5c9 100644 --- a/be/src/http/action/mini_load.cpp +++ b/be/src/http/action/mini_load.cpp @@ -214,7 +214,7 @@ Status MiniLoadAction::_load( << ") because: " << e.what(); status = client.reopen(config::thrift_rpc_timeout_ms); if (!status.ok()) { - LOG(WARNING) << "Client repoen failed. with address(" + LOG(WARNING) << "Client reopen failed. 
with address(" << master_address.hostname << ":" << master_address.port << ")"; return status; } @@ -226,7 +226,7 @@ Status MiniLoadAction::_load( status = client.reopen(config::thrift_rpc_timeout_ms); if (!status.ok()) { - LOG(WARNING) << "Client repoen failed. with address(" + LOG(WARNING) << "Client reopen failed. with address(" << master_address.hostname << ":" << master_address.port << ")"; return status; } @@ -294,7 +294,7 @@ Status MiniLoadAction::check_auth( << ") because: " << e.what(); status = client.reopen(config::thrift_rpc_timeout_ms); if (!status.ok()) { - LOG(WARNING) << "Client repoen failed. with address(" + LOG(WARNING) << "Client reopen failed. with address(" << master_address.hostname << ":" << master_address.port << ")"; return status; } @@ -306,7 +306,7 @@ Status MiniLoadAction::check_auth( status = client.reopen(config::thrift_rpc_timeout_ms); if (!status.ok()) { - LOG(WARNING) << "Client repoen failed. with address(" + LOG(WARNING) << "Client reopen failed. with address(" << master_address.hostname << ":" << master_address.port << ")"; return status; } @@ -334,7 +334,7 @@ void MiniLoadAction::erase_handle(const LoadHandle& desc) { } int MiniLoadAction::on_header(HttpRequest* req) { - // check authorization first, make client know what happend + // check authorization first, make client know what happened if (req->header(HttpHeaders::AUTHORIZATION).empty()) { HttpChannel::send_basic_challenge(req, "mini_load"); return -1; } @@ -551,7 +551,7 @@ void MiniLoadAction::handle(HttpRequest *http_req) { void MiniLoadAction::_handle(HttpRequest* http_req) { MiniLoadAsyncCtx* ctx = ((MiniLoadCtx*) http_req->handler_ctx())->mini_load_async_ctx; if (ctx == nullptr) { - // when ctx is nullptr, there must be error happend when on_chunk_data + // when ctx is nullptr, an error must have happened in on_chunk_data // and reply is sent, we just return with no operation LOG(WARNING) << "handler context is nullptr when MiniLoad callback execute, uri=" << http_req->uri(); @@ -563,7 +563,7 @@ void MiniLoadAction::_handle(HttpRequest* http_req) { << ", body_bytes=" << ctx->body_bytes << ", bytes_written=" << ctx->bytes_written; HttpChannel::send_reply(http_req, HttpStatus::INTERNAL_SERVER_ERROR, - "rececpt size not equal with body size"); + "received size not equal to body size"); return; } auto st = _load( @@ -700,7 +700,7 @@ Status MiniLoadAction::_process_put(HttpRequest* req, StreamLoadContext* ctx) { std::map hll_map; RETURN_IF_ERROR(StringParser::split_string_to_map(hll_value, ":", ",", &hll_map)); if (hll_map.empty()) { - return Status::InvalidArgument("Hll value could not tranform to hll expr: " + hll_value); + return Status::InvalidArgument("Hll value could not be transformed to hll expr: " + hll_value); } for (auto& hll_element: hll_map) { columns_value += "," + hll_element.first @@ -847,10 +847,10 @@ void MiniLoadAction::_new_handle(HttpRequest* req) { Status MiniLoadAction::_on_new_handle(StreamLoadContext* ctx) { if (ctx->body_bytes > 0 && ctx->receive_bytes != ctx->body_bytes) { - LOG(WARNING) << "recevie body don't equal with body bytes, body_bytes=" + LOG(WARNING) << "received body doesn't equal body bytes, body_bytes=" << ctx->body_bytes << ", receive_bytes=" << ctx->receive_bytes << ", id=" << ctx->id; - return Status::InternalError("receive body dont't equal with body bytes"); + return Status::InternalError("received body doesn't equal body bytes"); } // wait stream load sink finish diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp 
index b7ba234a22b8b7..64f3bb09f6d0ad 100644 --- a/be/src/http/action/stream_load.cpp +++ b/be/src/http/action/stream_load.cpp @@ -141,7 +141,7 @@ Status StreamLoadAction::_handle(StreamLoadContext* ctx) { LOG(WARNING) << "recevie body don't equal with body bytes, body_bytes=" << ctx->body_bytes << ", receive_bytes=" << ctx->receive_bytes << ", id=" << ctx->id; - return Status::InternalError("receive body dont't equal with body bytes"); + return Status::InternalError("receive body don't equal with body bytes"); } if (!ctx->use_streaming) { // if we use non-streaming, we need to close file first, @@ -156,7 +156,7 @@ Status StreamLoadAction::_handle(StreamLoadContext* ctx) { // wait stream load finish RETURN_IF_ERROR(ctx->future.get()); - // If put file succeess we need commit this load + // If put file succeeds, we need to commit this load int64_t commit_and_publish_start_time = MonotonicNanos(); RETURN_IF_ERROR(_exec_env->stream_load_executor()->commit_txn(ctx)); ctx->commit_and_publish_txn_cost_nanos = MonotonicNanos() - commit_and_publish_start_time; diff --git a/be/src/http/download_action.cpp b/be/src/http/download_action.cpp index 88d5ad3de780ed..24524ff8234231 100644 --- a/be/src/http/download_action.cpp +++ b/be/src/http/download_action.cpp @@ -130,7 +130,7 @@ void DownloadAction::handle(HttpRequest *req) { handle_normal(req, file_path); } - LOG(INFO) << "deal with download requesst finished! "; + LOG(INFO) << "deal with download request finished! "; } Status DownloadAction::check_token(HttpRequest *req) { diff --git a/be/src/http/http_channel.cpp b/be/src/http/http_channel.cpp index 7b68370272c89f..8d8d80cac6c00a 100644 --- a/be/src/http/http_channel.cpp +++ b/be/src/http/http_channel.cpp @@ -44,12 +44,12 @@ void HttpChannel::send_basic_challenge(HttpRequest* req, const std::string& real } void HttpChannel::send_error(HttpRequest* request, HttpStatus status) { - evhttp_send_error(request->get_evhttp_request(), status, defalut_reason(status).c_str()); + evhttp_send_error(request->get_evhttp_request(), status, default_reason(status).c_str()); } void HttpChannel::send_reply(HttpRequest* request, HttpStatus status) { evhttp_send_reply(request->get_evhttp_request(), status, - defalut_reason(status).c_str(), nullptr); + default_reason(status).c_str(), nullptr); } void HttpChannel::send_reply( @@ -62,7 +62,7 @@ void HttpChannel::send_reply( } else { evbuffer_add(evb, content.c_str(), content.size()); } - evhttp_send_reply(request->get_evhttp_request(), status, defalut_reason(status).c_str(), evb); + evhttp_send_reply(request->get_evhttp_request(), status, default_reason(status).c_str(), evb); evbuffer_free(evb); } @@ -71,7 +71,7 @@ void HttpChannel::send_file(HttpRequest* request, int fd, size_t off, size_t siz evbuffer_add_file(evb, fd, off, size); evhttp_send_reply(request->get_evhttp_request(), HttpStatus::OK, - defalut_reason(HttpStatus::OK).c_str(), evb); + default_reason(HttpStatus::OK).c_str(), evb); evbuffer_free(evb); } diff --git a/be/src/http/http_client.cpp b/be/src/http/http_client.cpp index 0f09384570ed26..89ca55ae8fd78e 100644 --- a/be/src/http/http_client.cpp +++ b/be/src/http/http_client.cpp @@ -38,7 +38,7 @@ Status HttpClient::init(const std::string& url) { if (_curl == nullptr) { _curl = curl_easy_init(); if (_curl == nullptr) { - return Status::InternalError("fail to initalize curl"); + return Status::InternalError("fail to initialize curl"); } } else { curl_easy_reset(_curl); diff --git a/be/src/http/http_client.h b/be/src/http/http_client.h index 
d54bc680d33aaf..ef57a1658c5154 100644 --- a/be/src/http/http_client.h +++ b/be/src/http/http_client.h @@ -109,7 +109,7 @@ class HttpClient { return execute(); } - // helper function to download a file, you can call this function to downlaod + // helper function to download a file, you can call this function to download // a file to local_path Status download(const std::string& local_path); diff --git a/be/src/http/http_handler.h b/be/src/http/http_handler.h index 258140694c06c8..097d313686521e 100644 --- a/be/src/http/http_handler.h +++ b/be/src/http/http_handler.h @@ -31,7 +31,7 @@ class HttpHandler { virtual bool request_will_be_read_progressively() { return false; } - // This funciton will called when all headers are recept. + // This function will be called when all headers are received. // return 0 if process successfully. otherwise return -1; // If return -1, on_header function should send_reply to HTTP client // and function wont send any reply any more. diff --git a/be/src/http/http_parser.h b/be/src/http/http_parser.h index f953ee8d74335b..178be318f7aa27 100644 --- a/be/src/http/http_parser.h +++ b/be/src/http/http_parser.h @@ -56,7 +56,7 @@ class HttpParser { // PARSE_AGAIN return this means that caller need to call this function with new data // from network // PARSE_DONE All of chunks readed - // PARSE_ERROR Error happend + // PARSE_ERROR Error happened static ParseState http_parse_chunked(const uint8_t** buf, const int64_t buf_len, HttpChunkParseCtx* ctx); diff --git a/be/src/http/http_status.cpp b/be/src/http/http_status.cpp index 08c53f50f92ebc..3aa40b5b258b4a 100644 --- a/be/src/http/http_status.cpp +++ b/be/src/http/http_status.cpp @@ -65,7 +65,7 @@ static std::map s_reason_map = { { HttpStatus::HTTP_VERSION_NOT_SUPPORTED, "HTTP Version not supported" } }; -std::string defalut_reason(const HttpStatus& status) { +std::string default_reason(const HttpStatus& status) { auto iter = s_reason_map.find(status); if (iter != s_reason_map.end()) { return iter->second; diff --git a/be/src/http/http_status.h b/be/src/http/http_status.h index 74a0a769e6125f..729500f1d4d762 100644 --- a/be/src/http/http_status.h +++ b/be/src/http/http_status.h @@ -67,7 +67,7 @@ enum HttpStatus { std::string to_code(const HttpStatus& status); -std::string defalut_reason(const HttpStatus& status); +std::string default_reason(const HttpStatus& status); } diff --git a/be/src/olap/base_compaction.cpp b/be/src/olap/base_compaction.cpp index 85b3363cad10f1..d938c58aaeb313 100644 --- a/be/src/olap/base_compaction.cpp +++ b/be/src/olap/base_compaction.cpp @@ -62,7 +62,7 @@ OLAPStatus BaseCompaction::compact() { OLAPStatus BaseCompaction::pick_rowsets_to_compact() { _input_rowsets.clear(); - _tablet->pick_candicate_rowsets_to_base_compaction(&_input_rowsets); + _tablet->pick_candidate_rowsets_to_base_compaction(&_input_rowsets); if (_input_rowsets.size() <= 1) { return OLAP_ERR_BE_NO_SUITABLE_VERSION; } diff --git a/be/src/olap/byte_buffer.h b/be/src/olap/byte_buffer.h index 9232ae656dc690..4f867ab79c0a95 100644 --- a/be/src/olap/byte_buffer.h +++ b/be/src/olap/byte_buffer.h @@ -42,9 +42,9 @@ class StorageByteBuffer { // 新buffer的position为0, limit为capacity // 调用者获得新建的ByteBuffer的所有权,并需使用delete删除获得的StorageByteBuffer // - // TODO. 我认为这里create用法应该是直接返回ByteBuffer本身而不是? - // ??针,否则智能指针就无法发挥作用 - // 目前内存的管理还是手动的。而且需要认为deleta。 + // TODO. 
我认为这里create用法应该是直接返回ByteBuffer本身而不是智能指 + // 针,否则智能指针就无法发挥作用 + // 目前内存的管理还是手动的。而且需要认为delete。 static StorageByteBuffer* create(uint64_t capacity); // 通过引用另一个ByteBuffer的内存创建一个新的StorageByteBuffer diff --git a/be/src/olap/cumulative_compaction.cpp b/be/src/olap/cumulative_compaction.cpp index c2c8755c694e6e..49d7c0ebac5b27 100755 --- a/be/src/olap/cumulative_compaction.cpp +++ b/be/src/olap/cumulative_compaction.cpp @@ -77,7 +77,7 @@ OLAPStatus CumulativeCompaction::compact() { OLAPStatus CumulativeCompaction::pick_rowsets_to_compact() { std::vector candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction( + _tablet->pick_candidate_rowsets_to_cumulative_compaction( config::cumulative_compaction_skip_window_seconds, &candidate_rowsets); if (candidate_rowsets.empty()) { @@ -99,7 +99,7 @@ OLAPStatus CumulativeCompaction::pick_rowsets_to_compact() { if (_last_delete_version.first != -1) { // we meet a delete version, should increase the cumulative point to let base compaction handle the delete version. // plus 1 to skip the delete version. - // NOTICE: after that, the cumulative point may be larger than max version of this tablet, but it doen't matter. + // NOTICE: after that, the cumulative point may be larger than max version of this tablet, but it doesn't matter. _tablet->set_cumulative_layer_point(_last_delete_version.first + 1); return OLAP_ERR_CUMULATIVE_NO_SUITABLE_VERSIONS; } @@ -129,7 +129,7 @@ OLAPStatus CumulativeCompaction::pick_rowsets_to_compact() { } } - // all candicate rowsets are non-overlapping, increase the cumulative point + // all candidate rowsets are non-overlapping, increase the cumulative point _tablet->set_cumulative_layer_point(candidate_rowsets.back()->start_version() + 1); } } else { diff --git a/be/src/olap/cumulative_compaction_policy.cpp b/be/src/olap/cumulative_compaction_policy.cpp index 2e28c58af57dec..dc3257e81eb393 100644 --- a/be/src/olap/cumulative_compaction_policy.cpp +++ b/be/src/olap/cumulative_compaction_policy.cpp @@ -69,7 +69,7 @@ void SizeBasedCumulativeCompactionPolicy::calculate_cumulative_point(Tablet* tab // calculate promotion size auto base_rowset_meta = existing_rss.begin(); - // check base rowset frist version must be zero + // check base rowset first version must be zero CHECK((*base_rowset_meta)->start_version() == 0); int64_t promotion_size = 0; @@ -424,7 +424,7 @@ void NumBasedCumulativeCompactionPolicy::calculate_cumulative_point(Tablet* tabl } } -void CumulativeCompactionPolicy::pick_candicate_rowsets(int64_t skip_window_sec, +void CumulativeCompactionPolicy::pick_candidate_rowsets(int64_t skip_window_sec, const std::unordered_map& rs_version_map, int64_t cumulative_point, std::vector* candidate_rowsets) { diff --git a/be/src/olap/cumulative_compaction_policy.h b/be/src/olap/cumulative_compaction_policy.h index 941e0814c839e6..47608a8d45980c 100644 --- a/be/src/olap/cumulative_compaction_policy.h +++ b/be/src/olap/cumulative_compaction_policy.h @@ -33,7 +33,7 @@ class Tablet; /// This CompactionPolicy enum is used to represent the type of compaction policy. /// Now it has two values, NUM_BASED_POLICY and SIZE_BASED_POLICY. /// NUM_BASED_POLICY means current compaction policy implemented by num based policy. -/// SIZE_BASED_POLICY means current comapction policy implemented by size_based policy. +/// SIZE_BASED_POLICY means current compaction policy implemented by size_based policy. 
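For illustration only: the CompactionPolicy enum documented above is selected from a configured policy string. A minimal sketch of that mapping follows; the parse_compaction_policy helper and the "NUM_BASED" literal are assumptions (only CUMULATIVE_SIZE_BASED_POLICY = "SIZE_BASED" is visible in this diff), so treat it as a sketch rather than the actual Doris code.

#include <stdexcept>
#include <string>

enum CompactionPolicy {
    NUM_BASED_POLICY = 0,
    SIZE_BASED_POLICY = 1,
};

// Hypothetical helper: map the configured policy string onto the enum.
inline CompactionPolicy parse_compaction_policy(const std::string& policy) {
    if (policy == "SIZE_BASED") return SIZE_BASED_POLICY; // CUMULATIVE_SIZE_BASED_POLICY
    if (policy == "NUM_BASED") return NUM_BASED_POLICY;   // assumed counterpart constant
    throw std::invalid_argument("unknown cumulative compaction policy: " + policy);
}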
enum CompactionPolicy { NUM_BASED_POLICY = 0, SIZE_BASED_POLICY = 1, @@ -44,9 +44,9 @@ const static std::string CUMULATIVE_SIZE_BASED_POLICY = "SIZE_BASED"; /// This class CumulativeCompactionPolicy is the base class of cumulative compaction policy. /// It defines the policy to do cumulative compaction. It has different derived classes, which implements /// concrete cumulative compaction algorithm. The policy is configured by conf::cumulative_compaction_policy. -/// The policy functions is the main steps to do cumulative compaction. For example, how to pick candicate +/// The policy functions is the main steps to do cumulative compaction. For example, how to pick candidate /// rowsets from tablet using current policy, how to calculate the cumulative point and how to calculate -/// the tablet cumulative compcation score and so on. +/// the tablet cumulative compaction score and so on. class CumulativeCompactionPolicy { public: @@ -69,19 +69,19 @@ class CumulativeCompactionPolicy { const std::vector& all_rowsets, int64_t current_cumulative_point, uint32_t* score) = 0; - /// This function implements the policy which represents how to pick the candicate rowsets for compaction. + /// This function implements the policy which represents how to pick the candidate rowsets for compaction. /// This base class gives a unified implementation. Its derived classes also can override this function each other. /// param skip_window_sec, it means skipping the rowsets which use create time plus skip_window_sec is greater than now. /// param rs_version_map, mapping from version to rowset /// param cumulative_point, current cumulative point of tablet /// return candidate_rowsets, the container of candidate rowsets - virtual void pick_candicate_rowsets( + virtual void pick_candidate_rowsets( int64_t skip_window_sec, const std::unordered_map& rs_version_map, int64_t cumulative_point, std::vector* candidate_rowsets); /// Pick input rowsets from candidate rowsets for compaction. This function is pure virtual function. - /// Its implemention depands on concrete compaction policy. + /// Its implementation depends on concrete compaction policy. /// param candidate_rowsets, the candidate_rowsets vector container to pick input rowsets /// return input_rowsets, the vector container as return /// return last_delete_version, if has delete rowset, record the delete version from input_rowsets @@ -94,7 +94,7 @@ class CumulativeCompactionPolicy { Version* last_delete_version, size_t* compaction_score) = 0; /// Update tablet's cumulative point after cumulative compaction finished. This function is pure virtual function. - /// Each derived has its own update policy which deponds on its concrete algorithm. When the cumulative point moves + /// Each derived has its own update policy which depends on its concrete algorithm. When the cumulative point moves /// after output rowset, then output rowset will do base compaction next time. /// param input_rowsets, the picked input rowset to do compaction just now /// param output_rowset, the result rowset after compaction @@ -102,9 +102,9 @@ class CumulativeCompactionPolicy { RowsetSharedPtr output_rowset, Version& last_delete_version) = 0; - /// Calculate tablet's cumulatvie point before compaction. This calculation just executes once when the tablet compacts - /// first time after BE initialization and then motion of cumulatvie point depends on update_cumulative_point policy. - /// This function is pure virtual function. 
In genaral, the cumulative point splits the rowsets into two parts: + /// Calculate tablet's cumulative point before compaction. This calculation just executes once when the tablet compacts + /// first time after BE initialization and then motion of cumulative point depends on update_cumulative_point policy. + /// This function is pure virtual function. In general, the cumulative point splits the rowsets into two parts: /// base rowsets, cumulative rowsets. /// param all_rowsets, all rowsets in the tablet /// param current_cumulative_point, current cumulative position @@ -118,7 +118,7 @@ class CumulativeCompactionPolicy { virtual std::string name() = 0; }; -/// Num based cumulative compcation policy implemention. Num based policy which derives CumulativeCompactionPolicy is early +/// Num based cumulative compaction policy implementation. Num based policy which derives CumulativeCompactionPolicy is early /// basic algorithm. This policy uses linear structure to compact rowsets. The cumulative rowsets compact only once and /// then the output will do base compaction. It can make segments of rowsets in order and compact small rowsets to a bigger one. class NumBasedCumulativeCompactionPolicy final : public CumulativeCompactionPolicy { @@ -164,7 +164,7 @@ class NumBasedCumulativeCompactionPoli std::string name() { return CUMULATIVE_NUM_BASED_POLICY; } }; -/// SizeBased cumulative compcation policy implemention. SizeBased policy which derives CumulativeCompactionPolicy is a optimized +/// SizeBased cumulative compaction policy implementation. SizeBased policy which derives CumulativeCompactionPolicy is an optimized /// version of num based cumulative compaction policy. This policy also uses linear structure to compact rowsets. The cumulative rowsets /// can do compaction when they are in same level size. And when output rowset exceeds the promotion radio of base size or min promotion /// size, it will do base compaction. This policy is targeting the use cases requiring lower write amplification, trading off read @@ -190,7 +190,7 @@ class SizeBasedCumulativeCompactionPol /// SizeBased cumulative compaction policy implements calculate cumulative point function. /// When the first time the tablet does compact, this calculation is executed. Its main policy is to find first rowset - /// which does not satifie the promotion conditions. + /// which does not satisfy the promotion conditions. void calculate_cumulative_point(Tablet* tablet, const std::vector& all_rowsets, int64_t current_cumulative_point, int64_t* cumulative_point) override; @@ -205,8 +205,8 @@ class SizeBasedCumulativeCompactionPol Version* last_delete_version, size_t* compaction_score) override; /// SizeBased cumulative compaction policy implements update cumulative point function. - /// Its main policy is judging the output rowset size whether satifies the promotion size. - /// If it satified, this policy will update the cumulative point. + /// Its main policy is judging whether the output rowset size satisfies the promotion size. + /// If it is satisfied, this policy will update the cumulative point. 
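As a reading aid for the pick_candidate_rowsets() contract documented in the hunks above, here is a minimal sketch with simplified stand-in types (the real code uses RowsetSharedPtr and a version-to-rowset map); it is not the actual Doris implementation.

#include <cstdint>
#include <ctime>
#include <memory>
#include <unordered_map>
#include <vector>

struct Rowset {
    int64_t start_version;
    int64_t creation_time; // unix seconds
};
using RowsetSharedPtr = std::shared_ptr<Rowset>;

// Keep rowsets at or past the cumulative point, skipping rowsets whose
// create time plus skip_window_sec is still in the future (too fresh).
void pick_candidate_rowsets(
        int64_t skip_window_sec,
        const std::unordered_map<int64_t, RowsetSharedPtr>& rs_version_map,
        int64_t cumulative_point,
        std::vector<RowsetSharedPtr>* candidate_rowsets) {
    int64_t now = ::time(nullptr);
    for (const auto& entry : rs_version_map) {
        const RowsetSharedPtr& rs = entry.second;
        if (rs->start_version >= cumulative_point &&
            rs->creation_time + skip_window_sec <= now) {
            candidate_rowsets->push_back(rs);
        }
    }
}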
void update_cumulative_point(Tablet* tablet, const std::vector& input_rowsets, RowsetSharedPtr _output_rowset, Version& last_delete_version); @@ -219,7 +219,7 @@ class SizeBasedCumulativeCompactionPol std::string name() { return CUMULATIVE_SIZE_BASED_POLICY; } private: - /// calculate promotion size using current base rowset meta size and promition configs + /// calculate promotion size using current base rowset meta size and promotion configs void _calc_promotion_size(RowsetMetaSharedPtr base_rowset_meta, int64_t* promotion_size); /// calculate the disk size belong to which level, the level is divide by power of 2 @@ -227,7 +227,7 @@ class SizeBasedCumulativeCompactionPol /// and cumulative_size_based_promotion_size_mbytes int _level_size(const int64_t size); - /// when policy calcalute cumulative_compaction_score, update promotion size at the same time + /// when policy calculates cumulative_compaction_score, update promotion size at the same time void _refresh_tablet_size_based_promotion_size(int64_t promotion_size); private: @@ -245,11 +245,11 @@ class SizeBasedCumulativeCompactionPol std::vector _levels; }; -/// The factory of CumulativeCompactionPolicy, it can product diffrent policy according to the `policy` parameter. +/// The factory of CumulativeCompactionPolicy, it can produce different policies according to the `policy` parameter. class CumulativeCompactionPolicyFactory { public: - /// Static factory function. It can product diffrent policy according to the `policy` parameter and use tablet ptr + /// Static factory function. It can produce different policies according to the `policy` parameter and use tablet ptr /// to construct the policy. Now it can product size based and num based policies. static std::unique_ptr create_cumulative_compaction_policy( std::string policy); diff --git a/be/src/olap/data_dir.cpp b/be/src/olap/data_dir.cpp index 55a21947f4227d..86650d144cb701 100644 --- a/be/src/olap/data_dir.cpp +++ b/be/src/olap/data_dir.cpp @@ -615,12 +615,12 @@ OLAPStatus DataDir::_check_incompatible_old_format_tablet() { if (config::storage_strict_check_incompatible_old_format) { LOG(FATAL) << "There are incompatible old format metas, current version does not support " << "and it may lead to data missing!!! " - << "talbet_id = " << tablet_id << " schema_hash = " << schema_hash; + << "tablet_id = " << tablet_id << " schema_hash = " << schema_hash; } else { LOG(WARNING) << "There are incompatible old format metas, current version does not support " << "and it may lead to data missing!!! 
" - << "talbet_id = " << tablet_id << " schema_hash = " << schema_hash; + << "tablet_id = " << tablet_id << " schema_hash = " << schema_hash; } return false; }; diff --git a/be/src/olap/delete_handler.cpp b/be/src/olap/delete_handler.cpp index a58b3e401c5769..28c8d77e438281 100644 --- a/be/src/olap/delete_handler.cpp +++ b/be/src/olap/delete_handler.cpp @@ -161,7 +161,7 @@ OLAPStatus DeleteConditionHandler::check_condition_valid( return OLAP_ERR_DELETE_INVALID_CONDITION; } - // 检查指定的列是不是key,是不是float或doulbe类型 + // 检查指定的列是不是key,是不是float或double类型 const TabletColumn& column = schema.column(field_index); if ((!column.is_key() && schema.keys_type() != KeysType::DUP_KEYS) @@ -199,9 +199,9 @@ bool DeleteHandler::_parse_condition(const std::string& condition_str, TConditio try { // Condition string format, the format is (column_name)(op)(value) // eg: condition_str="c1 = 1597751948193618247 and length(source)<1;\n;\n" - // group1: (\w+) matchs "c1" - // group2: ((?:=)|(?:!=)|(?:>>)|(?:<<)|(?:>=)|(?:<=)|(?:\*=)|(?:IS)) matchs "=" - // group3: ((?:[\s\S]+)?) matchs "1597751948193618247 and length(source)<1;\n;\n" + // group1: (\w+) matches "c1" + // group2: ((?:=)|(?:!=)|(?:>>)|(?:<<)|(?:>=)|(?:<=)|(?:\*=)|(?:IS)) matches "=" + // group3: ((?:[\s\S]+)?) matches "1597751948193618247 and length(source)<1;\n;\n" const char* const CONDITION_STR_PATTERN = R"((\w+)\s*((?:=)|(?:!=)|(?:>>)|(?:<<)|(?:>=)|(?:<=)|(?:\*=)|(?:IS))\s*((?:[\s\S]+)?))"; regex ex(CONDITION_STR_PATTERN); @@ -230,7 +230,7 @@ bool DeleteHandler::_parse_condition(const std::string& condition_str, TConditio OLAPStatus DeleteHandler::init(const TabletSchema& schema, const DelPredicateArray& delete_conditions, int32_t version) { if (_is_inited) { - OLAP_LOG_WARNING("reintialize delete handler."); + OLAP_LOG_WARNING("reinitialize delete handler."); return OLAP_ERR_INIT_FAILED; } diff --git a/be/src/olap/delta_writer.cpp b/be/src/olap/delta_writer.cpp index 9d42e6e1607da6..ad9b54c172f5c2 100644 --- a/be/src/olap/delta_writer.cpp +++ b/be/src/olap/delta_writer.cpp @@ -224,7 +224,7 @@ OLAPStatus DeltaWriter::close() { // which means this tablet has no data loaded, but at least one tablet // in same partition has data loaded. // so we have to also init this DeltaWriter, so that it can create a empty rowset - // for this tablet when being closd. + // for this tablet when being closed. RETURN_NOT_OK(init()); } diff --git a/be/src/olap/field.h b/be/src/olap/field.h index f36787d00256a3..7382722743d6f2 100644 --- a/be/src/olap/field.h +++ b/be/src/olap/field.h @@ -80,9 +80,9 @@ class Field { // todo(kks): Unify AggregateInfo::init method and Field::agg_init method // This function will initialize destination with source. - // This functionn differs copy functionn in that if this field - // contain aggregate information, this functionn will initialize - // destination in aggregate format, and update with srouce content. + // This function differs copy function in that if this field + // contain aggregate information, this function will initialize + // destination in aggregate format, and update with source content. virtual void agg_init(RowCursorCell* dst, const RowCursorCell& src, MemPool* mem_pool, ObjectPool* agg_pool) const { direct_copy(dst, src); } @@ -118,7 +118,7 @@ class Field { // Only compare column content, without considering NULL condition. 
// RETURNS: // 0 means equal, - // -1 means left less than rigth, + // -1 means left less than right, // 1 means left bigger than right int compare(const void* left, const void* right) const { return _type_info->cmp(left, right); @@ -131,7 +131,7 @@ class Field { // Only compare column content, without considering NULL condition. // RETURNS: // 0 means equal, - // -1 means left less than rigth, + // -1 means left less than right, // 1 means left bigger than right template int compare_cell(const LhsCellType& lhs, @@ -165,7 +165,7 @@ class Field { // deep copy source cell' content to destination cell. // For string type, this will allocate data form pool, - // and copy srouce's conetent. + // and copy source's content. template void copy_object(DstCellType* dst, const SrcCellType& src, MemPool* pool) const { bool is_null = src.is_null(); @@ -178,7 +178,7 @@ class Field { // deep copy source cell' content to destination cell. // For string type, this will allocate data form pool, - // and copy srouce's conetent. + // and copy source's content. template void deep_copy(DstCellType* dst, const SrcCellType& src, @@ -207,7 +207,7 @@ class Field { return _type_info->convert_from(dest, src, src_type, mem_pool); } - // Copy srouce content to destination in index format. + // Copy source content to destination in index format. template void to_index(DstCellType* dst, const SrcCellType& src) const; @@ -304,9 +304,9 @@ int Field::index_cmp(const LhsCellType& lhs, const RhsCellType& rhs) const { // so calculate the min of the three size as new compare_size compare_size = std::min(std::min(compare_size, (int)l_slice->size), (int)r_slice->size); - // This functionn is used to compare prefix index. + // This function is used to compare prefix index. // Only the fixed length of prefix index should be compared. - // If r_slice->size > l_slice->size, igonre the extra parts directly. + // If r_slice->size > l_slice->size, ignore the extra parts directly. res = strncmp(l_slice->data, r_slice->data, compare_size); if (res == 0 && compare_size != (_index_size - OLAP_STRING_MAX_BYTES)) { if (l_slice->size < r_slice->size) { diff --git a/be/src/olap/file_helper.h b/be/src/olap/file_helper.h index 35f1c2901efff5..31b0a7b71ad6db 100644 --- a/be/src/olap/file_helper.h +++ b/be/src/olap/file_helper.h @@ -176,7 +176,7 @@ class FileHandlerWithBuf { typedef struct _FixedFileHeader { // 整个文件的长度 uint32_t file_length; - // 文件除了FileHeader之外的内容的checkcum + // 文件除了FileHeader之外的内容的checksum uint32_t checksum; // Protobuf部分的长度 uint32_t protobuf_length; @@ -189,7 +189,7 @@ typedef struct _FixedFileHeaderV2 { uint32_t version; // 整个文件的长度 uint64_t file_length; - // 文件除了FileHeader之外的内容的checkcum + // 文件除了FileHeader之外的内容的checksum uint32_t checksum; // Protobuf部分的长度 uint64_t protobuf_length; diff --git a/be/src/olap/fs/block_manager.cpp b/be/src/olap/fs/block_manager.cpp index e5938ada2bda4d..47043ca1bfd97a 100644 --- a/be/src/olap/fs/block_manager.cpp +++ b/be/src/olap/fs/block_manager.cpp @@ -45,7 +45,7 @@ namespace fs { // - users could always change this to "never", which slows down // throughput but may improve write latency. // -// TODO(lingbin): move it to conf later, to allow adjust dynamicaly. +// TODO(lingbin): move it to conf later, to allow adjust dynamically. 
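The Field::index_cmp() hunk above describes how prefix indexes are compared. A simplified sketch of that rule, assuming a plain Slice type and ignoring the _index_size bookkeeping of the real code:

#include <algorithm>
#include <cstring>

struct Slice {
    const char* data;
    size_t size;
};

int prefix_index_cmp(const Slice& l, const Slice& r, int prefix_len) {
    // Compare no more bytes than the prefix length or either string holds.
    int compare_size = std::min({prefix_len, (int)l.size, (int)r.size});
    int res = strncmp(l.data, r.data, compare_size);
    if (res != 0) return res;
    // Prefixes equal: the shorter content sorts first; extra bytes beyond
    // the fixed prefix are ignored, as the comment in the hunk above notes.
    if (l.size == r.size) return 0;
    return l.size < r.size ? -1 : 1;
}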
const std::string BlockManager::block_manager_preflush_control = "finalize"; } // namespace fs diff --git a/be/src/olap/generic_iterators.cpp b/be/src/olap/generic_iterators.cpp index c73504e27670a7..222997afc45114 100644 --- a/be/src/olap/generic_iterators.cpp +++ b/be/src/olap/generic_iterators.cpp @@ -118,7 +118,7 @@ class MergeIteratorContext { : _iter(iter), _block(iter->schema(), 1024) { } - // Intialize this context and will prepare data for current_row() + // Initialize this context and will prepare data for current_row() Status init(const StorageReadOptions& opts); // Return current row which internal row index points to diff --git a/be/src/olap/hll.cpp b/be/src/olap/hll.cpp index fd3e507b5dad16..b718a47c60bde4 100644 --- a/be/src/olap/hll.cpp +++ b/be/src/olap/hll.cpp @@ -52,7 +52,7 @@ void HyperLogLog::_convert_explicit_to_register() { std::set().swap(_hash_set); } -// Change HLL_DATA_EXPLICIT to HLL_DATA_FULL directly, because HLL_DATA_SPRASE +// Change HLL_DATA_EXPLICIT to HLL_DATA_FULL directly, because HLL_DATA_SPARSE // is implemented in the same way in memory with HLL_DATA_FULL. void HyperLogLog::update(uint64_t hash_value) { switch (_type) { @@ -61,14 +61,14 @@ void HyperLogLog::update(uint64_t hash_value) { _type = HLL_DATA_EXPLICIT; break; case HLL_DATA_EXPLICIT: - if (_hash_set.size() < HLL_EXPLICLIT_INT64_NUM) { + if (_hash_set.size() < HLL_EXPLICIT_INT64_NUM) { _hash_set.insert(hash_value); break; } _convert_explicit_to_register(); _type = HLL_DATA_FULL; // fall through - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: _update_registers(hash_value); break; @@ -88,7 +88,7 @@ void HyperLogLog::merge(const HyperLogLog& other) { case HLL_DATA_EXPLICIT: _hash_set = other._hash_set; break; - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: _registers = new uint8_t[HLL_REGISTERS_COUNT]; memcpy(_registers, other._registers, HLL_REGISTERS_COUNT); @@ -101,15 +101,15 @@ void HyperLogLog::merge(const HyperLogLog& other) { case HLL_DATA_EXPLICIT: { switch (other._type) { case HLL_DATA_EXPLICIT: - // Merge other's explicit values first, then check if the number is exccede - // HLL_EXPLICLIT_INT64_NUM. This is OK because the max value is 2 * 160. + // Merge other's explicit values first, then check if the number exceeds + // HLL_EXPLICIT_INT64_NUM. This is OK because the max value is 2 * 160. 
_hash_set.insert(other._hash_set.begin(), other._hash_set.end()); - if (_hash_set.size() > HLL_EXPLICLIT_INT64_NUM) { + if (_hash_set.size() > HLL_EXPLICIT_INT64_NUM) { _convert_explicit_to_register(); _type = HLL_DATA_FULL; } break; - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: _convert_explicit_to_register(); _merge_registers(other._registers); @@ -120,7 +120,7 @@ void HyperLogLog::merge(const HyperLogLog& other) { } break; } - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: { switch (other._type) { case HLL_DATA_EXPLICIT: @@ -128,7 +128,7 @@ void HyperLogLog::merge(const HyperLogLog& other) { _update_registers(hash_value); } break; - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: _merge_registers(other._registers); break; @@ -147,7 +147,7 @@ size_t HyperLogLog::max_serialized_size() const { return 1; case HLL_DATA_EXPLICIT: return 2 + _hash_set.size() * 8; - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: return 1 + HLL_REGISTERS_COUNT; } @@ -164,9 +164,9 @@ size_t HyperLogLog::serialize(uint8_t* dst) const { break; } case HLL_DATA_EXPLICIT: { - DCHECK(_hash_set.size() < HLL_EXPLICLIT_INT64_NUM) + DCHECK(_hash_set.size() < HLL_EXPLICIT_INT64_NUM) << "Number of explicit elements(" << _hash_set.size() - << ") should be less or equal than " << HLL_EXPLICLIT_INT64_NUM; + << ") should be less or equal than " << HLL_EXPLICIT_INT64_NUM; *ptr++ = _type; *ptr++ = (uint8_t)_hash_set.size(); for (auto hash_value : _hash_set) { @@ -175,7 +175,7 @@ size_t HyperLogLog::serialize(uint8_t* dst) const { } break; } - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: { uint32_t num_non_zero_registers = 0; for (int i = 0; i < HLL_REGISTERS_COUNT; i++) { @@ -191,7 +191,7 @@ size_t HyperLogLog::serialize(uint8_t* dst) const { memcpy(ptr, _registers, HLL_REGISTERS_COUNT); ptr += HLL_REGISTERS_COUNT; } else { - *ptr++ = HLL_DATA_SPRASE; + *ptr++ = HLL_DATA_SPARSE; // 2-5(4 byte): number of registers encode_fixed32_le(ptr, num_non_zero_registers); ptr += 4; @@ -231,7 +231,7 @@ bool HyperLogLog::is_valid(const Slice& slice) { ptr += num_explicits * 8; break; } - case HLL_DATA_SPRASE: { + case HLL_DATA_SPARSE: { if ((ptr + 4) > end) { return false; } @@ -256,7 +256,7 @@ bool HyperLogLog::deserialize(const Slice& slice) { // NOTE(zc): Don't remove this check unless you known what // you are doing. Because of history bug, we ingest some - // invalid HLL data in storge, which ptr is nullptr. + // invalid HLL data in storage, which ptr is nullptr. // we must handle this case to avoid process crash. // This bug is in release 0.10, I think we can remove this // in release 0.12 or later. @@ -285,7 +285,7 @@ bool HyperLogLog::deserialize(const Slice& slice) { } break; } - case HLL_DATA_SPRASE: { + case HLL_DATA_SPARSE: { _registers = new uint8_t[HLL_REGISTERS_COUNT]; memset(_registers, 0, HLL_REGISTERS_COUNT); @@ -350,16 +350,16 @@ int64_t HyperLogLog::estimate_cardinality() const { harmonic_mean = 1.0f / harmonic_mean; double estimate = alpha * num_streams * num_streams * harmonic_mean; - // according to HerperLogLog current correction, if E is cardinal + // according to HyperLogLog current correction, if E is cardinal // E =< num_streams * 2.5 , LC has higher accuracy. - // num_streams * 2.5 < E , HerperLogLog has higher accuracy. - // Generally , we can use HerperLogLog to produce value as E. + // num_streams * 2.5 < E , HyperLogLog has higher accuracy. + // Generally , we can use HyperLogLog to produce value as E. 
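The update()/merge() hunks above delegate to _update_registers(), which this diff does not show. A minimal sketch of the standard HyperLogLog register update, using HLL_COLUMN_PRECISION = 14 and HLL_REGISTERS_COUNT = 16 * 1024 from hll.h; the actual Doris routine may differ in detail:

#include <algorithm>
#include <cstdint>

const int HLL_COLUMN_PRECISION = 14;
const int HLL_REGISTERS_COUNT = 16 * 1024; // 2^14 registers

void update_registers(uint8_t* registers, uint64_t hash_value) {
    // The low bits of the hash select the register...
    uint64_t idx = hash_value % HLL_REGISTERS_COUNT;
    hash_value >>= HLL_COLUMN_PRECISION;
    // ...and the remaining bits contribute the rank: the 1-based position of
    // the first set bit. __builtin_ctzll is undefined for 0, so guard that case.
    uint8_t first_one_bit = hash_value == 0
            ? (uint8_t)(64 - HLL_COLUMN_PRECISION + 1)
            : (uint8_t)(__builtin_ctzll(hash_value) + 1);
    registers[idx] = std::max(registers[idx], first_one_bit);
}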
if (estimate <= num_streams * 2.5 && num_zero_registers != 0) { // Estimated cardinality is too low. Hll is too inaccurate here, instead use // linear counting. estimate = num_streams * log(static_cast(num_streams) / num_zero_registers); } else if (num_streams == 16384 && estimate < 72000) { - // when Linear Couint change to HerperLoglog according to HerperLogLog Correction, + // when Linear Counting changes to HyperLogLog according to HyperLogLog Correction, // there are relatively large fluctuations, we fixed the problem refer to redis. double bias = 5.9119 * 1.0e-18 * (estimate * estimate * estimate * estimate) - 1.4253 * 1.0e-12 * (estimate * estimate * estimate) + @@ -381,11 +381,11 @@ void HllSetResolver::parse() { // first byte : type // second~five byte : hash values's number // five byte later : hash value - _explicit_num = (ExpliclitLengthValueType) (pdata[sizeof(SetTypeValueType)]); + _explicit_num = (ExplicitLengthValueType) (pdata[sizeof(SetTypeValueType)]); _explicit_value = (uint64_t*)(pdata + sizeof(SetTypeValueType) - + sizeof(ExpliclitLengthValueType)); + + sizeof(ExplicitLengthValueType)); break; - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: // first byte : type // second ~(2^HLL_COLUMN_PRECISION)/8 byte : bitmap mark which is not zero // 2^HLL_COLUMN_PRECISION)/8 + 1以后value @@ -412,7 +412,7 @@ void HllSetResolver::parse() { void HllSetHelper::set_sparse( char *result, const std::map& index_to_value, int& len) { - result[0] = HLL_DATA_SPRASE; + result[0] = HLL_DATA_SPARSE; len = sizeof(HllSetResolver::SetTypeValueType) + sizeof(HllSetResolver::SparseLengthValueType); char* write_value_pos = result + len; for (std::map::const_iterator iter = index_to_value.begin(); @@ -430,9 +430,9 @@ void HllSetHelper::set_sparse( void HllSetHelper::set_explicit(char* result, const std::set& hash_value_set, int& len) { result[0] = HLL_DATA_EXPLICIT; - result[1] = (HllSetResolver::ExpliclitLengthValueType)(hash_value_set.size()); + result[1] = (HllSetResolver::ExplicitLengthValueType)(hash_value_set.size()); len = sizeof(HllSetResolver::SetTypeValueType) - + sizeof(HllSetResolver::ExpliclitLengthValueType); + + sizeof(HllSetResolver::ExplicitLengthValueType); char* write_pos = result + len; for (std::set::const_iterator iter = hash_value_set.begin(); iter != hash_value_set.end(); iter++) { diff --git a/be/src/olap/hll.h b/be/src/olap/hll.h index 20eb834e5d265b..78b5fcefa10231 100644 --- a/be/src/olap/hll.h +++ b/be/src/olap/hll.h @@ -31,7 +31,7 @@ class Slice; const static int HLL_COLUMN_PRECISION = 14; const static int HLL_ZERO_COUNT_BITS = (64 - HLL_COLUMN_PRECISION); -const static int HLL_EXPLICLIT_INT64_NUM = 160; +const static int HLL_EXPLICIT_INT64_NUM = 160; const static int HLL_SPARSE_THRESHOLD = 4096; const static int HLL_REGISTERS_COUNT = 16 * 1024; // maximum size in byte of serialized HLL: type(1) + registers (2^14) @@ -54,11 +54,11 @@ const static int HLL_EMPTY_SIZE = 1; // // HLL_DATA_EXPLICIT: when there is only few values in set, store these values explicit. // If the number of hash values is not greater than 160, set is encoded in this format. -// The max space occupied is (1 + 1 + 160 * 8) = 1282. I don't know why 160 is choosed, +// The max space occupied is (1 + 1 + 160 * 8) = 1282. I don't know why 160 is chosen, // maybe can be other number. If you are interested, you can try other number and see // if it will be better. // -// HLL_DATA_SPRASE: only store non-zero registers. If the number of non-zero registers +// HLL_DATA_SPARSE: only store non-zero registers. 
If the number of non-zero registers // is not greater than 4096, set is encoded in this format. The max space occupied is // (1 + 4 + 3 * 4096) = 12293. // @@ -72,7 +72,7 @@ const static int HLL_EMPTY_SIZE = 1; enum HllDataType { HLL_DATA_EMPTY = 0, HLL_DATA_EXPLICIT = 1, - HLL_DATA_SPRASE = 2, + HLL_DATA_SPARSE = 2, HLL_DATA_FULL = 3, }; @@ -110,7 +110,7 @@ class HyperLogLog { // Return actual size of serialized binary. size_t serialize(uint8_t* dst) const; - // Now, only empty HLL support this funciton. + // Now, only empty HLL support this function. bool deserialize(const Slice& slice); int64_t estimate_cardinality() const; @@ -134,7 +134,7 @@ class HyperLogLog { case HLL_DATA_EMPTY: return {}; case HLL_DATA_EXPLICIT: - case HLL_DATA_SPRASE: + case HLL_DATA_SPARSE: case HLL_DATA_FULL: { std::string str {"hash set size: "}; @@ -154,7 +154,7 @@ class HyperLogLog { HllDataType _type = HLL_DATA_EMPTY; std::set _hash_set; - // This field is much space consumming(HLL_REGISTERS_COUNT), we craete + // This field is much space consuming(HLL_REGISTERS_COUNT), we create // it only when it is really needed. uint8_t* _registers = nullptr; @@ -196,7 +196,7 @@ class HllSetResolver { ~HllSetResolver() {} typedef uint8_t SetTypeValueType; - typedef uint8_t ExpliclitLengthValueType; + typedef uint8_t ExplicitLengthValueType; typedef int32_t SparseLengthValueType; typedef uint16_t SparseIndexType; typedef uint8_t SparseValueType; @@ -243,7 +243,7 @@ private : HllDataType _set_type; //set type char* _full_value_position; uint64_t* _explicit_value; - ExpliclitLengthValueType _explicit_num; + ExplicitLengthValueType _explicit_num; std::map _sparse_map; SparseLengthValueType* _sparse_count; }; diff --git a/be/src/olap/olap_common.h b/be/src/olap/olap_common.h index 889f0e3eb23a3f..96ed10c297b339 100644 --- a/be/src/olap/olap_common.h +++ b/be/src/olap/olap_common.h @@ -306,7 +306,7 @@ struct RowsetId { } } - // to compatiable with old version + // to be compatible with old version void init(int64_t rowset_id) { init(1, rowset_id, 0, 0); } diff --git a/be/src/olap/olap_cond.cpp b/be/src/olap/olap_cond.cpp index 1238b741e1d57f..5e37e76e107832 100644 --- a/be/src/olap/olap_cond.cpp +++ b/be/src/olap/olap_cond.cpp @@ -35,7 +35,7 @@ using std::vector; using doris::ColumnStatistics; //此文件主要用于对用户发送的查询条件和删除条件进行处理,逻辑上二者都可以分为三层 -//Condtiion->Condcolumn->Cond +//Condition->Condcolumn->Cond //Condition表示用户发的单个条件 //Condcolumn表示一列上所有条件的集合。 //Conds表示一列上的单个条件. @@ -158,8 +158,8 @@ OLAPStatus Cond::init(const TCondition& tcond, const TabletColumn& column) { max_value_field = f.get(); } - auto insert_reslut = operand_set.insert(f.get()); - if (!insert_reslut.second) { + auto insert_result = operand_set.insert(f.get()); + if (!insert_result.second) { LOG(WARNING) << "Duplicate operand in in-predicate.[condition=" << operand << "]"; // Duplicated, let unique_ptr delete field } else { @@ -527,28 +527,28 @@ int CondColumn::del_eval(const std::pair& statisti /* * the relationship between cond A and B is A & B. * if all delete condition is satisfied, the data can be filtered. - * elseif any delete condition is not satifsified, the data can't be filtered. + * elseif any delete condition is not satisfied, the data can't be filtered. * else is the partial satisfied. 
*/ int ret = DEL_NOT_SATISFIED; - bool del_partial_statified = false; - bool del_not_statified = false; + bool del_partial_satisfied = false; + bool del_not_satisfied = false; for (auto& each_cond : _conds) { int del_ret = each_cond->del_eval(statistic); if (DEL_SATISFIED == del_ret) { continue; } else if (DEL_PARTIAL_SATISFIED == del_ret) { - del_partial_statified = true; + del_partial_satisfied = true; } else { - del_not_statified = true; + del_not_satisfied = true; break; } } - if (del_not_statified || _conds.empty()) { + if (del_not_satisfied || _conds.empty()) { // if the size of condcolumn vector is zero, // the delete condtion is not satisfied. ret = DEL_NOT_SATISFIED; - } else if (del_partial_statified) { + } else if (del_partial_satisfied) { ret = DEL_PARTIAL_SATISFIED; } else { ret = DEL_SATISFIED; @@ -646,7 +646,7 @@ int Conditions::delete_pruning_filter(const std::vector& zone_maps) co /* * the relationship between condcolumn A and B is A & B. * if any delete condition is not satisfied, the data can't be filtered. - * elseif all delete condition is satifsified, the data can be filtered. + * elseif all delete condition is satisfied, the data can be filtered. * else is the partial satisfied. */ int ret = DEL_NOT_SATISFIED; diff --git a/be/src/olap/olap_define.h b/be/src/olap/olap_define.h index cde102ca7afec2..87ea3eb0055e20 100644 --- a/be/src/olap/olap_define.h +++ b/be/src/olap/olap_define.h @@ -44,7 +44,7 @@ static const uint32_t OLAP_DEFAULT_COLUMN_STREAM_BUFFER_SIZE = 10 * 1024; // 此为百分比, 字典大小/原数据大小小于该百分比时, 启用字典编码 static const uint32_t OLAP_DEFAULT_COLUMN_DICT_KEY_SIZE_THRESHOLD = 80; // 30% // LRU Cache Key的大小 -static const size_t OLAP_LRU_CACHE_MAX_KEY_LENTH = OLAP_MAX_PATH_LEN * 2; +static const size_t OLAP_LRU_CACHE_MAX_KEY_LENGTH = OLAP_MAX_PATH_LEN * 2; static const uint64_t OLAP_FIX_HEADER_MAGIC_NUMBER = 0; // 执行be/ce时默认的候选集大小 @@ -461,7 +461,7 @@ const std::string ROWSET_ID_PREFIX = "s_"; } while (0) #ifndef BUILD_VERSION -#define BUILD_VERSION "Unknow" +#define BUILD_VERSION "Unknown" #endif } // namespace doris diff --git a/be/src/olap/olap_index.cpp b/be/src/olap/olap_index.cpp index bd9ac44b91cd2f..bfaed20d26d67b 100644 --- a/be/src/olap/olap_index.cpp +++ b/be/src/olap/olap_index.cpp @@ -170,7 +170,7 @@ OLAPStatus MemIndex::load_segment(const char* file, size_t *current_num_rows_per } /* - * convert storage layout to memory layout for olap/ndex + * convert storage layout to memory layout for olap/index * In this procedure, string type(Varchar/Char) should be * converted with caution. Hyperloglog type will not be * key, it can not to be handled. @@ -450,7 +450,7 @@ OLAPStatus MemIndex::get_row_block_position( if (pos.segment >= segment_count() || pos.offset >= _meta[pos.segment].count()) { OLAP_LOG_WARNING("fail to get RowBlockPosition from OLAPIndexOffset. 
" - "[IndexOffse={segment=%u offset=%u} segment_count=%lu items_count=%lu]", + "[IndexOffset={segment=%u offset=%u} segment_count=%lu items_count=%lu]", pos.segment, pos.offset, segment_count(), diff --git a/be/src/olap/olap_meta.cpp b/be/src/olap/olap_meta.cpp index f9ca9775b11dc2..51d005a9dd931d 100755 --- a/be/src/olap/olap_meta.cpp +++ b/be/src/olap/olap_meta.cpp @@ -73,7 +73,7 @@ OLAPStatus OlapMeta::init() { column_families.emplace_back(DEFAULT_COLUMN_FAMILY, ColumnFamilyOptions()); column_families.emplace_back(DORIS_COLUMN_FAMILY, ColumnFamilyOptions()); - // meta column family add prefix extrator to improve performance and ensure correctness + // meta column family add prefix extractor to improve performance and ensure correctness ColumnFamilyOptions meta_column_family; meta_column_family.prefix_extractor.reset(NewFixedPrefixTransform(PREFIX_LENGTH)); column_families.emplace_back(META_COLUMN_FAMILY, meta_column_family); diff --git a/be/src/olap/olap_server.cpp b/be/src/olap/olap_server.cpp index 57ed54d58a75c0..0aa8867c56cd7a 100644 --- a/be/src/olap/olap_server.cpp +++ b/be/src/olap/olap_server.cpp @@ -95,7 +95,7 @@ Status StorageEngine::start_bg_threads() { &tablet_checkpoint_thread)); _tablet_checkpoint_threads.emplace_back(tablet_checkpoint_thread); } - LOG(INFO) << "tablet checkpint thread started"; + LOG(INFO) << "tablet checkpoint thread started"; // fd cache clean thread RETURN_IF_ERROR( @@ -124,7 +124,7 @@ Status StorageEngine::start_bg_threads() { LOG(INFO) << "path scan/gc threads started. number:" << get_stores().size(); } - LOG(INFO) << "all storage engine's backgroud threads are started."; + LOG(INFO) << "all storage engine's background threads are started."; return Status::OK(); } diff --git a/be/src/olap/olap_snapshot_converter.cpp b/be/src/olap/olap_snapshot_converter.cpp index 77c47880b94407..0c4c159191ad14 100755 --- a/be/src/olap/olap_snapshot_converter.cpp +++ b/be/src/olap/olap_snapshot_converter.cpp @@ -74,7 +74,7 @@ OLAPStatus OlapSnapshotConverter::to_olap_header(const TabletMetaPB& tablet_meta *delete_condition = pdelta->delete_condition(); } } - // not add pending delta, it is usedless in clone or backup restore + // not add pending delta, it is useless in clone or backup restore for (auto& inc_rs_meta : tablet_meta_pb.inc_rs_metas()) { PDelta* pdelta = olap_header->add_incremental_delta(); convert_to_pdelta(inc_rs_meta, pdelta); diff --git a/be/src/olap/out_stream.cpp b/be/src/olap/out_stream.cpp index 972525b858ebf4..9d8c9403fa717d 100644 --- a/be/src/olap/out_stream.cpp +++ b/be/src/olap/out_stream.cpp @@ -236,7 +236,7 @@ OLAPStatus OutStream::_spill() { if (head_pos != 0) { // 之前_compressed里有数据, 这种情况下先输出compressed, - // 此时_oversflow一定是空的 + // 此时_overflow一定是空的 _output_compressed(); } diff --git a/be/src/olap/page_cache.h b/be/src/olap/page_cache.h index cc2cd2cd3d1b2d..f7aa7b2f2c1d2f 100644 --- a/be/src/olap/page_cache.h +++ b/be/src/olap/page_cache.h @@ -28,7 +28,7 @@ namespace doris { class PageCacheHandle; -// Warpper around Cache, and used for cache page of column datas +// Wrapper around Cache, and used for cache page of column data // in Segment. // TODO(zc): We should add some metric to see cache hit/miss rate. class StoragePageCache { @@ -71,7 +71,7 @@ class StoragePageCache { bool lookup(const CacheKey& key, PageCacheHandle* handle); // Insert a page with key into this cache. - // Given hanlde will be set to valid reference. + // Given handle will be set to valid reference. 
// This function is thread-safe, and when two clients insert two same key // concurrently, this function can assure that only one page is cached. // The in_memory page will have higher priority. diff --git a/be/src/olap/push_handler.cpp b/be/src/olap/push_handler.cpp index 07801d217c6824..b1df06a5e180fb 100644 --- a/be/src/olap/push_handler.cpp +++ b/be/src/olap/push_handler.cpp @@ -102,7 +102,7 @@ OLAPStatus PushHandler::_do_streaming_ingestion( // prepare txn will be always successful // if current tablet is under schema change, origin tablet is successful and - // new tablet is not sucessful, it maybe a fatal error because new tablet has + // new tablet is not successful, it maybe a fatal error because new tablet has // not load successfully // only when fe sends schema_change true, should consider to push related @@ -446,7 +446,7 @@ OLAPStatus PushHandler::_convert(TabletSharedPtr cur_tablet, #ifndef DORIS_WITH_LZO if (need_decompress) { - // if lzo is diabled, compressed data is not allowed here + // if lzo is disabled, compressed data is not allowed here res = OLAP_ERR_LZO_DISABLED; break; } diff --git a/be/src/olap/reader.cpp b/be/src/olap/reader.cpp index f925b1073a01d4..1d616c187a34a1 100644 --- a/be/src/olap/reader.cpp +++ b/be/src/olap/reader.cpp @@ -415,7 +415,7 @@ OLAPStatus Reader::_unique_key_next_row(RowCursor* row_cursor, MemPool* mem_pool return OLAP_SUCCESS; } cur_delete_flag = _next_delete_flag; - // the verion is in reverse order, the first row is the highest version, + // the version is in reverse order, the first row is the highest version, // in UNIQUE_KEY highest version is the final result, there is no need to // merge the lower versions direct_copy_row(row_cursor, *_next_key); @@ -643,7 +643,7 @@ OLAPStatus Reader::_init_return_columns(const ReaderParams& read_params) { _value_cids.push_back(i); } } - VLOG(3) << "return column is empty, using full column as defaut."; + VLOG(3) << "return column is empty, using full column as default."; } else if (read_params.reader_type == READER_CHECKSUM) { _return_columns = read_params.return_columns; for (auto id : read_params.return_columns) { diff --git a/be/src/olap/reader.h b/be/src/olap/reader.h index d5d77ea6a43be1..1edd59112b5a98 100644 --- a/be/src/olap/reader.h +++ b/be/src/olap/reader.h @@ -76,7 +76,7 @@ struct ReaderParams { void check_validation() const { if (UNLIKELY(version.first == -1)) { - LOG(FATAL) << "verison is not set. tablet=" << tablet->full_name(); + LOG(FATAL) << "version is not set. tablet=" << tablet->full_name(); } } diff --git a/be/src/olap/row.h b/be/src/olap/row.h index bfc11ef5c91fd2..f37d056086ee53 100644 --- a/be/src/olap/row.h +++ b/be/src/olap/row.h @@ -115,7 +115,7 @@ void init_row_with_others(DstRowType* dst, const SrcRowType& src, MemPool* mem_p } // Copy other row to destination directly. This function assume -// that destination has enough space for source conetent. +// that destination has enough space for source content. template void direct_copy_row(DstRowType* dst, const SrcRowType& src) { for (auto cid : dst->schema()->column_ids()) { @@ -173,7 +173,7 @@ void agg_update_row_with_sequence(DstRowType* dst, const SrcRowType& src, uint32 } // Do aggregate update source row to destination row. -// This funcion will operate on given cids. +// This function will operate on given cids. 
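A minimal sketch of the per-column aggregate update that the row.h comment above documents; the cell() and agg_update() calls are assumed from context (direct_copy_row in the same header iterates cells the same way), so treat this as illustrative rather than the exact Doris code:

#include <cstdint>
#include <vector>

// For each given column id, aggregate the source cell into the destination
// cell instead of overwriting it (e.g. SUM/MAX/REPLACE semantics per column).
template <typename DstRowType, typename SrcRowType>
void agg_update_row(const std::vector<uint32_t>& cids, DstRowType* dst,
                    const SrcRowType& src) {
    for (uint32_t cid : cids) {
        auto dst_cell = dst->cell(cid);
        auto src_cell = src.cell(cid);
        dst->schema()->column(cid)->agg_update(&dst_cell, src_cell);
    }
}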
// TODO(zc): unify two versions of agg_update_row template void agg_update_row(const std::vector& cids, DstRowType* dst, const SrcRowType& src) { diff --git a/be/src/olap/row_block.cpp b/be/src/olap/row_block.cpp index b04d625f0ea50c..b99989193b37fc 100644 --- a/be/src/olap/row_block.cpp +++ b/be/src/olap/row_block.cpp @@ -58,7 +58,7 @@ void RowBlock::init(const RowBlockInfo& block_info) { OLAPStatus RowBlock::finalize(uint32_t row_num) { if (row_num > _capacity) { - OLAP_LOG_WARNING("Intput row num is larger than internal row num." + OLAP_LOG_WARNING("Input row num is larger than internal row num." "[row_num=%u; _info.row_num=%u]", row_num, _info.row_num); diff --git a/be/src/olap/rowset/rowset_meta.h b/be/src/olap/rowset/rowset_meta.h index 882f46609f28cc..0022c301292f13 100644 --- a/be/src/olap/rowset/rowset_meta.h +++ b/be/src/olap/rowset/rowset_meta.h @@ -385,7 +385,7 @@ class RowsetMeta { // ATTN(cmy): the num segments should be read from rowset meta pb. // But the previous code error caused this value not to be set in some cases. // So when init the rowset meta and find that the num_segments is 0(not set), - // we will try to calculate the num segmengts from AlphaRowsetExtraMetaPB, + // we will try to calculate the num segments from AlphaRowsetExtraMetaPB, // and then set the num_segments field. // This should only happen in some rowsets converted from old version. // and for all newly created rowsets, the num_segments field must be set. diff --git a/be/src/olap/rowset/run_length_integer_reader.cpp b/be/src/olap/rowset/run_length_integer_reader.cpp index 888ae5228bd496..e76b37280ec96d 100644 --- a/be/src/olap/rowset/run_length_integer_reader.cpp +++ b/be/src/olap/rowset/run_length_integer_reader.cpp @@ -170,7 +170,7 @@ OLAPStatus RunLengthIntegerReader::_read_patched_base_values(uint8_t first_byte) res = _input->read((char*)&byte); if (OLAP_SUCCESS != res) { - OLAP_LOG_WARNING("fail to read byte from in_strem.[res=%d]", res); + OLAP_LOG_WARNING("fail to read byte from in_stream.[res=%d]", res); return res; } @@ -200,7 +200,7 @@ OLAPStatus RunLengthIntegerReader::_read_patched_base_values(uint8_t first_byte) res = _input->read(&four_byte); if (OLAP_SUCCESS != res) { - OLAP_LOG_WARNING("fail to read byte from in_strem.[res=%d]", res); + OLAP_LOG_WARNING("fail to read byte from in_stream.[res=%d]", res); return res; } diff --git a/be/src/olap/rowset/run_length_integer_writer.cpp b/be/src/olap/rowset/run_length_integer_writer.cpp index da823e93740cfc..8a3fca6db48f80 100644 --- a/be/src/olap/rowset/run_length_integer_writer.cpp +++ b/be/src/olap/rowset/run_length_integer_writer.cpp @@ -256,7 +256,7 @@ void RunLengthIntegerWriter::_prepare_patched_blob() { prev = i; gap_list[gap_idx++] = gap; - // extract the most significat bits that are over mask bits + // extract the most significant bits that are over mask bits int64_t patch = ((uint64_t)_base_reduced_literals[i]) >> _br_bits_95p; patch_list[patch_idx++] = patch; @@ -572,7 +572,7 @@ OLAPStatus RunLengthIntegerWriter::_write_values() { break; default: - OLAP_LOG_WARNING("Unknow encoding [encoding=%d]", _encoding); + OLAP_LOG_WARNING("Unknown encoding [encoding=%d]", _encoding); return OLAP_ERR_INPUT_PARAMETER_ERROR; } @@ -668,7 +668,7 @@ OLAPStatus RunLengthIntegerWriter::write(int64_t value) { } } - // after writing values re-intialize + // after writing values re-initialize if (_num_literals == 0) { _init_literals(value); } else { diff --git a/be/src/olap/rowset/segment_reader.cpp b/be/src/olap/rowset/segment_reader.cpp index 
b01f476c3145d7..941b6043af2096 100644 --- a/be/src/olap/rowset/segment_reader.cpp +++ b/be/src/olap/rowset/segment_reader.cpp @@ -198,13 +198,13 @@ OLAPStatus SegmentReader::init(bool is_using_cache) { OLAPStatus res = OLAP_SUCCESS; res = _load_segment_file(); if (OLAP_SUCCESS != res) { - OLAP_LOG_WARNING("fail to load sgment file. "); + OLAP_LOG_WARNING("fail to load segment file. "); return res; } // 文件头 res = _set_segment_info(); if (OLAP_SUCCESS != res) { - OLAP_LOG_WARNING("fail to set sgment info. "); + OLAP_LOG_WARNING("fail to set segment info. "); return res; } @@ -625,7 +625,7 @@ OLAPStatus SegmentReader::_load_index(bool is_using_cache) { FieldType type = _get_field_type_by_index(table_column_id); char* stream_buffer = NULL; - char key_buf[OLAP_LRU_CACHE_MAX_KEY_LENTH]; + char key_buf[OLAP_LRU_CACHE_MAX_KEY_LENGTH]; CacheKey key = _construct_index_stream_key(key_buf, sizeof(key_buf), _file_handler.file_name(), diff --git a/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp b/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp index a70255296ebd35..4afc319f2a4c95 100644 --- a/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp +++ b/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp @@ -37,7 +37,7 @@ Status IndexedColumnReader::load(bool use_page_cache, bool kept_in_memory) { } RETURN_IF_ERROR(EncodingInfo::get(_type_info, _meta.encoding(), &_encoding_info)); RETURN_IF_ERROR(get_block_compression_codec(_meta.compression(), &_compress_codec)); - _validx_key_coder = get_key_coder(_type_info->type()); + _value_key_coder = get_key_coder(_type_info->type()); std::unique_ptr rblock; fs::BlockManager* block_mgr = fs::fs_util::block_manager(); @@ -159,7 +159,7 @@ Status IndexedColumnIterator::seek_at_or_after(const void* key, bool* exact_matc if (_reader->_has_index_page) { // seek index to determine the data page to seek std::string encoded_key; - _reader->_validx_key_coder->full_encode_ascending(key, &encoded_key); + _reader->_value_key_coder->full_encode_ascending(key, &encoded_key); RETURN_IF_ERROR(_value_iter.seek_at_or_before(encoded_key)); data_page_pp = _value_iter.current_page_pointer(); _current_iter = &_value_iter; diff --git a/be/src/olap/rowset/segment_v2/indexed_column_reader.h b/be/src/olap/rowset/segment_v2/indexed_column_reader.h index 46072f0d03a907..1b887f90a21aa1 100644 --- a/be/src/olap/rowset/segment_v2/indexed_column_reader.h +++ b/be/src/olap/rowset/segment_v2/indexed_column_reader.h @@ -87,7 +87,7 @@ class IndexedColumnReader { const TypeInfo* _type_info = nullptr; const EncodingInfo* _encoding_info = nullptr; const BlockCompressionCodec* _compress_codec = nullptr; - const KeyCoder* _validx_key_coder = nullptr; + const KeyCoder* _value_key_coder = nullptr; }; class IndexedColumnIterator { diff --git a/be/src/olap/rowset/segment_v2/indexed_column_writer.cpp b/be/src/olap/rowset/segment_v2/indexed_column_writer.cpp index 1a4bf2dc369a50..20730ae7953f64 100644 --- a/be/src/olap/rowset/segment_v2/indexed_column_writer.cpp +++ b/be/src/olap/rowset/segment_v2/indexed_column_writer.cpp @@ -45,7 +45,7 @@ IndexedColumnWriter::IndexedColumnWriter(const IndexedColumnWriterOptions& optio _mem_pool(_mem_tracker.get()), _num_values(0), _num_data_pages(0), - _validx_key_coder(nullptr), + _value_key_coder(nullptr), _compress_codec(nullptr) { _first_value.resize(_typeinfo->size()); } @@ -69,7 +69,7 @@ Status IndexedColumnWriter::init() { } if (_options.write_value_index) { _value_index_builder.reset(new IndexPageBuilder(_options.index_page_size, true)); - 
_validx_key_coder = get_key_coder(_typeinfo->type()); + _value_key_coder = get_key_coder(_typeinfo->type()); } if (_options.compression != NO_COMPRESSION) { @@ -123,7 +123,7 @@ Status IndexedColumnWriter::_finish_current_data_page() { if (_options.write_value_index) { std::string key; - _validx_key_coder->full_encode_ascending(_first_value.data(), &key); + _value_key_coder->full_encode_ascending(_first_value.data(), &key); // TODO short separate key optimize _value_index_builder->add(key, _last_data_page); // TODO record last key in short separate key optimize diff --git a/be/src/olap/rowset/segment_v2/indexed_column_writer.h b/be/src/olap/rowset/segment_v2/indexed_column_writer.h index d2704d1069041f..77d093b7c87990 100644 --- a/be/src/olap/rowset/segment_v2/indexed_column_writer.h +++ b/be/src/olap/rowset/segment_v2/indexed_column_writer.h @@ -110,7 +110,7 @@ class IndexedColumnWriter { // builder for index pages of value index, null if write_value_index == false std::unique_ptr _value_index_builder; // encoder for value index's key - const KeyCoder* _validx_key_coder; + const KeyCoder* _value_key_coder; const BlockCompressionCodec* _compress_codec; DISALLOW_COPY_AND_ASSIGN(IndexedColumnWriter); diff --git a/be/src/olap/rowset/segment_v2/page_builder.h b/be/src/olap/rowset/segment_v2/page_builder.h index 5280ef26a68b9d..bc2c9f415e5bd8 100644 --- a/be/src/olap/rowset/segment_v2/page_builder.h +++ b/be/src/olap/rowset/segment_v2/page_builder.h @@ -50,7 +50,7 @@ class PageBuilder { // than requested if the page is full. // // vals size should be decided according to the page build type - // TODO make sure vals is natually-aligned to its type so that impls can use aligned load + // TODO make sure vals is naturally-aligned to its type so that impls can use aligned load // instead of memcpy to copy values. virtual Status add(const uint8_t* vals, size_t* count) = 0; diff --git a/be/src/olap/rowset/segment_v2/plain_page.h b/be/src/olap/rowset/segment_v2/plain_page.h index d3056de7605f7a..6908b0941194fe 100644 --- a/be/src/olap/rowset/segment_v2/plain_page.h +++ b/be/src/olap/rowset/segment_v2/plain_page.h @@ -124,7 +124,7 @@ class PlainPageDecoder : public PageDecoder { if (_data.size < PLAIN_PAGE_HEADER_SIZE) { std::stringstream ss; - ss << "file corrupton: not enough bytes for header in PlainPageDecoder ." + ss << "file corruption: not enough bytes for header in PlainPageDecoder ." 
"invalid data size:" << _data.size << ", header size:" << PLAIN_PAGE_HEADER_SIZE; return Status::InternalError(ss.str()); } @@ -133,7 +133,7 @@ class PlainPageDecoder : public PageDecoder { if (_data.size != PLAIN_PAGE_HEADER_SIZE + _num_elems * SIZE_OF_TYPE) { std::stringstream ss; - ss << "file corrupton: unexpected data size."; + ss << "file corruption: unexpected data size."; return Status::InternalError(ss.str()); } diff --git a/be/src/olap/schema.h b/be/src/olap/schema.h index 63b858c35cbab6..a766e8cd782a34 100644 --- a/be/src/olap/schema.h +++ b/be/src/olap/schema.h @@ -115,7 +115,7 @@ class Schema { return _col_offsets[cid]; } - // TODO(lingbin): What is the difference between colun_size() and index_size() + // TODO(lingbin): What is the difference between column_size() and index_size() size_t column_size(ColumnId cid) const { return _cols[cid]->size(); } diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp index cdfab21346a276..a85cce44183855 100644 --- a/be/src/olap/schema_change.cpp +++ b/be/src/olap/schema_change.cpp @@ -1423,7 +1423,7 @@ OLAPStatus SchemaChangeHandler::_do_process_alter_tablet_v2(const TAlterTabletRe if (new_tablet->tablet_state() != TABLET_NOTREADY) { res = _validate_alter_result(new_tablet, request); LOG(INFO) << "tablet's state=" << new_tablet->tablet_state() - << " the convert job alreay finished, check its version" + << " the convert job already finished, check its version" << " res=" << res; return res; } @@ -1938,7 +1938,7 @@ OLAPStatus SchemaChangeHandler::_convert_historical_rowsets(const SchemaChangePa } res = sc_params.new_tablet->add_rowset(new_rowset, false); if (res == OLAP_ERR_PUSH_VERSION_ALREADY_EXIST) { - LOG(WARNING) << "version already exist, version revert occured. " + LOG(WARNING) << "version already exist, version revert occurred. " << "tablet=" << sc_params.new_tablet->full_name() << ", version='" << rs_reader->version().first << "-" << rs_reader->version().second; StorageEngine::instance()->add_unused_rowset(new_rowset); @@ -2179,7 +2179,7 @@ OLAPStatus SchemaChangeHandler::_validate_alter_result(TabletSharedPtr new_table const TAlterTabletReqV2& request) { Version max_continuous_version = {-1, 0}; VersionHash max_continuous_version_hash = 0; - new_tablet->max_continuous_version_from_begining(&max_continuous_version, + new_tablet->max_continuous_version_from_beginning(&max_continuous_version, &max_continuous_version_hash); LOG(INFO) << "find max continuous version of tablet=" << new_tablet->full_name() << ", start_version=" << max_continuous_version.first diff --git a/be/src/olap/schema_change.h b/be/src/olap/schema_change.h index 7aebe7e3bced67..8835a1e8394b3f 100644 --- a/be/src/olap/schema_change.h +++ b/be/src/olap/schema_change.h @@ -130,7 +130,7 @@ class LinkedSchemaChange : public SchemaChange { class SchemaChangeDirectly : public SchemaChange { public: // @params tablet the instance of tablet which has new schema. 
- // @params row_block_changer changer to modifiy the data of RowBlock + // @params row_block_changer changer to modify the data of RowBlock explicit SchemaChangeDirectly(const RowBlockChanger& row_block_changer); virtual ~SchemaChangeDirectly(); diff --git a/be/src/olap/skiplist.h b/be/src/olap/skiplist.h index 014053b8e45138..b5721a893bc486 100644 --- a/be/src/olap/skiplist.h +++ b/be/src/olap/skiplist.h @@ -408,7 +408,7 @@ void SkipList::InsertWithHint(const Key& key, bool is_exist, Hin DCHECK(!is_exist || x) << "curr pointer must not be null if row exists"; #ifndef BE_TEST - // The key already exists and duplicate keys are not allowed, so we need to aggreage them + // The key already exists and duplicate keys are not allowed, so we need to aggregate them if (!_can_dup && is_exist) { return; } diff --git a/be/src/olap/snapshot_manager.cpp b/be/src/olap/snapshot_manager.cpp index 6dc549659284a8..f77528bf769c34 100644 --- a/be/src/olap/snapshot_manager.cpp +++ b/be/src/olap/snapshot_manager.cpp @@ -376,19 +376,19 @@ OLAPStatus SnapshotManager::_create_snapshot_files( } else { ReadLock rdlock(ref_tablet->get_header_lock_ptr()); // get latest version - const RowsetSharedPtr lastest_version = ref_tablet->rowset_with_max_version(); - if (lastest_version == nullptr) { + const RowsetSharedPtr last_version = ref_tablet->rowset_with_max_version(); + if (last_version == nullptr) { LOG(WARNING) << "tablet has not any version. path=" << ref_tablet->full_name().c_str(); res = OLAP_ERR_VERSION_NOT_EXIST; break; } // get snapshot version, use request.version if specified - int32_t version = lastest_version->end_version(); + int32_t version = last_version->end_version(); if (request.__isset.version) { - if (lastest_version->end_version() < request.version) { + if (last_version->end_version() < request.version) { LOG(WARNING) << "invalid make snapshot request. " - << " version=" << lastest_version->end_version() + << " version=" << last_version->end_version() << " req_version=" << request.version; res = OLAP_ERR_INPUT_PARAMETER_ERROR; break; diff --git a/be/src/olap/storage_engine.cpp b/be/src/olap/storage_engine.cpp index ade3719380d76b..adbf9a5e43245c 100644 --- a/be/src/olap/storage_engine.cpp +++ b/be/src/olap/storage_engine.cpp @@ -488,7 +488,7 @@ bool StorageEngine::_delete_tablets_on_unused_root_path() { } void StorageEngine::stop() { - // trigger the waitting threads + // trigger the waiting threads notify_listeners(); std::lock_guard l(_store_lock); diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp index d21c2f3f640bed..2b7ac865ff11a1 100644 --- a/be/src/olap/tablet.cpp +++ b/be/src/olap/tablet.cpp @@ -222,7 +222,7 @@ OLAPStatus Tablet::add_rowset(RowsetSharedPtr rowset, bool need_persist) { if (_contains_rowset(rowset->rowset_id())) { return OLAP_SUCCESS; } - // Otherwise, the version shoud be not contained in any existing rowset. + // Otherwise, the version should be not contained in any existing rowset. RETURN_NOT_OK(_contains_version(rowset->version())); RETURN_NOT_OK(_tablet_meta->add_rs_meta(rowset->rowset_meta())); @@ -475,18 +475,18 @@ void Tablet::delete_expired_stale_rowset() { // 2.1 check whether missed_versions and after_missed_versions are the same. // when they are the same, it means we can delete the path securely. 
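As an aside on the consistency check described above: two missed-version lists are "the same" exactly when they have equal length and agree element-wise, which is what the hand-written loop that follows verifies. A minimal, self-contained sketch of that predicate (Version is a hypothetical pair-like stand-in here; the real type lives in the OLAP headers):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical stand-in for the real Version type: {start_version, end_version}.
using Version = std::pair<int64_t, int64_t>;

// Equal sizes plus element-wise agreement, the same condition the loop below checks.
static bool versions_consistent(const std::vector<Version>& before,
                                const std::vector<Version>& after) {
    return before.size() == after.size() &&
           std::equal(before.begin(), before.end(), after.begin());
}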
- bool is_missng = missed_versions.size() != after_missed_versions.size(); + bool is_missing = missed_versions.size() != after_missed_versions.size(); - if (!is_missng) { + if (!is_missing) { for (int ver_index = 0; ver_index < missed_versions.size(); ver_index++) { if(missed_versions[ver_index] != after_missed_versions[ver_index]) { - is_missng = true; + is_missing = true; break; } } } - if (is_missng) { + if (is_missing) { LOG(WARNING) << "The consistent version check fails, there are bugs. " << "Reconstruct the tracker to recover versions in tablet=" << tablet_id(); @@ -499,20 +499,20 @@ void Tablet::delete_expired_stale_rowset() { calc_missed_versions_unlocked(lastest_delta->end_version(), &recover_missed_versions); // 4.1 check whether missed_versions and recover_missed_versions are the same. - // when they are the same, it means we recover successlly. - bool is_recover_missng = missed_versions.size() != recover_missed_versions.size(); + // when they are the same, it means we recover successfully. + bool is_recover_missing = missed_versions.size() != recover_missed_versions.size(); - if (!is_recover_missng) { + if (!is_recover_missing) { for (int ver_index = 0; ver_index < missed_versions.size(); ver_index++) { if(missed_versions[ver_index] != recover_missed_versions[ver_index]) { - is_recover_missng = true; + is_recover_missing = true; break; } } } // 5. check recover fail, version is mission - if (is_recover_missng) { + if (is_recover_missing) { if (!config::ignore_rowset_stale_unconsistent_delete) { LOG(FATAL) << "rowset stale unconsistent delete. tablet= " << tablet_id(); } else { @@ -838,13 +838,13 @@ void Tablet::calc_missed_versions_unlocked(int64_t spec_version, } } -void Tablet::max_continuous_version_from_begining(Version* version, +void Tablet::max_continuous_version_from_beginning(Version* version, VersionHash* v_hash) { ReadLock rdlock(&_meta_lock); - _max_continuous_version_from_begining_unlocked(version, v_hash); + _max_continuous_version_from_beginning_unlocked(version, v_hash); } -void Tablet::_max_continuous_version_from_begining_unlocked(Version* version, +void Tablet::_max_continuous_version_from_beginning_unlocked(Version* version, VersionHash* v_hash) const { vector> existing_versions; for (auto& rs : _tablet_meta->all_rs_metas()) { @@ -1064,13 +1064,13 @@ TabletInfo Tablet::get_tablet_info() const { return TabletInfo(tablet_id(), schema_hash(), tablet_uid()); } -void Tablet::pick_candicate_rowsets_to_cumulative_compaction( +void Tablet::pick_candidate_rowsets_to_cumulative_compaction( int64_t skip_window_sec, std::vector* candidate_rowsets) { ReadLock rdlock(&_meta_lock); - _cumulative_compaction_policy->pick_candicate_rowsets(skip_window_sec, _rs_version_map, _cumulative_point, candidate_rowsets); + _cumulative_compaction_policy->pick_candidate_rowsets(skip_window_sec, _rs_version_map, _cumulative_point, candidate_rowsets); } -void Tablet::pick_candicate_rowsets_to_base_compaction(vector* candidate_rowsets) { +void Tablet::pick_candidate_rowsets_to_base_compaction(vector* candidate_rowsets) { ReadLock rdlock(&_meta_lock); for (auto& it : _rs_version_map) { if (it.first.first < _cumulative_point) { @@ -1261,7 +1261,7 @@ void Tablet::build_tablet_report_info(TTabletInfo* tablet_info) { tablet_info->data_size = _tablet_meta->tablet_footprint(); Version version = { -1, 0 }; VersionHash v_hash = 0; - _max_continuous_version_from_begining_unlocked(&version, &v_hash); + _max_continuous_version_from_beginning_unlocked(&version, &v_hash); auto max_rowset = 
rowset_with_max_version(); if (max_rowset != nullptr) { if (max_rowset->version() != version) { diff --git a/be/src/olap/tablet.h b/be/src/olap/tablet.h index ddf2248e6ebfb1..07863824702d08 100644 --- a/be/src/olap/tablet.h +++ b/be/src/olap/tablet.h @@ -78,7 +78,7 @@ class Tablet : public BaseTablet { inline Version max_version() const; inline CumulativeCompactionPolicy* cumulative_compaction_policy(); - // propreties encapsulated in TabletSchema + // properties encapsulated in TabletSchema inline KeysType keys_type() const; inline size_t num_columns() const; inline size_t num_null_columns() const; @@ -106,7 +106,7 @@ class Tablet : public BaseTablet { OLAPStatus add_inc_rowset(const RowsetSharedPtr& rowset); void delete_expired_inc_rowsets(); - /// Delete stale rowset by timing. This delete policy uses now() munis + /// Delete stale rowset by timing. This delete policy uses now() minus /// config::tablet_rowset_expired_stale_sweep_time_sec to compute the deadline of expired rowset /// to delete. When rowset is deleted, it will be added to StorageEngine unused map and record /// need to delete flag. @@ -174,9 +174,9 @@ class Tablet : public BaseTablet { void calc_missed_versions_unlocked(int64_t spec_version, vector* missed_versions) const; - // This function to find max continous version from the beginning. + // This function finds the max continuous version from the beginning. // For example: If there are 1, 2, 3, 5, 6, 7 versions belongs tablet, then 3 is target. - void max_continuous_version_from_begining(Version* version, VersionHash* v_hash); + void max_continuous_version_from_beginning(Version* version, VersionHash* v_hash); // operation for query OLAPStatus split_range( @@ -208,9 +208,9 @@ class Tablet : public BaseTablet { TabletInfo get_tablet_info() const; - void pick_candicate_rowsets_to_cumulative_compaction( + void pick_candidate_rowsets_to_cumulative_compaction( int64_t skip_window_sec, std::vector* candidate_rowsets); - void pick_candicate_rowsets_to_base_compaction(std::vector* candidate_rowsets); + void pick_candidate_rowsets_to_base_compaction(std::vector* candidate_rowsets); void calculate_cumulative_point(); // TODO(ygl): @@ -239,7 +239,7 @@ class Tablet : public BaseTablet { void _print_missed_versions(const std::vector& missed_versions) const; bool _contains_rowset(const RowsetId rowset_id); OLAPStatus _contains_version(const Version& version); - void _max_continuous_version_from_begining_unlocked(Version* version, + void _max_continuous_version_from_beginning_unlocked(Version* version, VersionHash* v_hash) const ; RowsetSharedPtr _rowset_with_largest_size(); void _delete_inc_rowset_by_version(const Version& version, const VersionHash& version_hash); diff --git a/be/src/olap/tablet_manager.cpp b/be/src/olap/tablet_manager.cpp index bf7d09890dac89..4146c7732f3195 100644 --- a/be/src/olap/tablet_manager.cpp +++ b/be/src/olap/tablet_manager.cpp @@ -315,8 +315,8 @@ TabletSharedPtr TabletManager::_internal_create_tablet_unlocked( // Create init version if this is not a restore mode replica and request.version is set // bool in_restore_mode = request.__isset.in_restore_mode && request.in_restore_mode; // if (!in_restore_mode && request.__isset.version) { - // create inital rowset before add it to storage engine could omit many locks - res = _create_inital_rowset_unlocked(request, tablet.get()); + // create initial rowset before add it to storage engine could omit many locks + res = _create_initial_rowset_unlocked(request, tablet.get()); if (res != OLAP_SUCCESS) {
LOG(WARNING) << "fail to create initial version for tablet. res=" << res; break; @@ -326,7 +326,7 @@ TabletSharedPtr TabletManager::_internal_create_tablet_unlocked( if (request.__isset.base_tablet_id && request.base_tablet_id > 0) { LOG(INFO) << "request for alter-tablet v2, do not add alter task to tablet"; // if this is a new alter tablet, has to set its state to not ready - // because schema change hanlder depends on it to check whether history data + // because schema change handler depends on it to check whether history data // convert finished tablet->set_tablet_state(TabletState::TABLET_NOTREADY); } else { @@ -349,7 +349,7 @@ TabletSharedPtr TabletManager::_internal_create_tablet_unlocked( tablet->set_creation_time(new_creation_time); } } - // Add tablet to StorageEngine will make it visiable to user + // Add tablet to StorageEngine will make it visible to user res = _add_tablet_unlocked(new_tablet_id, new_schema_hash, tablet, true, false); if (res != OLAP_SUCCESS) { LOG(WARNING) << "fail to add tablet to StorageEngine. res=" << res; @@ -405,7 +405,7 @@ TabletSharedPtr TabletManager::_create_tablet_meta_and_dir_unlocked( last_dir = data_dir; TabletMetaSharedPtr tablet_meta; - // if create meta faild, do not need to clean dir, because it is only in memory + // if create meta failed, do not need to clean dir, because it is only in memory OLAPStatus res = _create_tablet_meta_unlocked( request, data_dir, is_schema_change, base_tablet, &tablet_meta); if (res != OLAP_SUCCESS) { @@ -461,7 +461,7 @@ OLAPStatus TabletManager::_drop_tablet_unlocked( LOG(INFO) << "begin drop tablet. tablet_id=" << tablet_id << ", schema_hash=" << schema_hash; DorisMetrics::instance()->drop_tablet_requests_total->increment(1); - // Fetch tablet which need to be droped + // Fetch tablet which need to be dropped TabletSharedPtr to_drop_tablet = _get_tablet_unlocked(tablet_id, schema_hash); if (to_drop_tablet == nullptr) { LOG(WARNING) << "fail to drop tablet because it does not exist. " @@ -884,7 +884,7 @@ OLAPStatus TabletManager::load_tablet_from_dir(DataDir* store, TTabletId tablet_ LOG(WARNING) << "fail to load tablet_meta. 
file_path=" << header_path; return OLAP_ERR_ENGINE_LOAD_INDEX_TABLE_ERROR; } - // has to change shard id here, because meta file maybe copyed from other source + // has to change shard id here, because meta file maybe copied from other source // its shard is different from local shard tablet_meta->set_shard_id(shard); string meta_binary; @@ -1091,7 +1091,7 @@ OLAPStatus TabletManager::start_trash_sweep() { } } - // yield to avoid hoding _tablet_map_lock for too long + // yield to avoid holding _tablet_map_lock for too long if (clean_num >= 200) { break; } @@ -1249,7 +1249,7 @@ void TabletManager::_build_tablet_stat() { } } -OLAPStatus TabletManager::_create_inital_rowset_unlocked(const TCreateTabletReq& request, +OLAPStatus TabletManager::_create_initial_rowset_unlocked(const TCreateTabletReq& request, Tablet* tablet) { OLAPStatus res = OLAP_SUCCESS; if (request.version < 1) { diff --git a/be/src/olap/tablet_manager.h b/be/src/olap/tablet_manager.h index dda2f9ef207990..6222c53746c3f0 100644 --- a/be/src/olap/tablet_manager.h +++ b/be/src/olap/tablet_manager.h @@ -157,7 +157,7 @@ class TabletManager { bool keep_files, bool drop_old); bool _check_tablet_id_exist_unlocked(TTabletId tablet_id); - OLAPStatus _create_inital_rowset_unlocked(const TCreateTabletReq& request, + OLAPStatus _create_initial_rowset_unlocked(const TCreateTabletReq& request, Tablet* tablet); OLAPStatus _drop_tablet_directly_unlocked(TTabletId tablet_id, diff --git a/be/src/olap/tablet_meta_manager.cpp b/be/src/olap/tablet_meta_manager.cpp index 6935c2581ed8ba..b6a1b2011e7c12 100755 --- a/be/src/olap/tablet_meta_manager.cpp +++ b/be/src/olap/tablet_meta_manager.cpp @@ -138,7 +138,7 @@ OLAPStatus TabletMetaManager::traverse_headers(OlapMeta* meta, auto traverse_header_func = [&func](const std::string& key, const std::string& value) -> bool { std::vector parts; // old format key format: "hdr_" + tablet_id + "_" + schema_hash 0.11 - // new format key format: "tabletmata_" + tablet_id + "_" + schema_hash 0.10 + // new format key format: "tabletmeta_" + tablet_id + "_" + schema_hash 0.10 split_string(key, '_', &parts); if (parts.size() != 3) { LOG(WARNING) << "invalid tablet_meta key:" << key << ", split size:" << parts.size(); diff --git a/be/src/olap/task/engine_batch_load_task.h b/be/src/olap/task/engine_batch_load_task.h index 51a003d3b4fc6d..d6d6976a233062 100644 --- a/be/src/olap/task/engine_batch_load_task.h +++ b/be/src/olap/task/engine_batch_load_task.h @@ -59,7 +59,7 @@ class EngineBatchLoadTask : public EngineTask{ // but not actually deleted util delay_delete_time run out. // // @param [in] request specify tablet and delete conditions - // @param [out] tablet_info_vec return tablet lastest status, which + // @param [out] tablet_info_vec return tablet last status, which // include version info, row count, data size, etc // @return OLAP_SUCCESS if submit delete_data success virtual OLAPStatus _delete_data(const TPushReq& request, diff --git a/be/src/olap/task/engine_clone_task.cpp b/be/src/olap/task/engine_clone_task.cpp index ba4a6c753f5c8d..22d1f82fd633c7 100644 --- a/be/src/olap/task/engine_clone_task.cpp +++ b/be/src/olap/task/engine_clone_task.cpp @@ -275,7 +275,7 @@ void EngineCloneTask::_set_tablet_info(AgentStatus status, bool is_new_tablet) { _clone_req.schema_hash); if (drop_status != OLAP_SUCCESS && drop_status != OLAP_ERR_TABLE_NOT_FOUND) { // just log - LOG(WARNING) << "drop stale cloned table failed! tabelt id: " << _clone_req.tablet_id; + LOG(WARNING) << "drop stale cloned table failed! 
tablet id: " << _clone_req.tablet_id; } } status = DORIS_ERROR; @@ -312,7 +312,7 @@ AgentStatus EngineCloneTask::_clone_copy( // Make snapshot in remote olap engine *src_host = src; int32_t snapshot_version = 0; - // make snapsthot + // make snapshot auto st = _make_snapshot(src.host, src.be_port, _clone_req.tablet_id, _clone_req.schema_hash, timeout_s, @@ -487,7 +487,7 @@ Status EngineCloneTask::_download_files( RETURN_IF_ERROR(HttpClient::execute_with_retry(DOWNLOAD_FILE_MAX_RETRY, 1, list_files_cb)); vector file_name_list = strings::Split(file_list_str, "\n", strings::SkipWhitespace()); - // If the header file is not exist, the table could't loaded by olap engine. + // If the header file is not exist, the table couldn't loaded by olap engine. // Avoid of data is not complete, we copy the header file at last. // The header file's name is end of .hdr. for (int i = 0; i < file_name_list.size() - 1; ++i) { @@ -845,7 +845,7 @@ OLAPStatus EngineCloneTask::_clone_full_data(Tablet* tablet, TabletMeta* cloned_ } // clone_data to tablet - // only replace rowet info, must not modify other info such as alter task info. for example + // only replace rowset info, must not modify other info such as alter task info. for example // 1. local tablet finished alter task // 2. local tablet has error in push // 3. local tablet cloned rowset from other nodes diff --git a/be/src/olap/task/engine_storage_migration_task.cpp b/be/src/olap/task/engine_storage_migration_task.cpp index c7ffa374529c9f..7e35a137906263 100644 --- a/be/src/olap/task/engine_storage_migration_task.cpp +++ b/be/src/olap/task/engine_storage_migration_task.cpp @@ -88,15 +88,15 @@ OLAPStatus EngineStorageMigrationTask::_storage_medium_migrate( do { // get all versions to be migrate tablet->obtain_header_rdlock(); - const RowsetSharedPtr lastest_version = tablet->rowset_with_max_version(); - if (lastest_version == nullptr) { + const RowsetSharedPtr last_version = tablet->rowset_with_max_version(); + if (last_version == nullptr) { tablet->release_header_lock(); res = OLAP_ERR_VERSION_NOT_EXIST; LOG(WARNING) << "tablet has not any version."; break; } - int32_t end_version = lastest_version->end_version(); + int32_t end_version = last_version->end_version(); vector consistent_rowsets; res = tablet->capture_consistent_rowsets(Version(0, end_version), &consistent_rowsets); if (consistent_rowsets.empty()) { @@ -224,7 +224,7 @@ OLAPStatus EngineStorageMigrationTask::_storage_medium_migrate( return res; } -// TODO(ygl): lost some infomation here, such as cumulative layer point +// TODO(ygl): lost some information here, such as cumulative layer point void EngineStorageMigrationTask::_generate_new_header( DataDir* store, const uint64_t new_shard, const TabletSharedPtr& tablet, diff --git a/be/src/olap/txn_manager.h b/be/src/olap/txn_manager.h index d16e4532c9b889..52dca96e067b32 100755 --- a/be/src/olap/txn_manager.h +++ b/be/src/olap/txn_manager.h @@ -133,7 +133,7 @@ class TxnManager { bool has_txn(TPartitionId partition_id, TTransactionId transaction_id, TTabletId tablet_id, SchemaHash schema_hash, TabletUid tablet_uid); - // get all expired txns and save tham in expire_txn_map. + // get all expired txns and save them in expire_txn_map. // This is currently called before reporting all tablet info, to avoid iterating txn map for every tablets. 
void build_expire_txn_map(std::map>* expire_txn_map); diff --git a/be/src/runtime/cache/result_cache.h b/be/src/runtime/cache/result_cache.h index c05e253e7a6738..c193372568616b 100644 --- a/be/src/runtime/cache/result_cache.h +++ b/be/src/runtime/cache/result_cache.h @@ -104,7 +104,7 @@ class ResultCache { //Single thread updating and cleaning(only single be, Fe is not affected) mutable boost::shared_mutex _cache_mtx; ResultNodeMap _node_map; - //List of result nodes corresponding to SqlKey,last recently useed at the tail + //List of result nodes corresponding to SqlKey, most recently used at the tail ResultNodeList _node_list; size_t _cache_size; size_t _max_size; diff --git a/be/src/runtime/cache/result_node.cpp b/be/src/runtime/cache/result_node.cpp index 6924baec4095c0..39ae38eb4bae34 100644 --- a/be/src/runtime/cache/result_node.cpp +++ b/be/src/runtime/cache/result_node.cpp @@ -126,7 +126,7 @@ PCacheStatus ResultNode::update_partition(const PUpdateCacheRequest* request, bo /** * Only the range query of the key of the partition is supported, and the separated partition key query is not supported. * Because a query can only be divided into two parts, part1 get data from cache, part2 fetch_data by scan node from BE. -* Partion cache : 20191211-20191215 +* Partition cache : 20191211-20191215 * Hit cache parameter : [20191211 - 20191215], [20191212 - 20191214], [20191212 - 20191216],[20191210 - 20191215] * Miss cache parameter: [20191210 - 20191216] */ diff --git a/be/src/runtime/cache/result_node.h b/be/src/runtime/cache/result_node.h index 722509de4fdf3b..bc88357c6acb41 100644 --- a/be/src/runtime/cache/result_node.h +++ b/be/src/runtime/cache/result_node.h @@ -112,7 +112,7 @@ typedef boost::unordered_map PartitionRowBatch /** * Cache the result of one SQL, include many partition rowsets. -* Sql Cache: The partiton ID comes from the partition lastest updated. +* Sql Cache: The partition ID comes from the latest updated partition. * Partition Cache: The partition ID comes from the partition scanned by query. * The above two modes use the same cache structure. */ diff --git a/be/src/runtime/data_spliter.cpp b/be/src/runtime/data_spliter.cpp index f279f2ca5d6acb..62147a20e1314c 100644 --- a/be/src/runtime/data_spliter.cpp +++ b/be/src/runtime/data_spliter.cpp @@ -46,8 +46,8 @@ DataSpliter::DataSpliter(const RowDescriptor& row_desc) : DataSpliter::~DataSpliter() { } -// We use the ParttitionRange to compare here. It should not be a member function of PartitionInfo -// class becaurce there are some other member in it. +// We use the PartitionRange to compare here. It should not be a member function of PartitionInfo +// class because there are some other members in it.
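Stepping back to the result-cache hunk above: its hit/miss example encodes a simple rule. Since a query can be split into at most two parts (one served from cache, one rescanned by the scan node), a range query misses only when it sticks out past the cached range on both ends at once. A compact sketch of that predicate, checked against the exact dates from the comment:

#include <cassert>
#include <cstdint>

struct Range {
    int64_t min;  // inclusive, e.g. 20191211
    int64_t max;  // inclusive, e.g. 20191215
};

// Hit unless the query exceeds the cached range on both sides simultaneously;
// a single overhanging side can still be fetched by the scan node.
static bool partition_cache_hit(const Range& cached, const Range& query) {
    return !(query.min < cached.min && query.max > cached.max);
}

int main() {
    Range cached{20191211, 20191215};
    assert(partition_cache_hit(cached, {20191211, 20191215}));   // exact: hit
    assert(partition_cache_hit(cached, {20191212, 20191214}));   // inside: hit
    assert(partition_cache_hit(cached, {20191212, 20191216}));   // right overhang: hit
    assert(partition_cache_hit(cached, {20191210, 20191215}));   // left overhang: hit
    assert(!partition_cache_hit(cached, {20191210, 20191216}));  // both sides: miss
    return 0;
}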
static bool compare_part_use_range(const PartitionInfo* v1, const PartitionInfo* v2) { return v1->range() < v2->range(); } diff --git a/be/src/runtime/data_spliter.h b/be/src/runtime/data_spliter.h index 5ea3358e9ef624..bdc0b88ed4a393 100644 --- a/be/src/runtime/data_spliter.h +++ b/be/src/runtime/data_spliter.h @@ -89,7 +89,7 @@ class DataSpliter : public DataSink { std::vector _partition_expr_ctxs; // map from range value to partition_id - // sorted in ascending orderi by range for binary search + // sorted in ascending order by range for binary search std::vector _partition_infos; // Information of rollup diff --git a/be/src/runtime/data_stream_recvr.cc b/be/src/runtime/data_stream_recvr.cc index 9fc643d8cdbf28..12099f65ce9bc3 100644 --- a/be/src/runtime/data_stream_recvr.cc +++ b/be/src/runtime/data_stream_recvr.cc @@ -222,7 +222,7 @@ void DataStreamRecvr::SenderQueue::add_batch( } // We always accept the batch regardless of buffer limit, to avoid rpc pipeline stall. - // If exceed buffer limit, we just do not respoinse ACK to client, so the client won't + // If exceed buffer limit, we just do not respond ACK to client, so the client won't // send data until receive ACK. // Note that if this be needs to receive data from N BEs, the size of buffer // may reach as many as (buffer_size + n * buffer_size) diff --git a/be/src/runtime/datetime_value.cpp b/be/src/runtime/datetime_value.cpp index 29e4761e00da1b..1105eeda433ad3 100644 --- a/be/src/runtime/datetime_value.cpp +++ b/be/src/runtime/datetime_value.cpp @@ -226,7 +226,7 @@ bool DateTimeValue::from_date_str(const char* date_str, int len) { // ((YY_PART_YEAR - 1)##1231235959, YY_PART_YEAR##0101000000) invalid // ((YY_PART_YEAR)##1231235959, 99991231235959] two digits year datetime value 1970 ~ 1999 // (999991231235959, ~) valid -int64_t DateTimeValue::standardlize_timevalue(int64_t value) { +int64_t DateTimeValue::standardize_timevalue(int64_t value) { _type = TIME_DATE; if (value <= 0) { return 0; } @@ -289,7 +289,7 @@ int64_t DateTimeValue::standardlize_timevalue(int64_t value) { bool DateTimeValue::from_date_int64(int64_t value) { _neg = false; - value = standardlize_timevalue(value); + value = standardize_timevalue(value); if (value <= 0) { return false; } @@ -1070,7 +1070,7 @@ static int check_word(const char* lib[], const char* str, const char* end, const return pos; } -// this method is exaclty same as fromDateFormatStr() in DateLiteral.java in FE +// this method is exactly the same as fromDateFormatStr() in DateLiteral.java in FE // change this method should also change that. bool DateTimeValue::from_date_format_str( const char* format, int format_len, diff --git a/be/src/runtime/datetime_value.h b/be/src/runtime/datetime_value.h index 9471ee02385858..e8e80d71b639d6 100644 --- a/be/src/runtime/datetime_value.h +++ b/be/src/runtime/datetime_value.h @@ -488,7 +488,7 @@ class DateTimeValue { return (time << 24) + second_part; } - // To compatitable with MySQL + // To be compatible with MySQL int64_t to_int64_datetime_packed() const { int64_t ymd = ((_year * 13 + _month) << 5) | _day; int64_t hms = (_hour << 12) | (_minute << 6) | _second; @@ -502,12 +502,12 @@ class DateTimeValue { return _neg ? -tmp : tmp; } - // Check wether value of field is valid. + // Check whether value of field is valid. bool check_range() const; bool check_date() const; // Used to construct from int value - int64_t standardlize_timevalue(int64_t value); + int64_t standardize_timevalue(int64_t value); // Used to convert to a string.
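The packed layout in to_int64_datetime_packed() above mirrors MySQL's internal datetime encoding: year and month share one field (year * 13 + month), followed by 5 bits for day, 17 bits for hour/minute/second, and 24 low bits for microseconds. A self-contained sketch of packing and unpacking with the same shifts (function names here are illustrative, not Doris API):

#include <cstdint>

static int64_t pack_datetime(int64_t y, int64_t mo, int64_t d,
                             int64_t h, int64_t mi, int64_t s, int64_t us) {
    int64_t ymd = ((y * 13 + mo) << 5) | d;   // month field has 13 slots (0..12)
    int64_t hms = (h << 12) | (mi << 6) | s;  // 6 bits each for minute and second
    return (((ymd << 17) | hms) << 24) + us;  // low 24 bits carry microseconds
}

static void unpack_datetime(int64_t packed, int64_t* y, int64_t* mo, int64_t* d,
                            int64_t* h, int64_t* mi, int64_t* s, int64_t* us) {
    *us = packed & ((1 << 24) - 1);
    int64_t hms = (packed >> 24) & ((1 << 17) - 1);
    *s = hms & 63;
    *mi = (hms >> 6) & 63;
    *h = hms >> 12;
    int64_t ymd = packed >> 41;
    *d = ymd & 31;
    *mo = (ymd >> 5) % 13;
    *y = (ymd >> 5) / 13;
}

Round-tripping a value such as pack_datetime(2020, 5, 14, 13, 7, 9, 123456) through unpack_datetime recovers every field, which makes the bit widths easy to unit-test.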
char* append_date_string(char *to) const; diff --git a/be/src/runtime/decimal_value.cpp b/be/src/runtime/decimal_value.cpp index ea06c90c04b0c8..ae375e800576a9 100755 --- a/be/src/runtime/decimal_value.cpp +++ b/be/src/runtime/decimal_value.cpp @@ -146,7 +146,7 @@ int32_t do_add( to->_frac_length = std::max(value1._frac_length, value2._frac_length); if (error) { // E_DEC_TRUNCATED int32_t to_frac_length = to->_frac_length; - //ATTN: _int_lenggh is bit-field struct member, can not take address directly. + //ATTN: _int_length is bit-field struct member, can not take address directly. set_if_smaller(&to_frac_length, frac0 * DIG_PER_DEC1); to->_frac_length = to_frac_length; set_if_smaller(&frac1, frac0); @@ -385,7 +385,7 @@ int do_mul(const DecimalValue& value1, const DecimalValue& value2, DecimalValue* to->_frac_length = value1._frac_length + value2._frac_length; // store size in digits int32_t temp_to_frac_length = to->_frac_length; - //ATTN: _int_lenggh is bit-field struct member, can not take address directly. + //ATTN: _int_length is bit-field struct member, can not take address directly. set_if_smaller(&temp_to_frac_length, NOT_FIXED_DEC); to->_frac_length = temp_to_frac_length; if (error) { diff --git a/be/src/runtime/decimal_value.h b/be/src/runtime/decimal_value.h index dc14606c2fa571..6886cd7686b460 100755 --- a/be/src/runtime/decimal_value.h +++ b/be/src/runtime/decimal_value.h @@ -331,7 +331,7 @@ class DecimalValue { // @param from - value to convert. Doesn't have to be \0 terminated! // will stop at the fist non-digit char(nor '.' 'e' 'E'), // or reaches the length - // @param length - maximum lengnth + // @param length - maximum length // @return error number. // // E_DEC_OK/E_DEC_TRUNCATED/E_DEC_OVERFLOW/E_DEC_BAD_NUM/E_DEC_OOM diff --git a/be/src/runtime/decimalv2_value.cpp b/be/src/runtime/decimalv2_value.cpp index dacb88dd2d3581..c30ff2dd0b4a5d 100644 --- a/be/src/runtime/decimalv2_value.cpp +++ b/be/src/runtime/decimalv2_value.cpp @@ -297,7 +297,7 @@ std::string DecimalV2Value::to_string() const { // NOTE: only change abstract value, do not change sign void DecimalV2Value::to_max_decimal(int32_t precision, int32_t scale) { - bool is_negtive = (_value < 0); + bool is_negative = (_value < 0); static const int64_t INT_MAX_VALUE[PRECISION] = { 9ll, 99ll, @@ -330,7 +330,7 @@ void DecimalV2Value::to_max_decimal(int32_t precision, int32_t scale) { 999999999 }; - // precison > 0 && scale >= 0 && scale <= SCALE + // precision > 0 && scale >= 0 && scale <= SCALE if (precision <= 0 || scale < 0) return; if (scale > SCALE) scale = SCALE; @@ -340,13 +340,13 @@ void DecimalV2Value::to_max_decimal(int32_t precision, int32_t scale) { precision = PRECISION - SCALE + scale; } else if (precision <= scale) { LOG(WARNING) << "Warning: error precision: " << precision << " or scale: " << scale; - precision = scale + 1; // corect error precision + precision = scale + 1; // correct error precision } int64_t int_value = INT_MAX_VALUE[precision - scale - 1]; int64_t frac_value = scale == 0? 
0 : FRAC_MAX_VALUE[scale - 1]; _value = static_cast(int_value) * DecimalV2Value::ONE_BILLION + frac_value; - if (is_negtive) _value = -_value; + if (is_negative) _value = -_value; } std::size_t hash_value(DecimalV2Value const& value) { diff --git a/be/src/runtime/decimalv2_value.h b/be/src/runtime/decimalv2_value.h index a1e99f670d1bef..9d515cc5275fd4 100644 --- a/be/src/runtime/decimalv2_value.h +++ b/be/src/runtime/decimalv2_value.h @@ -70,8 +70,8 @@ class DecimalV2Value { inline bool from_olap_decimal(int64_t int_value, int64_t frac_value) { bool success = true; - bool is_negtive = (int_value < 0 || frac_value < 0); - if (is_negtive) { + bool is_negative = (int_value < 0 || frac_value < 0); + if (is_negative) { int_value = std::abs(int_value); frac_value = std::abs(frac_value); } @@ -87,7 +87,7 @@ class DecimalV2Value { } _value = static_cast(int_value) * ONE_BILLION + frac_value; - if (is_negtive) _value = -_value; + if (is_negative) _value = -_value; return success; } @@ -215,7 +215,7 @@ class DecimalV2Value { // @param from - value to convert. Doesn't have to be \0 terminated! // will stop at the fist non-digit char(nor '.' 'e' 'E'), // or reaches the length - // @param length - maximum lengnth + // @param length - maximum length // @return error number. // // E_DEC_OK/E_DEC_TRUNCATED/E_DEC_OVERFLOW/E_DEC_BAD_NUM/E_DEC_OOM diff --git a/be/src/runtime/descriptor_helper.h b/be/src/runtime/descriptor_helper.h index 1fd28f6caee746..7da7c2a9852b6c 100644 --- a/be/src/runtime/descriptor_helper.h +++ b/be/src/runtime/descriptor_helper.h @@ -116,8 +116,8 @@ class TTupleDescriptorBuilder { num_nullables++; } } - int null_byetes = (num_nullables + 7) / 8; - int offset = null_byetes; + int null_bytes = (num_nullables + 7) / 8; + int offset = null_bytes; int null_offset = 0; for (int i = 0; i < _slot_descs.size(); ++i) { auto& slot_desc = _slot_descs[i]; @@ -142,7 +142,7 @@ class TTupleDescriptorBuilder { _tuple_desc.id = _tuple_id; _tuple_desc.byteSize = offset; - _tuple_desc.numNullBytes = null_byetes; + _tuple_desc.numNullBytes = null_bytes; _tuple_desc.numNullSlots = _slot_descs.size(); tb->add_slots(_slot_descs); diff --git a/be/src/runtime/disk_io_mgr.cc b/be/src/runtime/disk_io_mgr.cc index 29a282b041cbc1..5118a21ed8e564 100644 --- a/be/src/runtime/disk_io_mgr.cc +++ b/be/src/runtime/disk_io_mgr.cc @@ -258,7 +258,7 @@ static void check_sse_support() { if (!CpuInfo::is_supported(CpuInfo::SSE4_2)) { LOG(WARNING) << "This machine does not support sse4_2. The default IO system " "configurations are suboptimal for this hardware. Consider " - "increasing the number of threads per disk by restarting impalad " + "increasing the number of threads per disk by restarting doris " "using the --num_threads_per_disk flag with a higher value"; } } @@ -394,7 +394,7 @@ Status DiskIoMgr::init(const std::shared_ptr& process_mem_tracker) { // _cached_read_options = hadoopRzOptionsAlloc(); // DCHECK(_cached_read_options != NULL); - // Disable checksumming for cached reads. + // Disable checksum for cached reads. // int ret = hadoopRzOptionsSetSkipChecksum(_cached_read_options, true); // DCHECK_EQ(ret, 0); // Disable automatic fallback for cached reads. diff --git a/be/src/runtime/disk_io_mgr.h b/be/src/runtime/disk_io_mgr.h index 2636581168f3b2..9b00ebbc6dbf5f 100644 --- a/be/src/runtime/disk_io_mgr.h +++ b/be/src/runtime/disk_io_mgr.h @@ -475,7 +475,7 @@ class DiskIoMgr { // The soft capacity limit for _ready_buffers. _ready_buffers can exceed // the limit temporarily as the capacity is adjusted dynamically. 
- // In that case, the capcity is only realized when the caller removes buffers + // In that case, the capacity is only realized when the caller removes buffers // from _ready_buffers. int _ready_buffers_capacity; @@ -567,7 +567,7 @@ class DiskIoMgr { // unregister_context also cancels the reader/writer from the disk IoMgr. void unregister_context(RequestContext* context); - // This function cancels the context asychronously. All outstanding requests + // This function cancels the context asynchronously. All outstanding requests // are aborted and tracking structures cleaned up. This does not need to be // called if the context finishes normally. // This will also fail any outstanding get_next()/Read requests. diff --git a/be/src/runtime/disk_io_mgr_scan_range.cc b/be/src/runtime/disk_io_mgr_scan_range.cc index 7b6146f9fb75f7..d57c606b6ba845 100644 --- a/be/src/runtime/disk_io_mgr_scan_range.cc +++ b/be/src/runtime/disk_io_mgr_scan_range.cc @@ -367,7 +367,7 @@ void DiskIoMgr::ScanRange::close() { * // Profiles show that both the JNI array allocation and the memcpy adds much more * // overhead for larger buffers, so limit the size of each read request. 128K was * // chosen empirically by trying values between 4K and 8M and optimizing for lower CPU - * // utilization and higher S3 througput. + * // utilization and higher S3 throughput. * if (_disk_id == _io_mgr->RemoteS3DiskId()) { * DCHECK(IsS3APath(file())); * return 128 * 1024; diff --git a/be/src/runtime/dpp_sink.cpp b/be/src/runtime/dpp_sink.cpp index 74953e56c85c4e..a0afb80abcd7d4 100644 --- a/be/src/runtime/dpp_sink.cpp +++ b/be/src/runtime/dpp_sink.cpp @@ -158,8 +158,8 @@ class Translator { // Prepare this translator, includes // 1. new sorter for sort data - // 2. new comparator for aggreage data with same key - // 3. new witer for write data later + // 2. new comparator for aggregate data with same key + // 3. new writer for write data later // 4. 
create value updaters Status prepare(RuntimeState* state); @@ -191,8 +191,8 @@ class Translator { // and put to object pool of 'state', so no need to delete it Status create_sorter(RuntimeState* state); - // Helper to create comparetor used to aggregate same rows - Status create_comparetor(RuntimeState* state); + // Helper to create comparator used to aggregate same rows + Status create_comparator(RuntimeState* state); // Create writer to write data // same with sorter, so don't worry about its lifecycle @@ -295,7 +295,7 @@ Status Translator::create_sorter(RuntimeState* state) { return Status::OK(); } -Status Translator::create_comparetor(RuntimeState* state) { +Status Translator::create_comparator(RuntimeState* state) { RETURN_IF_ERROR(Expr::clone_if_not_exists(_rollup_schema.keys(), state, &_last_row_expr_ctxs)); RETURN_IF_ERROR(Expr::clone_if_not_exists(_rollup_schema.keys(), state, &_cur_row_expr_ctxs)); return Status::OK(); @@ -513,7 +513,7 @@ Status Translator::create_value_updaters() { _value_updaters.push_back(update_min); break; case TAggregationType::SUM: - return Status::InternalError("Unsupport sum operation on date/datetime column."); + return Status::InternalError("Unsupported sum operation on date/datetime column."); default: // replace _value_updaters.push_back(fake_update); @@ -527,7 +527,7 @@ Status Translator::create_value_updaters() { case TAggregationType::MAX: case TAggregationType::MIN: case TAggregationType::SUM: - return Status::InternalError("Unsupport max/min/sum operation on char/varchar column."); + return Status::InternalError("Unsupported max/min/sum operation on char/varchar column."); default: // Only replace has meaning _value_updaters.push_back(fake_update); @@ -544,7 +544,7 @@ Status Translator::create_value_updaters() { case TAggregationType::MAX: case TAggregationType::MIN: case TAggregationType::SUM: - return Status::InternalError("Unsupport max/min/sum operation on hll column."); + return Status::InternalError("Unsupported max/min/sum operation on hll column."); default: _value_updaters.push_back(fake_update); break; @@ -554,7 +554,7 @@ Status Translator::create_value_updaters() { default: { std::stringstream ss; ss << "Unsupported column type(" << _rollup_schema.values()[i]->root()->type() << ")"; - // No operation, just pusb back a fake update + // No operation, just push back a fake update return Status::InternalError(ss.str()); break; } @@ -586,8 +586,8 @@ Status Translator::prepare(RuntimeState* state) { // 1. Create sorter RETURN_IF_ERROR(create_sorter(state)); - // 2. Create comparetor - RETURN_IF_ERROR(create_comparetor(state)); + // 2. Create comparator + RETURN_IF_ERROR(create_comparator(state)); // 3. 
Create writer RETURN_IF_ERROR(create_writer(state)); @@ -688,8 +688,8 @@ void HllDppSinkMerge::update_hll_set(TupleRow* agg_row, TupleRow* row, } } else if (value->type == HLL_DATA_EXPLICIT) { value->hash_set.insert(row_resolver.get_explicit_value(0)); - if (value->hash_set.size() > HLL_EXPLICLIT_INT64_NUM) { - value->type = HLL_DATA_SPRASE; + if (value->hash_set.size() > HLL_EXPLICIT_INT64_NUM) { + value->type = HLL_DATA_SPARSE; for (std::set::iterator iter = value->hash_set.begin(); iter != value->hash_set.end(); iter++) { uint64_t hash = *iter; int idx = hash % REGISTERS_SIZE; @@ -701,7 +701,7 @@ void HllDppSinkMerge::update_hll_set(TupleRow* agg_row, TupleRow* row, } } } - } else if (value->type == HLL_DATA_SPRASE) { + } else if (value->type == HLL_DATA_SPARSE) { uint64_t hash = row_resolver.get_explicit_value(0); int idx = hash % REGISTERS_SIZE; uint8_t first_one_bit = __builtin_ctzl(hash >> HLL_COLUMN_PRECISION) + 1; @@ -733,7 +733,7 @@ void HllDppSinkMerge::finalize_one_merge(TupleRow* agg_row, MemPool* pool, memset(result, 0, set_len); HllSetHelper::set_explicit(result, value->hash_set, set_len); agg_row_sv->replace(result, set_len); - } else if (value->type == HLL_DATA_SPRASE) { + } else if (value->type == HLL_DATA_SPARSE) { // full explicit set if (value->index_to_value.size() * (sizeof(HllSetResolver::SparseIndexType) + sizeof(HllSetResolver::SparseValueType)) @@ -774,7 +774,7 @@ void HllDppSinkMerge::close() { // use batch to release data Status Translator::process_one_row(TupleRow* row) { if (row == nullptr) { - // Something strange happend + // Something strange happened std::stringstream ss; ss << "row is nullptr."; LOG(ERROR) << ss.str(); diff --git a/be/src/runtime/dpp_sink.h b/be/src/runtime/dpp_sink.h index 3976dc4d58d296..a9198109d68370 100644 --- a/be/src/runtime/dpp_sink.h +++ b/be/src/runtime/dpp_sink.h @@ -36,9 +36,9 @@ class Translator; class RuntimeProfile; class CountDownLatch; -// This class swallow data which is splited by partition and rollup. +// This class swallows data which is split by partition and rollup. // Sort input data and then aggregate data contains same key, -// then wirte new data into dpp writer for next push operation. +// then write new data into dpp writer for next push operation. class DppSink { public: DppSink(const RowDescriptor& row_desc, diff --git a/be/src/runtime/dpp_writer.cpp b/be/src/runtime/dpp_writer.cpp index f4c0addb8b1fce..7da076d25408ea 100644 --- a/be/src/runtime/dpp_writer.cpp +++ b/be/src/runtime/dpp_writer.cpp @@ -123,7 +123,7 @@ Status DppWriter::append_one_row(TupleRow* row) { for (int i = 0; i < num_columns; ++i) { char* position = _buf + pos; void* item = _output_expr_ctxs[i]->get_value(row); - // What happend failed??? + // What happened? Why did it fail?
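For context on the sparse-path update above: it is the standard HyperLogLog register rule, where the low bits of the hash select a register, the rank of the first set bit in the remaining bits is the candidate value, and a register only ever grows. A minimal sketch with REGISTERS_SIZE and HLL_COLUMN_PRECISION as assumed constants mirroring the names in this hunk:

#include <algorithm>
#include <cstdint>
#include <vector>

// Assumed sizing, mirroring the constants referenced above: the low 14 bits
// of the hash index one of 2^14 registers.
constexpr int HLL_COLUMN_PRECISION = 14;
constexpr int REGISTERS_SIZE = 1 << HLL_COLUMN_PRECISION;

static void hll_update(std::vector<uint8_t>& registers, uint64_t hash) {
    int idx = static_cast<int>(hash % REGISTERS_SIZE);  // low bits pick the register
    uint64_t rest = hash >> HLL_COLUMN_PRECISION;
    uint8_t rank;
    if (rest == 0) {
        rank = 64 - HLL_COLUMN_PRECISION + 1;  // degenerate case: no set bit remains
    } else {
        rank = static_cast<uint8_t>(__builtin_ctzll(rest) + 1);  // 1-based first set bit
    }
    registers[idx] = std::max(registers[idx], rank);  // registers never decrease
}

Callers would size the vector as std::vector<uint8_t> registers(REGISTERS_SIZE, 0) and feed each element's hash through hll_update.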
if (true == _output_expr_ctxs[i]->is_nullable()) { int index = off % 8; if (item == nullptr) { diff --git a/be/src/runtime/etl_job_mgr.cpp b/be/src/runtime/etl_job_mgr.cpp index 7b712432949877..56c39820021800 100644 --- a/be/src/runtime/etl_job_mgr.cpp +++ b/be/src/runtime/etl_job_mgr.cpp @@ -295,12 +295,12 @@ void EtlJobMgr::debug(std::stringstream& ss) { std::lock_guard l(_lock); // Debug summary - ss << "we have " << _running_jobs.size() << " jobs Runnings\n"; + ss << "we have " << _running_jobs.size() << " jobs Running\n"; ss << "we have " << _failed_jobs.size() << " jobs Failed\n"; ss << "we have " << _success_jobs.size() << " jobs Successful\n"; // Debug running jobs for (auto& it : _running_jobs) { - ss << "runing jobs: " << it << "\n"; + ss << "running jobs: " << it << "\n"; } // Debug success jobs for (auto& it : _success_jobs) { diff --git a/be/src/runtime/file_result_writer.cpp b/be/src/runtime/file_result_writer.cpp index 8d777e6f1d8c66..281b7133d93122 100644 --- a/be/src/runtime/file_result_writer.cpp +++ b/be/src/runtime/file_result_writer.cpp @@ -87,7 +87,7 @@ Status FileResultWriter::_create_file_writer() { _parquet_writer = new ParquetWriterWrapper(_file_writer, _output_expr_ctxs); break; default: - return Status::InternalError(strings::Substitute("unsupport file format: $0", _file_opts->file_format)); + return Status::InternalError(strings::Substitute("unsupported file format: $0", _file_opts->file_format)); } LOG(INFO) << "create file for exporting query result. file name: " << file_name << ". query id: " << print_id(_state->query_id()); diff --git a/be/src/runtime/file_result_writer.h b/be/src/runtime/file_result_writer.h index bc0040327efc8e..39d00daa7d788f 100644 --- a/be/src/runtime/file_result_writer.h +++ b/be/src/runtime/file_result_writer.h @@ -103,7 +103,7 @@ class FileResultWriter final : public ResultWriter { // TODO(cmy): I simply use a stringstrteam to buffer the data, to avoid calling // file writer's write() for every single row. // But this cannot solve the problem of a row of data that is too large. - // For exampel: bitmap_to_string() may return large volumn of data. + // For example: bitmap_to_string() may return a large volume of data. // And the speed is relative low, in my test, is about 6.5MB/s.
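On the buffering TODO above: accumulating rows in a stream and flushing in large writes trades a little memory for far fewer writer calls. A hedged sketch of the flush-on-threshold pattern, with the underlying writer reduced to a callback and the buffer size as an assumed constant standing in for OUTSTREAM_BUFFER_SIZE_BYTES:

#include <cstddef>
#include <functional>
#include <sstream>
#include <string>
#include <utility>

// Assumed threshold for this sketch only.
constexpr size_t kOutstreamBufferSizeBytes = 1024 * 1024;

class BufferedPlainTextWriter {
public:
    explicit BufferedPlainTextWriter(std::function<void(const std::string&)> sink)
            : _sink(std::move(sink)) {}

    // Buffer one serialized row; flush once the buffer passes the threshold.
    void append_row(const std::string& row) {
        _out << row << '\n';
        if (static_cast<size_t>(_out.tellp()) >= kOutstreamBufferSizeBytes) {
            flush();
        }
    }

    // Hand all accumulated bytes to the underlying writer in a single call.
    void flush() {
        _sink(_out.str());
        _out.str("");   // reset buffered contents
        _out.clear();   // reset stream state
    }

private:
    std::ostringstream _out;
    std::function<void(const std::string&)> _sink;
};

As the comment notes, this does not help when a single row (for example a bitmap_to_string() result) is itself huge, since the threshold check only runs between rows.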
std::stringstream _plain_text_outstream; static const size_t OUTSTREAM_BUFFER_SIZE_BYTES; @@ -114,7 +114,7 @@ class FileResultWriter final : public ResultWriter { int _file_idx = 0; RuntimeProfile* _parent_profile; // profile from result sink, not owned - // total time cost on append batch opertion + // total time cost on append batch operation RuntimeProfile::Counter* _append_row_batch_timer = nullptr; // tuple convert timer, child timer of _append_row_batch_timer RuntimeProfile::Counter* _convert_tuple_timer = nullptr; diff --git a/be/src/runtime/fragment_mgr.cpp b/be/src/runtime/fragment_mgr.cpp index e25a478e71922f..489400540d0cae 100644 --- a/be/src/runtime/fragment_mgr.cpp +++ b/be/src/runtime/fragment_mgr.cpp @@ -141,7 +141,7 @@ class FragmentExecState { TUniqueId _query_id; // Id of this instance TUniqueId _fragment_instance_id; - // Used to reoprt to coordinator which backend is over + // Used to report to coordinator which backend is over int _backend_num; ExecEnv* _exec_env; TNetworkAddress _coord_addr; @@ -525,7 +525,7 @@ void FragmentMgr::cancel_worker() { } for (auto& id : to_delete) { cancel(id, PPlanFragmentCancelReason::TIMEOUT); - LOG(INFO) << "FragmentMgr cancel worker going to cancel timouet fragment " << print_id(id); + LOG(INFO) << "FragmentMgr cancel worker going to cancel timeout fragment " << print_id(id); } } while (!_stop_background_threads_latch.wait_for(MonoDelta::FromSeconds(1))); LOG(INFO) << "FragmentMgr cancel worker is going to exit."; diff --git a/be/src/runtime/initial_reservations.cc b/be/src/runtime/initial_reservations.cc index 2daf7aa0a5e544..bd2d0e1ea42b57 100644 --- a/be/src/runtime/initial_reservations.cc +++ b/be/src/runtime/initial_reservations.cc @@ -50,7 +50,7 @@ Status InitialReservations::Init( DCHECK_EQ(0, initial_reservations_.GetReservation()) << "Already inited"; if (!initial_reservations_.IncreaseReservation(query_min_reservation)) { std::stringstream ss; - ss << "Minimum reservation unavaliable: " << query_min_reservation + ss << "Minimum reservation unavailable: " << query_min_reservation << " query id:" << query_id; return Status::MinimumReservationUnavailable(ss.str()); } diff --git a/be/src/runtime/load_channel.cpp b/be/src/runtime/load_channel.cpp index 72ace2357783f5..c8cf06911317dc 100644 --- a/be/src/runtime/load_channel.cpp +++ b/be/src/runtime/load_channel.cpp @@ -126,7 +126,7 @@ void LoadChannel::handle_mem_exceed_limit(bool force) { channel->reduce_mem_usage(); } else { // should not happen, add log to observe - LOG(WARNING) << "fail to find suitable tablets-channel when memory execeed. " + LOG(WARNING) << "fail to find suitable tablets-channel when memory limit is exceeded. 
" << "load_id=" << _load_id; } } diff --git a/be/src/runtime/load_channel.h b/be/src/runtime/load_channel.h index 8bea5954bf78ea..d765108b48e811 100644 --- a/be/src/runtime/load_channel.h +++ b/be/src/runtime/load_channel.h @@ -74,7 +74,7 @@ class LoadChannel { bool _find_largest_consumption_channel(std::shared_ptr* channel); UniqueId _load_id; - // Tracks the total memory comsupted by current load job on this BE + // Tracks the total memory consumed by current load job on this BE std::shared_ptr _mem_tracker; // lock protect the tablets channel map diff --git a/be/src/runtime/load_channel_mgr.cpp b/be/src/runtime/load_channel_mgr.cpp index 7f7e87e6604ea5..0fb569212b2437 100644 --- a/be/src/runtime/load_channel_mgr.cpp +++ b/be/src/runtime/load_channel_mgr.cpp @@ -68,7 +68,7 @@ LoadChannelMgr::LoadChannelMgr() : _stop_background_threads_latch(1) { std::lock_guard l(_lock); return _load_channels.size(); }); - _lastest_success_channel = new_lru_cache(1024); + _last_success_channel = new_lru_cache(1024); } LoadChannelMgr::~LoadChannelMgr() { @@ -77,7 +77,7 @@ LoadChannelMgr::~LoadChannelMgr() { if (_load_channels_clean_thread) { _load_channels_clean_thread->join(); } - delete _lastest_success_channel; + delete _last_success_channel; } Status LoadChannelMgr::init(int64_t process_mem_limit) { @@ -129,10 +129,10 @@ Status LoadChannelMgr::add_batch( std::lock_guard l(_lock); auto it = _load_channels.find(load_id); if (it == _load_channels.end()) { - auto handle = _lastest_success_channel->lookup(load_id.to_string()); + auto handle = _last_success_channel->lookup(load_id.to_string()); // success only when eos be true if (handle != nullptr) { - _lastest_success_channel->release(handle); + _last_success_channel->release(handle); if (request.has_eos() && request.eos()) { return Status::OK(); } @@ -157,9 +157,9 @@ Status LoadChannelMgr::add_batch( { std::lock_guard l(_lock); _load_channels.erase(load_id); - auto handle = _lastest_success_channel->insert( + auto handle = _last_success_channel->insert( load_id.to_string(), nullptr, 1, dummy_deleter); - _lastest_success_channel->release(handle); + _last_success_channel->release(handle); } VLOG(1) << "removed load channel " << load_id; } @@ -183,7 +183,7 @@ void LoadChannelMgr::_handle_mem_exceed_limit() { } if (max_consume == 0) { // should not happen, add log to observe - LOG(WARNING) << "failed to find suitable load channel when total load mem limit execeed"; + LOG(WARNING) << "failed to find suitable load channel when total load mem limit exceed"; return; } DCHECK(channel.get() != nullptr); diff --git a/be/src/runtime/load_channel_mgr.h b/be/src/runtime/load_channel_mgr.h index e506e192bb5f85..b728b2141dec6f 100644 --- a/be/src/runtime/load_channel_mgr.h +++ b/be/src/runtime/load_channel_mgr.h @@ -38,8 +38,8 @@ namespace doris { class Cache; class LoadChannel; -// LoadChannelMgr -> LoadChannel -> TabletsChannel -> DeltaWrtier -// All dispached load data for this backend is routed from this class +// LoadChannelMgr -> LoadChannel -> TabletsChannel -> DeltaWriter +// All dispatched load data for this backend is routed from this class class LoadChannelMgr { public: LoadChannelMgr(); @@ -70,7 +70,7 @@ class LoadChannelMgr { std::mutex _lock; // load id -> load channel std::unordered_map> _load_channels; - Cache* _lastest_success_channel = nullptr; + Cache* _last_success_channel = nullptr; // check the total load mem consumption of this Backend std::shared_ptr _mem_tracker; diff --git a/be/src/runtime/load_path_mgr.cpp b/be/src/runtime/load_path_mgr.cpp 
index d7e637cdd5d237..c0c6f739c395ca 100644 --- a/be/src/runtime/load_path_mgr.cpp +++ b/be/src/runtime/load_path_mgr.cpp @@ -76,7 +76,7 @@ Status LoadPathMgr::allocate_dir( const std::string& label, std::string* prefix) { if (_path_vec.empty()) { - return Status::InternalError("No load path configed."); + return Status::InternalError("No load path configured."); } std::string path; auto size = _path_vec.size(); diff --git a/be/src/runtime/mem_pool.cpp b/be/src/runtime/mem_pool.cpp index 799ec1a7b65b29..61b70c94dfc7bf 100644 --- a/be/src/runtime/mem_pool.cpp +++ b/be/src/runtime/mem_pool.cpp @@ -165,17 +165,17 @@ void MemPool::acquire_data(MemPool* src, bool keep_current) { } auto end_chunk = src->chunks_.begin() + num_acquired_chunks; - int64_t total_transfered_bytes = 0; + int64_t total_transferred_bytes = 0; for (auto i = src->chunks_.begin(); i != end_chunk; ++i) { - total_transfered_bytes += i->chunk.size; + total_transferred_bytes += i->chunk.size; } - src->total_reserved_bytes_ -= total_transfered_bytes; - total_reserved_bytes_ += total_transfered_bytes; + src->total_reserved_bytes_ -= total_transferred_bytes; + total_reserved_bytes_ += total_transferred_bytes; // Skip unnecessary atomic ops if the mem_trackers are the same. if (src->mem_tracker_ != mem_tracker_) { - src->mem_tracker_->Release(total_transfered_bytes); - mem_tracker_->Consume(total_transfered_bytes); + src->mem_tracker_->Release(total_transferred_bytes); + mem_tracker_->Consume(total_transferred_bytes); } // insert new chunks after current_chunk_idx_ diff --git a/be/src/runtime/mem_pool.h b/be/src/runtime/mem_pool.h index 1933cdfeecfd49..842d813b4bba04 100644 --- a/be/src/runtime/mem_pool.h +++ b/be/src/runtime/mem_pool.h @@ -111,7 +111,7 @@ class MemPool { return allocate(size, DEFAULT_ALIGNMENT); } - /// Same as Allocate() excpect add a check when return a nullptr + /// Same as Allocate() except it adds a check when returning a nullptr OLAPStatus allocate_safely(int64_t size, uint8_t*& ret) { return allocate_safely(size, DEFAULT_ALIGNMENT, ret); } @@ -153,7 +153,7 @@ class MemPool { void acquire_data(MemPool* src, bool keep_current); // Exchange all chunks with input source, including reserved chunks. - // This funciton will keep its own MemTracker, and upate it after exchange. + // This function will keep its own MemTracker, and update it after exchange. // Why we need this other than std::swap? Because swap will swap MemTracker too, which would // lead error. We only has MemTracker's pointer, which can be invalid after swap. void exchange_data(MemPool* other); diff --git a/be/src/runtime/memory/chunk_allocator.h b/be/src/runtime/memory/chunk_allocator.h index 38f84bbf1f47c2..2347cc2ad9f960 100644 --- a/be/src/runtime/memory/chunk_allocator.h +++ b/be/src/runtime/memory/chunk_allocator.h @@ -36,7 +36,7 @@ class MetricEntity; // ChunkAllocator has one ChunkArena for each CPU core, it will try to allocate // memory from current core arena firstly. In this way, there will be no lock contention // between concurrently-running threads. If this fails, ChunkAllocator will try to allocate -// memroy from other core's arena. +// memory from other core's arena.
// // Memory Reservation // ChunkAllocator has a limit about how much free chunk bytes it can reserve, above which diff --git a/be/src/runtime/mysql_result_writer.cpp b/be/src/runtime/mysql_result_writer.cpp index 0c8cc43ca97be1..e93317ae1bdce5 100644 --- a/be/src/runtime/mysql_result_writer.cpp +++ b/be/src/runtime/mysql_result_writer.cpp @@ -145,7 +145,7 @@ Status MysqlResultWriter::_add_one_row(TupleRow* row) { if (string_val->ptr == NULL) { if (string_val->len == 0) { - // 0x01 is a magic num, not usefull actually, just for present "" + // 0x01 is a magic num, not useful actually, just to present "" char* tmp_val = reinterpret_cast(0x01); buf_ret = _row_buffer->push_string(tmp_val, string_val->len); } else { diff --git a/be/src/runtime/mysql_result_writer.h b/be/src/runtime/mysql_result_writer.h index 57134afc895953..a8e52965f8ab25 100644 --- a/be/src/runtime/mysql_result_writer.h +++ b/be/src/runtime/mysql_result_writer.h @@ -29,7 +29,7 @@ class MysqlRowBuffer; class BufferControlBlock; class RuntimeProfile; -// convert the row batch to mysql protol row +// convert the row batch to mysql protocol row class MysqlResultWriter final : public ResultWriter { public: MysqlResultWriter(BufferControlBlock* sinker, @@ -55,7 +55,7 @@ class MysqlResultWriter final : public ResultWriter { MysqlRowBuffer* _row_buffer; RuntimeProfile* _parent_profile; // parent profile from result sink. not owned - // total time cost on append batch opertion + // total time cost on append batch operation RuntimeProfile::Counter* _append_row_batch_timer = nullptr; // tuple convert timer, child timer of _append_row_batch_timer RuntimeProfile::Counter* _convert_tuple_timer = nullptr; diff --git a/be/src/runtime/mysql_table_writer.h b/be/src/runtime/mysql_table_writer.h index e737c58e3c41b9..8da07db101c620 100644 --- a/be/src/runtime/mysql_table_writer.h +++ b/be/src/runtime/mysql_table_writer.h @@ -48,7 +48,7 @@ class MysqlTableWriter { MysqlTableWriter(const std::vector& output_exprs); ~MysqlTableWriter(); - // connnect to mysql server + // connect to mysql server Status open(const MysqlConnInfo& conn_info, const std::string& tbl); Status begin_trans() { diff --git a/be/src/runtime/plan_fragment_executor.cpp b/be/src/runtime/plan_fragment_executor.cpp index 97707f41d7febc..dac1c71322e91b 100644 --- a/be/src/runtime/plan_fragment_executor.cpp +++ b/be/src/runtime/plan_fragment_executor.cpp @@ -291,7 +291,7 @@ Status PlanFragmentExecutor::open_internal() { } SCOPED_TIMER(profile()->total_time_counter()); - // Collect this plan and sub plan statisticss, and send to parent plan. + // Collect this plan and sub plan statistics, and send to parent plan. if (_collect_query_statistics_with_every_batch) { collect_query_statistics(); } @@ -561,9 +561,9 @@ void PlanFragmentExecutor::close() { // Compute the _local_time_percent before pretty_print the runtime_profile // Before add this operation, the print out like that: // UNION_NODE (id=0):(Active: 56.720us, non-child: 00.00%) - // After add thie operation, the print out like that: + // After add this operation, the print out like that: // UNION_NODE (id=0):(Active: 56.720us, non-child: 82.53%) - // We can easily know the exec node excute time without child time consumed. + // We can easily know the exec node execute time without child time consumed.
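The non-child percentage mentioned above is the node's self time, its total active time minus the time attributed to its children, expressed against the whole query time. A toy sketch of that computation on a simplified tree (the real RuntimeProfile::compute_time_in_profile() walks counter objects, not this struct):

#include <cstdint>
#include <vector>

// Simplified profile node: total_time includes time spent in children.
struct ProfileNode {
    int64_t total_time_ns = 0;
    double local_time_percent = 0.0;
    std::vector<ProfileNode*> children;
};

// Self time = node time minus the sum of its children's time, expressed as a
// fraction of the overall root time so percentages are comparable across nodes.
static void compute_local_time_percent(ProfileNode* node, int64_t root_total_ns) {
    int64_t child_total = 0;
    for (ProfileNode* child : node->children) {
        compute_local_time_percent(child, root_total_ns);
        child_total += child->total_time_ns;
    }
    int64_t local = node->total_time_ns - child_total;
    if (local < 0) local = 0;  // counter skew can make this slightly negative
    node->local_time_percent =
            root_total_ns > 0 ? 100.0 * local / root_total_ns : 0.0;
}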
_runtime_state->runtime_profile()->compute_time_in_profile(); _runtime_state->runtime_profile()->pretty_print(&ss); LOG(INFO) << ss.str(); diff --git a/be/src/runtime/result_queue_mgr.cpp b/be/src/runtime/result_queue_mgr.cpp index 89f8e6ccde6419..fe001e295d9176 100644 --- a/be/src/runtime/result_queue_mgr.cpp +++ b/be/src/runtime/result_queue_mgr.cpp @@ -56,12 +56,12 @@ Status ResultQueueMgr::fetch_result(const TUniqueId& fragment_instance_id, std:: } // check queue status before get result RETURN_IF_ERROR(queue->status()); - bool sucess = queue->blocking_get(result); - if (sucess) { + bool success = queue->blocking_get(result); + if (success) { // sentinel nullptr indicates scan end if (*result == nullptr) { *eos = true; - // put sentinel for consistency, avoid repeated invoking fetch result when hava no rowbatch + // put sentinel for consistency, avoid repeatedly invoking fetch result when there is no rowbatch if (queue != nullptr) { queue->blocking_put(nullptr); } diff --git a/be/src/runtime/routine_load/data_consumer.cpp b/be/src/runtime/routine_load/data_consumer.cpp index ca2675aa78545d..32240f11b23795 100644 --- a/be/src/runtime/routine_load/data_consumer.cpp +++ b/be/src/runtime/routine_load/data_consumer.cpp @@ -203,7 +203,7 @@ Status KafkaDataConsumer::group_consume( ++received_rows; break; case RdKafka::ERR__TIMED_OUT: - // leave the status as OK, because this may happend + // leave the status as OK, because this may happen // if there is no data in kafka. LOG(INFO) << "kafka consume timeout: " << _id; break; @@ -219,7 +219,7 @@ Status KafkaDataConsumer::group_consume( if (done) { break; } } - LOG(INFO) << "kafka conumer done: " << _id << ", grp: " << _grp_id + LOG(INFO) << "kafka consumer done: " << _id << ", grp: " << _grp_id << ". cancelled: " << _cancelled << ", left time(ms): " << left_time << ", total cost(ms): " << watch.elapsed_time() / 1000 / 1000 diff --git a/be/src/runtime/routine_load/routine_load_task_executor.cpp b/be/src/runtime/routine_load/routine_load_task_executor.cpp index 765d2999b20b74..071d6042acf128 100644 --- a/be/src/runtime/routine_load/routine_load_task_executor.cpp +++ b/be/src/runtime/routine_load/routine_load_task_executor.cpp @@ -140,7 +140,7 @@ Status RoutineLoadTaskExecutor::submit_task(const TRoutineLoadTask& task) { if(task.__isset.format) { ctx->format = task.format; } - // the routine load task'txn has alreay began in FE. + // the routine load task's txn has already begun in FE. // so it need to rollback if encounter error. ctx->need_rollback = true; ctx->max_filter_ratio = 1.0; diff --git a/be/src/runtime/row_batch.h b/be/src/runtime/row_batch.h index 3d36a3f3a54919..addacb5db83127 100644 --- a/be/src/runtime/row_batch.h +++ b/be/src/runtime/row_batch.h @@ -47,7 +47,7 @@ class PRowBatch; // The row batch reference a few different sources of memory. // 1. TupleRow ptrs - this is always owned and managed by the row batch. // 2. Tuple memory - this is allocated (or transferred to) the row batches tuple pool. -// 3. Auxillary tuple memory (e.g. string data) - this can either be stored externally +// 3. Auxiliary tuple memory (e.g. string data) - this can either be stored externally // (don't copy strings) or from the tuple pool (strings are copied). If external, // the data is in an io buffer that may not be attached to this row batch. The // creator of that row batch has to make sure that the io buffer is not recycled @@ -357,7 +357,7 @@ class RowBatch : public RowBatchInterface { // multiple threads which push row batches.
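The nullptr sentinel in the ResultQueueMgr hunk above is a common blocking-queue pattern: the producer pushes nullptr once to mark end-of-scan, and the consumer pushes it back so every later fetch also terminates instead of blocking forever. A self-contained sketch of that pattern follows; the queue class here is a stand-in written for this illustration, not Doris's implementation.

#include <condition_variable>
#include <mutex>
#include <queue>

// Minimal blocking queue, just enough to demonstrate the sentinel pattern.
template <typename T>
class BlockingQueue {
public:
    void blocking_put(T v) {
        { std::lock_guard<std::mutex> g(_m); _q.push(std::move(v)); }
        _cv.notify_one();
    }
    bool blocking_get(T* out) {
        std::unique_lock<std::mutex> l(_m);
        _cv.wait(l, [this] { return !_q.empty(); });
        *out = std::move(_q.front());
        _q.pop();
        return true;
    }
private:
    std::mutex _m;
    std::condition_variable _cv;
    std::queue<T> _q;
};

struct RowBatch; // opaque for this sketch

// Consumer side: nullptr is the end-of-scan sentinel. Re-inserting it keeps
// every subsequent fetch returning eos immediately instead of blocking.
void fetch(BlockingQueue<RowBatch*>& q, RowBatch** result, bool* eos) {
    q.blocking_get(result);
    if (*result == nullptr) {
        *eos = true;
        q.blocking_put(nullptr);
    }
}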
// TODO: this is wasteful and makes a copy that's unnecessary. Think about cleaning // this up. - // TOOD: rename this or unify with TransferResourceOwnership() + // TODO: rename this or unify with TransferResourceOwnership() void acquire_state(RowBatch* src); // Deep copy all rows this row batch into dst, using memory allocated from @@ -508,7 +508,7 @@ class RowBatch : public RowBatchInterface { // This is a string so we can swap() with the string in the TRowBatch we're serializing // to (we don't compress directly into the TRowBatch in case the compressed data is // longer than the uncompressed data). Swapping avoids copying data to the TRowBatch and - // avoids excess memory allocations: since we reuse RowBatchs and TRowBatchs, and + // avoids excess memory allocations: since we reuse RowBatches and TRowBatches, and // assuming all row batches are roughly the same size, all strings will eventually be // allocated to the right size. std::string _compression_scratch; diff --git a/be/src/runtime/stream_load/stream_load_context.h b/be/src/runtime/stream_load/stream_load_context.h index 64d2aa3650bba6..0b4adaceb3fc1b 100644 --- a/be/src/runtime/stream_load/stream_load_context.h +++ b/be/src/runtime/stream_load/stream_load_context.h @@ -70,7 +70,7 @@ class KafkaLoadInfo { // partition -> begin offset, inclusive. std::map begin_offset; - // partiton -> commit offset, inclusive. + // partition -> commit offset, inclusive. std::map cmt_offset; //custom kafka property key -> value std::map properties; diff --git a/be/src/runtime/stream_load/stream_load_executor.cpp b/be/src/runtime/stream_load/stream_load_executor.cpp index 13dad53cfd2ba2..696399b427693b 100644 --- a/be/src/runtime/stream_load/stream_load_executor.cpp +++ b/be/src/runtime/stream_load/stream_load_executor.cpp @@ -282,7 +282,7 @@ bool StreamLoadExecutor::collect_load_stat(StreamLoadContext* ctx, TTxnCommitAtt break; } default: - // unknown load type, should not happend + // unknown load type, should not happen return false; } diff --git a/be/src/runtime/stream_load/stream_load_pipe.h b/be/src/runtime/stream_load/stream_load_pipe.h index 3deb4043f8aedc..43bb881d0fa0c3 100644 --- a/be/src/runtime/stream_load/stream_load_pipe.h +++ b/be/src/runtime/stream_load/stream_load_pipe.h @@ -165,7 +165,7 @@ class StreamLoadPipe : public MessageBodySink, public FileReader { return Status::InternalError("Not implemented"); } - // called when comsumer finished + // called when consumer finished void close() override { cancel(); } @@ -189,7 +189,7 @@ class StreamLoadPipe : public MessageBodySink, public FileReader { return Status::OK(); } - // called when producer/comsumer failed + // called when producer/consumer failed void cancel() override { { std::lock_guard l(_lock); diff --git a/be/src/runtime/user_function_cache.h b/be/src/runtime/user_function_cache.h index e53fe46fb9e8f6..1bcc23eb7e590d 100644 --- a/be/src/runtime/user_function_cache.h +++ b/be/src/runtime/user_function_cache.h @@ -27,14 +27,14 @@ namespace doris { struct UserFunctionCacheEntry; -// Used to cache a user function. Theses functions inlcude +// Used to cache a user function. These functions include -// UDF(User Definfed Function) and UDAF(User Defined Aggregate +// UDF(User Defined Function) and UDAF(User Defined Aggregate -// Function), and maybe inlucde UDTF(User Defined Table +// Function), and maybe include UDTF(User Defined Table // Function) in future.
A user defined function may be splitted // into several functions, for example, UDAF is splitted into // InitFn, MergeFn, FinalizeFn... // In Doris, we call UDF/UDAF/UDTF UserFunction, and we call -// implement function Fucntion. +// the implementation function Function. // An UserFunction have a function id, we can find library with // this id. When we add user function into cache, we need to // download from URL and check its checksum. So if we find a function diff --git a/be/src/service/doris_main.cpp b/be/src/service/doris_main.cpp index 1734528cb78a83..8181a57b20306e 100644 --- a/be/src/service/doris_main.cpp +++ b/be/src/service/doris_main.cpp @@ -163,7 +163,7 @@ int main(int argc, char** argv) { exit(-1); } - // initilize libcurl here to avoid concurrent initialization + // initialize libcurl here to avoid concurrent initialization auto curl_ret = curl_global_init(CURL_GLOBAL_ALL); if (curl_ret != 0) { LOG(FATAL) << "fail to initialize libcurl, curl_ret=" << curl_ret; } diff --git a/be/src/service/internal_service.cpp b/be/src/service/internal_service.cpp index 99395a36fb900a..799b8475449862 100644 --- a/be/src/service/internal_service.cpp +++ b/be/src/service/internal_service.cpp @@ -166,10 +166,10 @@ void PInternalServiceImpl::cancel_plan_fragment( Status st; if (request->has_cancel_reason()) { - LOG(INFO) << "cancel framgent, fragment_instance_id=" << print_id(tid) << ", reason: " << request->cancel_reason(); + LOG(INFO) << "cancel fragment, fragment_instance_id=" << print_id(tid) << ", reason: " << request->cancel_reason(); st = _exec_env->fragment_mgr()->cancel(tid, request->cancel_reason()); } else { - LOG(INFO) << "cancel framgent, fragment_instance_id=" << print_id(tid); + LOG(INFO) << "cancel fragment, fragment_instance_id=" << print_id(tid); st = _exec_env->fragment_mgr()->cancel(tid); } if (!st.ok()) { diff --git a/be/src/udf/udf.cpp b/be/src/udf/udf.cpp index b503edb05c45b2..20e748dd9585b5 100755 --- a/be/src/udf/udf.cpp +++ b/be/src/udf/udf.cpp @@ -496,7 +496,7 @@ void HllVal::agg_parse_and_cal(FunctionContext* ctx, const HllVal& other) { uint8_t first_one_bit = __builtin_ctzl(hash_value >> doris::HLL_COLUMN_PRECISION) + 1; pdata[idx] = std::max(pdata[idx], first_one_bit); } - } else if (resolver.get_hll_data_type() == doris::HLL_DATA_SPRASE) { + } else if (resolver.get_hll_data_type() == doris::HLL_DATA_SPARSE) { std::map& sparse_map = resolver.get_sparse_map(); diff --git a/be/src/util/arrow/row_batch.cpp b/be/src/util/arrow/row_batch.cpp index 5c35e44108075e..c799e3bbe614a4 100644 --- a/be/src/util/arrow/row_batch.cpp +++ b/be/src/util/arrow/row_batch.cpp @@ -168,7 +168,7 @@ Status convert_to_row_desc( // Convert RowBatch to an Arrow::Array // We should keep this function to keep compatible with arrow's type visitor -// Now we inherit TypeVisitor to use default Visit implemention +// Now we inherit TypeVisitor to use default Visit implementation class FromRowBatchConverter : public arrow::TypeVisitor { public: FromRowBatchConverter(const RowBatch& batch, @@ -222,7 +222,7 @@ class FromRowBatchConverter : public arrow::TypeVisitor { case TYPE_HLL: { const StringValue* string_val = (const StringValue*)(cell_ptr); if (string_val->len == 0) { - // 0x01 is a magic num, not usefull actually, just for present "" + // 0x01 is a magic num, not actually useful, just to represent "" //char* tmp_val = reinterpret_cast<char*>(0x01); ARROW_RETURN_NOT_OK(builder.Append("")); } else { diff --git a/be/src/util/arrow/row_batch.h b/be/src/util/arrow/row_batch.h index b1ac280f695886..496d46d018e128 100644
--- a/be/src/util/arrow/row_batch.h +++ b/be/src/util/arrow/row_batch.h @@ -53,7 +53,7 @@ Status convert_to_row_desc( const arrow::Schema& schema, RowDescriptor** row_desc); -// Converte a Doris RowBatch to an Arrow RecordBatch. A valid Arrow Schema +// Convert a Doris RowBatch to an Arrow RecordBatch. A valid Arrow Schema // who should match RowBatch's schema is given. Memory used by result RecordBatch // will be allocated from input pool. Status convert_to_arrow_batch( diff --git a/be/src/util/arrow/row_block.cpp b/be/src/util/arrow/row_block.cpp index b932cd7b0b3e9d..b6e60bc2f3280a 100644 --- a/be/src/util/arrow/row_block.cpp +++ b/be/src/util/arrow/row_block.cpp @@ -145,7 +145,7 @@ Status convert_to_doris_schema(const arrow::Schema& schema, // Convert data in RowBlockV2 to an Arrow RecordBatch // We should keep this function to keep compatible with arrow's type visitor -// Now we inherit TypeVisitor to use default Visit implemention +// Now we inherit TypeVisitor to use default Visit implementation class FromRowBlockConverter : public arrow::TypeVisitor { public: FromRowBlockConverter(const RowBlockV2& block, diff --git a/be/src/util/bfd_parser.cpp b/be/src/util/bfd_parser.cpp index a0bff8618e60a7..a18a305665f5a0 100644 --- a/be/src/util/bfd_parser.cpp +++ b/be/src/util/bfd_parser.cpp @@ -172,7 +172,7 @@ int BfdParser::open_bfd() { return -1; } if (bfd_check_format(_abfd, bfd_archive)) { - LOG(WARNING) << "bfd_check_format for archive fialed because errmsg=" + LOG(WARNING) << "bfd_check_format for archive failed because errmsg=" << bfd_errmsg(bfd_get_error()); return -1; } diff --git a/be/src/util/cgroup_util.h b/be/src/util/cgroup_util.h index 91ff7044660402..05670e87c07079 100644 --- a/be/src/util/cgroup_util.h +++ b/be/src/util/cgroup_util.h @@ -41,7 +41,7 @@ class CGroupUtil { static bool enable(); private: - // return the glabal cgroup path of subsystem like 12:memory:/user.slice -> user.slice + // return the global cgroup path of subsystem like 12:memory:/user.slice -> user.slice static Status find_global_cgroup(const std::string& subsystem, std::string* path); // Returns the absolute path to the CGroup from inside the container. diff --git a/be/src/util/cpu_info.cpp b/be/src/util/cpu_info.cpp index 2b7f9cce9e5b22..650e41aae80f71 100755 --- a/be/src/util/cpu_info.cpp +++ b/be/src/util/cpu_info.cpp @@ -109,7 +109,7 @@ static struct { static const long num_flags = sizeof(flag_mappings) / sizeof(flag_mappings[0]); // Helper function to parse for hardware flags. -// values contains a list of space-seperated flags. check to see if the flags we +// values contains a list of space-separated flags. check to see if the flags we // care about are present. // Returns a bitmap of flags. int64_t ParseCPUFlags(const string& values) { diff --git a/be/src/util/doris_metrics.h b/be/src/util/doris_metrics.h index 924f365688eea2..47a893390869df 100644 --- a/be/src/util/doris_metrics.h +++ b/be/src/util/doris_metrics.h @@ -192,7 +192,7 @@ class DorisMetrics { MetricEntity* server_entity() { return _server_metric_entity.get(); } private: - // Don't allow constrctor + // Don't allow constructor DorisMetrics(); void _update(); diff --git a/be/src/util/dynamic_util.h b/be/src/util/dynamic_util.h index 5ab36df0c7e540..1b73b322160396 100644 --- a/be/src/util/dynamic_util.h +++ b/be/src/util/dynamic_util.h @@ -22,13 +22,13 @@ namespace doris { -// Look up smybols in a dynamically linked library. +// Look up symbols in a dynamically linked library. // handle -- handle to the library. 
NULL if loading from the current process. // symbol -- symbol to lookup. -// fn_ptr -- pointer tor retun addres of function. +// fn_ptr -- pointer to return address of function. Status dynamic_lookup(void* handle, const char* symbol, void** fn_ptr); -// Open a dynamicly loaded library. +// Open a dynamically loaded library. // library -- name of the library. The default paths will be searched. // library can be NULL to get the handle for the current process. // handle -- returned handle to the library. diff --git a/be/src/util/faststring.cc b/be/src/util/faststring.cc index 26545f68d8ab4c..ddffcdcd622237 100644 --- a/be/src/util/faststring.cc +++ b/be/src/util/faststring.cc @@ -26,7 +26,7 @@ void faststring::GrowToAtLeast(size_t newcapacity) { // Not enough space, need to reserve more. // Don't reserve exactly enough space for the new string -- that makes it // too easy to write perf bugs where you get O(n^2) append. - // Instead, alwayhs expand by at least 50%. + // Instead, always expand by at least 50%. if (newcapacity < capacity_ * 3 / 2) { newcapacity = capacity_ * 3 / 2; diff --git a/be/src/util/file_cache.h b/be/src/util/file_cache.h index 1f8f8160bc79f5..30c3fb465a5509 100644 --- a/be/src/util/file_cache.h +++ b/be/src/util/file_cache.h @@ -140,7 +140,7 @@ class FileCache { std::shared_ptr _cache; // Indicates weather _cache is only owned by this, - // gernerally, _cache can be shared by other, in + // generally, _cache can be shared by other, in // this case, _is_cache_own is set to false. bool _is_cache_own = false; diff --git a/be/src/util/file_utils.cpp b/be/src/util/file_utils.cpp index 469983f53ffe3f..b8db85fa7a017d 100644 --- a/be/src/util/file_utils.cpp +++ b/be/src/util/file_utils.cpp @@ -195,7 +195,7 @@ std::string FileUtils::path_of_fd(int fd) { return path; } -Status FileUtils::split_pathes(const char* path, std::vector<std::string>* path_vec) { +Status FileUtils::split_paths(const char* path, std::vector<std::string>* path_vec) { path_vec->clear(); *path_vec = strings::Split(path, ";", strings::SkipWhitespace()); diff --git a/be/src/util/file_utils.h b/be/src/util/file_utils.h index 43045e3d38e17d..9192c75082156d 100644 --- a/be/src/util/file_utils.h +++ b/be/src/util/file_utils.h @@ -63,7 +63,7 @@ class FileUtils { static Status remove_paths(const std::vector& paths); // List all files in the specified directory without '.' and '..'. - // If you want retreive all files, you can use Env::iterate_dir. + // If you want to retrieve all files, you can use Env::iterate_dir. // All valid files will be stored in given *files. static Status list_files( Env* env, @@ -94,11 +94,11 @@ class FileUtils { // "" if this fd is invalid static std::string path_of_fd(int fd); - // split pathes in configue file to path + // split paths in config file into separate paths // for example // "/home/disk1/;/home/disk2" // will split to ['/home/disk1', '/home/disk2'] - static Status split_pathes(const char* path, std::vector<std::string>* path_vec); + static Status split_paths(const char* path, std::vector<std::string>* path_vec); // copy the file from src path to dest path, it will overwrite the existing files static Status copy_file(const std::string& src_path, const std::string& dest_path); diff --git a/be/src/util/mutex.h b/be/src/util/mutex.h index 2fcd756a0625d8..c94aee1ee50fd9 100644 --- a/be/src/util/mutex.h +++ b/be/src/util/mutex.h @@ -51,7 +51,7 @@ class Mutex { }; // Helper class than locks a mutex on construction -// and unlocks the mutex on descontruction. +// and unlocks the mutex on destruction.
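Stepping back to the faststring hunk above: GrowToAtLeast explains why it expands by at least 50%. Reserving exactly the requested size makes a sequence of appends quadratic, while geometric growth keeps the total copying amortized linear. The policy itself reduces to a few lines; this is an illustrative restatement matching the 3/2 factor in the code, not the function itself.

#include <cstddef>

// Geometric growth: each reallocation adds at least half the current
// capacity, so n single-byte appends trigger only O(log n) reallocations.
size_t next_capacity(size_t current, size_t requested) {
    size_t grown = current + current / 2; // at least 1.5x, as in GrowToAtLeast
    return requested > grown ? requested : grown;
}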
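The Mutex helper whose comment ends just above is the RAII idiom: acquire in the constructor, release in the destructor, so no early return or exception can leak a held lock. std::lock_guard gives the same contract, shown here for comparison.

#include <mutex>

// std::lock_guard demonstrates the acquire-on-construction /
// release-on-destruction contract that MutexLock provides.
void update_counter(std::mutex& m, int& counter) {
    std::lock_guard<std::mutex> guard(m); // locked here
    if (counter < 0) return;              // unlocked on the early return
    ++counter;
}                                         // unlocked on normal exit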
class MutexLock { public: // wait until obtain the lock diff --git a/be/src/util/mysql_global.h b/be/src/util/mysql_global.h index d933dff930e581..55b7342cc323bf 100644 --- a/be/src/util/mysql_global.h +++ b/be/src/util/mysql_global.h @@ -39,7 +39,7 @@ typedef unsigned char uchar; #define MAX_MEDIUMINT_WIDTH 8 /* Max width for a INT24 w.o. sign */ #define MAX_INT_WIDTH 10 /* Max width for a LONG w.o. sign */ #define MAX_BIGINT_WIDTH 20 /* Max width for a LONGLONG */ -#define MAX_CHAR_WIDTH 255 /* Max length for a CHAR colum */ +#define MAX_CHAR_WIDTH 255 /* Max length for a CHAR column */ #define MAX_BLOB_WIDTH 16777216 /* Default width for blob */ #define MAX_DECPT_FOR_F_FORMAT DBL_DIG diff --git a/be/src/util/mysql_load_error_hub.cpp b/be/src/util/mysql_load_error_hub.cpp index da6004b70c9ed7..f77a68008b3b01 100644 --- a/be/src/util/mysql_load_error_hub.cpp +++ b/be/src/util/mysql_load_error_hub.cpp @@ -130,7 +130,7 @@ Status MysqlLoadErrorHub::open_mysql_conn(MYSQL** my_conn) { << "Host: " << _info.host << " port: " << _info.port << " user: " << _info.user << " passwd: " << _info.passwd << " db: " << _info.db; - return error_status("loal error mysql real connect failed.", *my_conn); + return error_status("load error mysql real connect failed.", *my_conn); } return Status::OK(); @@ -145,7 +145,7 @@ Status MysqlLoadErrorHub::error_status(const std::string& prefix, MYSQL* my_conn std::string MysqlLoadErrorHub::debug_string() const { std::stringstream out; - out << "(tatal_error_num=" << _total_error_num << ")"; + out << "(total_error_num=" << _total_error_num << ")"; return out.str(); } diff --git a/be/src/util/mysql_row_buffer.cpp b/be/src/util/mysql_row_buffer.cpp index 1585a36782f9fc..5472dd94f2ebbe 100644 --- a/be/src/util/mysql_row_buffer.cpp +++ b/be/src/util/mysql_row_buffer.cpp @@ -105,7 +105,7 @@ int MysqlRowBuffer::push_tinyint(int8_t data) { int ret = reserve(3 + MAX_TINYINT_WIDTH); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -126,7 +126,7 @@ int MysqlRowBuffer::push_smallint(int16_t data) { int ret = reserve(3 + MAX_SMALLINT_WIDTH); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -147,7 +147,7 @@ int MysqlRowBuffer::push_int(int32_t data) { int ret = reserve(3 + MAX_INT_WIDTH); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -168,7 +168,7 @@ int MysqlRowBuffer::push_bigint(int64_t data) { int ret = reserve(3 + MAX_BIGINT_WIDTH); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -189,7 +189,7 @@ int MysqlRowBuffer::push_unsigned_bigint(uint64_t data) { int ret = reserve(4 + MAX_BIGINT_WIDTH); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -210,7 +210,7 @@ int MysqlRowBuffer::push_float(float data) { int ret = reserve(3 + MAX_FLOAT_STR_LENGTH); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -231,7 +231,7 @@ int MysqlRowBuffer::push_double(double data) { int ret = reserve(3 + MAX_DOUBLE_STR_LENGTH); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -257,7 
+257,7 @@ int MysqlRowBuffer::push_string(const char* str, int length) { int ret = reserve(9 + length); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -271,7 +271,7 @@ int MysqlRowBuffer::push_null() { int ret = reserve(1); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return ret; } @@ -284,7 +284,7 @@ char* MysqlRowBuffer::reserved(int size) { int ret = reserve(size); if (0 != ret) { - LOG(ERROR) << "mysql row buffer reserver failed."; + LOG(ERROR) << "mysql row buffer reserve failed."; return NULL; } diff --git a/be/src/util/null_load_error_hub.cpp b/be/src/util/null_load_error_hub.cpp index 5479b1ac1850b3..b21f63fdadfa59 100644 --- a/be/src/util/null_load_error_hub.cpp +++ b/be/src/util/null_load_error_hub.cpp @@ -43,7 +43,7 @@ Status NullLoadErrorHub::close() { std::string NullLoadErrorHub::debug_string() const { std::stringstream out; - out << "NullLoadErrorHub(tatal_error_num=" << _total_error_num << ")"; + out << "NullLoadErrorHub(total_error_num=" << _total_error_num << ")"; return out.str(); } diff --git a/be/src/util/path_trie.hpp b/be/src/util/path_trie.hpp index db9939eb287692..db309867f33fc3 100644 --- a/be/src/util/path_trie.hpp +++ b/be/src/util/path_trie.hpp @@ -72,7 +72,7 @@ class PathTrie { } } - // Return true if insert sucess. + // Return true if insert succeeds. bool insert(const std::vector path, int index, const T& value) { if (index >= path.size()) { return false; } diff --git a/be/src/util/system_metrics.cpp b/be/src/util/system_metrics.cpp index 5dbccf4d45ee52..8f0728388fe537 100644 --- a/be/src/util/system_metrics.cpp +++ b/be/src/util/system_metrics.cpp @@ -338,7 +338,7 @@ void SystemMetrics::_update_disk_metrics() { // 12 - I/Os currently in progress // 13 - time spent doing I/Os (ms) // 14 - weighted time spent doing I/Os (ms) - // I think 1024 is enougth for device name + // I think 1024 is enough for device name int major = 0; int minor = 0; char device[1024]; diff --git a/be/src/util/tdigest.h b/be/src/util/tdigest.h index 57cb4159bb45e4..4327d4176f16be 100644 --- a/be/src/util/tdigest.h +++ b/be/src/util/tdigest.h @@ -280,7 +280,7 @@ class TDigest { VLOG(1) << "cdf value " << x; VLOG(1) << "processed size " << _processed.size(); if (_processed.size() == 0) { - // no data to examin_e + // no data to examine VLOG(1) << "no processed values"; return 0.0; diff --git a/be/src/util/thrift_rpc_helper.cpp b/be/src/util/thrift_rpc_helper.cpp index d4b9249a3f93de..a8d6209f416080 100644 --- a/be/src/util/thrift_rpc_helper.cpp +++ b/be/src/util/thrift_rpc_helper.cpp @@ -74,7 +74,7 @@ Status ThriftRpcHelper::rpc( SleepFor(MonoDelta::FromMilliseconds(config::thrift_client_retry_interval_ms)); status = client.reopen(timeout_ms); if (!status.ok()) { - LOG(WARNING) << "client repoen failed. address=" << address + LOG(WARNING) << "client reopen failed.
address=" << address << ", status=" << status.get_error_msg(); return status; } diff --git a/be/test/env/env_posix_test.cpp b/be/test/env/env_posix_test.cpp index 110628ce9ccaba..95ae2be935221e 100644 --- a/be/test/env/env_posix_test.cpp +++ b/be/test/env/env_posix_test.cpp @@ -47,7 +47,7 @@ TEST_F(EnvPosixTest, random_access) { ASSERT_TRUE(st.ok()); st = wfile->pre_allocate(1024); ASSERT_TRUE(st.ok()); - // wirte data + // write data Slice field1("123456789"); st = wfile->append(field1); ASSERT_TRUE(st.ok()); @@ -110,7 +110,7 @@ TEST_F(EnvPosixTest, random_rw) { auto env = Env::Default(); auto st = env->new_random_rw_file(fname, &wfile); ASSERT_TRUE(st.ok()); - // wirte data + // write data Slice field1("123456789"); st = wfile->write_at(0, field1); ASSERT_TRUE(st.ok()); diff --git a/be/test/exec/es_query_builder_test.cpp b/be/test/exec/es_query_builder_test.cpp index f35ec6cbed2a0e..24b82312ba4168 100644 --- a/be/test/exec/es_query_builder_test.cpp +++ b/be/test/exec/es_query_builder_test.cpp @@ -515,9 +515,9 @@ TEST_F(BooleanQueryBuilderTest, validate_compound_and) { std::string term_field_name = "content"; ExtBinaryPredicate* term_ne_predicate = new ExtBinaryPredicate(TExprNodeType::BINARY_PRED, term_field_name, term_type_desc, TExprOpcode::NE, term_literal); - std::vector innner_or_content = {term_ne_predicate, in_predicate}; + std::vector inner_or_content = {term_ne_predicate, in_predicate}; - EsPredicate* innner_or_predicate = new EsPredicate(innner_or_content); + EsPredicate* inner_or_predicate = new EsPredicate(inner_or_content); char range_value_str[] = "a"; // k >= "a" int range_value_length = (int)strlen(range_value_str); @@ -529,9 +529,9 @@ TEST_F(BooleanQueryBuilderTest, validate_compound_and) { std::vector range_predicates = {range_predicate}; EsPredicate* left_inner_or_predicate = new EsPredicate(range_predicates); - std::vector ourter_left_predicates_1 = {left_inner_or_predicate, innner_or_predicate}; + std::vector outer_left_predicates_1 = {left_inner_or_predicate, inner_or_predicate}; - ExtCompPredicates* comp_predicate = new ExtCompPredicates(TExprOpcode::COMPOUND_AND, ourter_left_predicates_1); + ExtCompPredicates* comp_predicate = new ExtCompPredicates(TExprOpcode::COMPOUND_AND, outer_left_predicates_1); char like_value[] = "a%e%g_"; int like_value_length = (int)strlen(like_value); diff --git a/be/test/exec/es_scan_reader_test.cpp b/be/test/exec/es_scan_reader_test.cpp index 28976b22eafd52..a30033a972dfdf 100644 --- a/be/test/exec/es_scan_reader_test.cpp +++ b/be/test/exec/es_scan_reader_test.cpp @@ -62,18 +62,18 @@ class RestSearchAction : public HttpHandler { rapidjson::Value outer_hits(rapidjson::kObjectType); outer_hits.AddMember("total", 10, allocator); rapidjson::Value inner_hits(rapidjson::kArrayType); - rapidjson::Value source_docuement(rapidjson::kObjectType); - source_docuement.AddMember("id", 1, allocator); + rapidjson::Value source_document(rapidjson::kObjectType); + source_document.AddMember("id", 1, allocator); rapidjson::Value value_node("1", allocator); - source_docuement.AddMember("value", value_node, allocator); - inner_hits.PushBack(source_docuement, allocator); + source_document.AddMember("value", value_node, allocator); + inner_hits.PushBack(source_document, allocator); outer_hits.AddMember("hits", inner_hits, allocator); search_result.AddMember("hits", outer_hits, allocator); rapidjson::StringBuffer buffer; rapidjson::Writer writer(buffer); search_result.Accept(writer); - //send DELETE scorll post request + //send DELETE scroll post request 
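The mock Elasticsearch handlers above all follow the same rapidjson recipe: build a DOM against the document's allocator, then serialize it through a Writer into a StringBuffer. The following standalone distillation of that pattern uses only documented rapidjson calls; the make_hit function and its field names are invented for illustration.

#include <rapidjson/document.h>
#include <rapidjson/stringbuffer.h>
#include <rapidjson/writer.h>
#include <string>

// Build {"hit": {"id": <id>, "value": "<value>"}} and serialize it.
std::string make_hit(int id, const std::string& value) {
    rapidjson::Document doc(rapidjson::kObjectType);
    rapidjson::Document::AllocatorType& allocator = doc.GetAllocator();

    rapidjson::Value hit(rapidjson::kObjectType);
    hit.AddMember("id", id, allocator);
    rapidjson::Value value_node(value.c_str(), allocator); // deep-copied string
    hit.AddMember("value", value_node, allocator);
    doc.AddMember("hit", hit, allocator);

    rapidjson::StringBuffer buffer;
    rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
    doc.Accept(writer);
    return buffer.GetString();
}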
std::string search_result_json = buffer.GetString(); HttpChannel::send_reply(req, search_result_json); } else { @@ -117,7 +117,7 @@ class RestSearchScrollAction : public HttpHandler { rapidjson::StringBuffer buffer; rapidjson::Writer writer(buffer); end_search_result.Accept(writer); - //send DELETE scorll post request + //send DELETE scroll post request std::string end_search_result_json = buffer.GetString(); HttpChannel::send_reply(req, end_search_result_json); return; @@ -132,18 +132,18 @@ class RestSearchScrollAction : public HttpHandler { rapidjson::Value outer_hits(rapidjson::kObjectType); outer_hits.AddMember("total", 1, allocator); rapidjson::Value inner_hits(rapidjson::kArrayType); - rapidjson::Value source_docuement(rapidjson::kObjectType); - source_docuement.AddMember("id", start, allocator); + rapidjson::Value source_document(rapidjson::kObjectType); + source_document.AddMember("id", start, allocator); rapidjson::Value value_node(std::to_string(start).c_str(), allocator); - source_docuement.AddMember("value", value_node, allocator); - inner_hits.PushBack(source_docuement, allocator); + source_document.AddMember("value", value_node, allocator); + inner_hits.PushBack(source_document, allocator); outer_hits.AddMember("hits", inner_hits, allocator); search_result.AddMember("hits", outer_hits, allocator); rapidjson::StringBuffer buffer; rapidjson::Writer writer(buffer); search_result.Accept(writer); - //send DELETE scorll post request + //send DELETE scroll post request std::string search_result_json = buffer.GetString(); HttpChannel::send_reply(req, search_result_json); return; diff --git a/be/test/exec/json_scanner_test.cpp b/be/test/exec/json_scanner_test.cpp index c49282fc0e1137..93805497c9648c 100644 --- a/be/test/exec/json_scanner_test.cpp +++ b/be/test/exec/json_scanner_test.cpp @@ -37,9 +37,9 @@ namespace doris { -class JsonSannerTest : public testing::Test { +class JsonScannerTest : public testing::Test { public: - JsonSannerTest() : _runtime_state(TQueryGlobals()) { + JsonScannerTest() : _runtime_state(TQueryGlobals()) { init(); _runtime_state._instance_mem_tracker.reset(new MemTracker()); _runtime_state._exec_env = ExecEnv::GetInstance(); @@ -70,12 +70,12 @@ class JsonSannerTest : public testing::Test { #define TUPLE_ID_DST 0 #define TUPLE_ID_SRC 1 -#define CLOMN_NUMBERS 4 +#define COLUMN_NUMBERS 4 #define DST_TUPLE_SLOT_ID_START 1 #define SRC_TUPLE_SLOT_ID_START 5 -int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { - const char *clomnNames[] = {"category","author","title","price"}; - for (int i = 0; i < CLOMN_NUMBERS; i++) +int JsonScannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { + const char *columnNames[] = {"category","author","title","price"}; + for (int i = 0; i < COLUMN_NUMBERS; i++) { TSlotDescriptor slot_desc; @@ -96,7 +96,7 @@ int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_sl slot_desc.byteOffset = i*16+8; slot_desc.nullIndicatorByte = i/8; slot_desc.nullIndicatorBit = i%8; - slot_desc.colName = clomnNames[i]; + slot_desc.colName = columnNames[i]; slot_desc.slotIdx = i + 1; slot_desc.isMaterialized = true; @@ -107,7 +107,7 @@ int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_sl // TTupleDescriptor source TTupleDescriptor t_tuple_desc; t_tuple_desc.id = TUPLE_ID_SRC; - t_tuple_desc.byteSize = CLOMN_NUMBERS*16+8; + t_tuple_desc.byteSize = COLUMN_NUMBERS*16+8; t_tuple_desc.numNullBytes = 0; t_tuple_desc.tableId = 0; 
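The descriptor-building code in these scanner tests repeats one layout rule: the tuple starts with 8 bytes of null bitmap, followed by one 16-byte slot per column, which is where byteOffset = i*16+8, nullIndicatorByte = i/8, and nullIndicatorBit = i%8 come from. A tiny standalone check of that arithmetic:

#include <cstdio>

int main() {
    const int columns = 4; // COLUMN_NUMBERS in the JSON scanner tests
    for (int i = 0; i < columns; ++i) {
        // 8 leading null-bitmap bytes, then 16 bytes per slot.
        std::printf("slot %d: byteOffset=%d nullIndicatorByte=%d nullIndicatorBit=%d\n",
                    i, i * 16 + 8, i / 8, i % 8);
    }
    std::printf("tuple byteSize=%d\n", columns * 16 + 8); // 72 for 4 columns
}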
t_tuple_desc.__isset.tableId = true; @@ -116,7 +116,7 @@ int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_sl return next_slot_id; } -int JsonSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { +int JsonScannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { int32_t byteOffset = 8; {//category TSlotDescriptor slot_desc; @@ -238,7 +238,7 @@ int JsonSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_sl return next_slot_id; } -void JsonSannerTest::init_desc_table() { +void JsonScannerTest::init_desc_table() { TDescriptorTable t_desc_table; // table descriptors @@ -262,7 +262,7 @@ void JsonSannerTest::init_desc_table() { _runtime_state.set_desc_tbl(_desc_tbl); } -void JsonSannerTest::create_expr_info() { +void JsonScannerTest::create_expr_info() { TTypeDesc varchar_type; { TTypeNode node; @@ -369,7 +369,7 @@ void JsonSannerTest::create_expr_info() { _params.__set_src_tuple_id(TUPLE_ID_SRC); } -void JsonSannerTest::init() { +void JsonScannerTest::init() { create_expr_info(); init_desc_table(); @@ -385,7 +385,7 @@ void JsonSannerTest::init() { _tnode.__isset.broker_scan_node = true; } -TEST_F(JsonSannerTest, normal_simple_arrayjson) { +TEST_F(JsonScannerTest, normal_simple_arrayjson) { BrokerScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); auto status = scan_node.prepare(&_runtime_state); ASSERT_TRUE(status.ok()); diff --git a/be/test/exec/json_scanner_test_with_jsonpath.cpp b/be/test/exec/json_scanner_test_with_jsonpath.cpp index 9a24e684df2533..0b9f9c2765f2f7 100644 --- a/be/test/exec/json_scanner_test_with_jsonpath.cpp +++ b/be/test/exec/json_scanner_test_with_jsonpath.cpp @@ -37,9 +37,9 @@ namespace doris { -class JsonSannerTest : public testing::Test { +class JsonScannerTest : public testing::Test { public: - JsonSannerTest() : _runtime_state(TQueryGlobals()) { + JsonScannerTest() : _runtime_state(TQueryGlobals()) { init(); _runtime_state._instance_mem_tracker.reset(new MemTracker()); _runtime_state._exec_env = ExecEnv::GetInstance(); @@ -70,12 +70,12 @@ class JsonSannerTest : public testing::Test { #define TUPLE_ID_DST 0 #define TUPLE_ID_SRC 1 -#define CLOMN_NUMBERS 4 +#define COLUMN_NUMBERS 4 #define DST_TUPLE_SLOT_ID_START 1 #define SRC_TUPLE_SLOT_ID_START 5 -int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { - const char *clomnNames[] = {"k1", "kind", "ip", "value"}; - for (int i = 0; i < CLOMN_NUMBERS; i++) +int JsonScannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { + const char *columnNames[] = {"k1", "kind", "ip", "value"}; + for (int i = 0; i < COLUMN_NUMBERS; i++) { TSlotDescriptor slot_desc; @@ -96,7 +96,7 @@ int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_sl slot_desc.byteOffset = i*16+8; slot_desc.nullIndicatorByte = i/8; slot_desc.nullIndicatorBit = i%8; - slot_desc.colName = clomnNames[i]; + slot_desc.colName = columnNames[i]; slot_desc.slotIdx = i + 1; slot_desc.isMaterialized = true; @@ -107,7 +107,7 @@ int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_sl // TTupleDescriptor source TTupleDescriptor t_tuple_desc; t_tuple_desc.id = TUPLE_ID_SRC; - t_tuple_desc.byteSize = CLOMN_NUMBERS*16+8; + t_tuple_desc.byteSize = COLUMN_NUMBERS*16+8; t_tuple_desc.numNullBytes = 0; t_tuple_desc.tableId = 0; t_tuple_desc.__isset.tableId = true; @@ -116,7 +116,7 @@ int JsonSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_sl return next_slot_id; 
} -int JsonSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { +int JsonScannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { int32_t byteOffset = 8; {//k1 TSlotDescriptor slot_desc; @@ -238,7 +238,7 @@ int JsonSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_sl return next_slot_id; } -void JsonSannerTest::init_desc_table() { +void JsonScannerTest::init_desc_table() { TDescriptorTable t_desc_table; // table descriptors @@ -262,7 +262,7 @@ void JsonSannerTest::init_desc_table() { _runtime_state.set_desc_tbl(_desc_tbl); } -void JsonSannerTest::create_expr_info() { +void JsonScannerTest::create_expr_info() { TTypeDesc varchar_type; { TTypeNode node; @@ -343,7 +343,7 @@ void JsonSannerTest::create_expr_info() { _params.__set_src_tuple_id(TUPLE_ID_SRC); } -void JsonSannerTest::init() { +void JsonScannerTest::init() { create_expr_info(); init_desc_table(); @@ -359,7 +359,7 @@ void JsonSannerTest::init() { _tnode.__isset.broker_scan_node = true; } -TEST_F(JsonSannerTest, normal) { +TEST_F(JsonScannerTest, normal) { BrokerScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); auto status = scan_node.prepare(&_runtime_state); ASSERT_TRUE(status.ok()); diff --git a/be/test/exec/parquet_scanner_test.cpp b/be/test/exec/parquet_scanner_test.cpp index bdfe5303bb68f7..a2ba7f65784144 100644 --- a/be/test/exec/parquet_scanner_test.cpp +++ b/be/test/exec/parquet_scanner_test.cpp @@ -36,9 +36,9 @@ namespace doris { -class ParquetSannerTest : public testing::Test { +class ParquetScannerTest : public testing::Test { public: - ParquetSannerTest() : _runtime_state(TQueryGlobals()) { + ParquetScannerTest() : _runtime_state(TQueryGlobals()) { init(); _runtime_state._instance_mem_tracker.reset(new MemTracker()); } @@ -68,15 +68,15 @@ class ParquetSannerTest : public testing::Test { #define TUPLE_ID_DST 0 #define TUPLE_ID_SRC 1 -#define CLOMN_NUMBERS 20 +#define COLUMN_NUMBERS 20 #define DST_TUPLE_SLOT_ID_START 1 #define SRC_TUPLE_SLOT_ID_START 21 -int ParquetSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { - const char *clomnNames[] = {"log_version", "log_time", "log_time_stamp", "js_version", "vst_cookie", +int ParquetScannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { + const char *columnNames[] = {"log_version", "log_time", "log_time_stamp", "js_version", "vst_cookie", "vst_ip", "vst_user_id", "vst_user_agent", "device_resolution", "page_url", "page_refer_url", "page_yyid", "page_type", "pos_type", "content_id", "media_id", "spm_cnt", "spm_pre", "scm_cnt", "partition_column"}; - for (int i = 0; i < CLOMN_NUMBERS; i++) + for (int i = 0; i < COLUMN_NUMBERS; i++) { TSlotDescriptor slot_desc; @@ -97,7 +97,7 @@ int ParquetSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next slot_desc.byteOffset = i*16+8; // skip the first 8 bytes; these 8 bytes indicate whether each field is null slot_desc.nullIndicatorByte = i/8; slot_desc.nullIndicatorBit = i%8; - slot_desc.colName = clomnNames[i]; + slot_desc.colName = columnNames[i]; slot_desc.slotIdx = i + 1; slot_desc.isMaterialized = true; @@ -108,7 +108,7 @@ int ParquetSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next // TTupleDescriptor source TTupleDescriptor t_tuple_desc; t_tuple_desc.id = TUPLE_ID_SRC; - t_tuple_desc.byteSize = CLOMN_NUMBERS*16+8;//the extra 8 bytes here are for handling null values + t_tuple_desc.byteSize = COLUMN_NUMBERS*16+8;//the extra 8 bytes here are for handling null values t_tuple_desc.numNullBytes = 0; t_tuple_desc.tableId = 0; t_tuple_desc.__isset.tableId = true; @@ -117,7 +117,7 @@ int
ParquetSannerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next return next_slot_id; } -int ParquetSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { +int ParquetScannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { int32_t byteOffset = 8; // skip the first 8 bytes; these 8 bytes indicate whether each field is null {//log_version TSlotDescriptor slot_desc; @@ -198,11 +198,11 @@ int ParquetSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next t_desc_table.slotDescriptors.push_back(slot_desc); } byteOffset += 8; - const char *clomnNames[] = {"log_version", "log_time", "log_time_stamp", "js_version", "vst_cookie", + const char *columnNames[] = {"log_version", "log_time", "log_time_stamp", "js_version", "vst_cookie", "vst_ip", "vst_user_id", "vst_user_agent", "device_resolution", "page_url", "page_refer_url", "page_yyid", "page_type", "pos_type", "content_id", "media_id", "spm_cnt", "spm_pre", "scm_cnt", "partition_column"}; - for (int i = 3; i < CLOMN_NUMBERS; i++, byteOffset+=16) + for (int i = 3; i < COLUMN_NUMBERS; i++, byteOffset+=16) { TSlotDescriptor slot_desc; @@ -223,7 +223,7 @@ int ParquetSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next slot_desc.byteOffset = byteOffset; slot_desc.nullIndicatorByte = i/8; slot_desc.nullIndicatorBit = i%8; - slot_desc.colName = clomnNames[i]; + slot_desc.colName = columnNames[i]; slot_desc.slotIdx = i+1; slot_desc.isMaterialized = true; @@ -244,7 +244,7 @@ int ParquetSannerTest::create_dst_tuple(TDescriptorTable& t_desc_table, int next return next_slot_id; } -void ParquetSannerTest::init_desc_table() { +void ParquetScannerTest::init_desc_table() { TDescriptorTable t_desc_table; // table descriptors @@ -268,7 +268,7 @@ void ParquetSannerTest::init_desc_table() { _runtime_state.set_desc_tbl(_desc_tbl); } -void ParquetSannerTest::create_expr_info() { +void ParquetScannerTest::create_expr_info() { TTypeDesc varchar_type; { TTypeNode node; @@ -379,8 +379,8 @@ void ParquetSannerTest::create_expr_info() { _params.expr_of_dest_slot.emplace(DST_TUPLE_SLOT_ID_START + 2, expr); _params.src_slot_ids.push_back(SRC_TUPLE_SLOT_ID_START + 2); } - // could't convert type - for (int i = 3; i < CLOMN_NUMBERS; i++) + // couldn't convert type + for (int i = 3; i < COLUMN_NUMBERS; i++) { TExprNode slot_ref; slot_ref.node_type = TExprNodeType::SLOT_REF; @@ -402,7 +402,7 @@ void ParquetSannerTest::create_expr_info() { _params.__set_src_tuple_id(TUPLE_ID_SRC); } -void ParquetSannerTest::init() { +void ParquetScannerTest::init() { create_expr_info(); init_desc_table(); @@ -418,7 +418,7 @@ void ParquetSannerTest::init() { _tnode.__isset.broker_scan_node = true; } -TEST_F(ParquetSannerTest, normal) { +TEST_F(ParquetScannerTest, normal) { BrokerScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); auto status = scan_node.prepare(&_runtime_state); ASSERT_TRUE(status.ok()); diff --git a/be/test/exec/schema_scan_node_test.cpp b/be/test/exec/schema_scan_node_test.cpp index 45fdee4fd7c94c..a87394bfb8c00d 100644 --- a/be/test/exec/schema_scan_node_test.cpp +++ b/be/test/exec/schema_scan_node_test.cpp @@ -38,7 +38,7 @@ namespace doris { // mock class SchemaScanNodeTest : public testing::Test { public: - SchemaScanNodeTest() : _runtim_state("test") { + SchemaScanNodeTest() : runtime_state("test") { TDescriptorTable t_desc_table; // table descriptors @@ -82,7 +82,7 @@ class SchemaScanNodeTest : public testing::Test { DescriptorTbl::create(&_obj_pool, t_desc_table, &_desc_tbl); - _runtim_state.set_desc_tbl(_desc_tbl); +
runtime_state.set_desc_tbl(_desc_tbl); // Node Id _tnode.node_id = 0; @@ -106,14 +106,14 @@ class SchemaScanNodeTest : public testing::Test { TPlanNode _tnode; ObjectPool _obj_pool; DescriptorTbl* _desc_tbl; - RuntimeState _runtim_state; + RuntimeState runtime_state; }; TEST_F(SchemaScanNodeTest, normal_use) { SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); + Status status = scan_node.prepare(&runtime_state); ASSERT_TRUE(status.ok()); - status = scan_node.prepare(&_runtim_state); + status = scan_node.prepare(&runtime_state); ASSERT_TRUE(status.ok()); std::vector scan_ranges; status = scan_node.set_scan_ranges(scan_ranges); @@ -122,13 +122,13 @@ TEST_F(SchemaScanNodeTest, normal_use) { scan_node.debug_string(1, &out); LOG(WARNING) << out.str(); - status = scan_node.open(&_runtim_state); + status = scan_node.open(&runtime_state); ASSERT_TRUE(status.ok()); RowBatch row_batch(scan_node._row_descriptor, 100); bool eos = false; while (!eos) { - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + status = scan_node.get_next(&runtime_state, &row_batch, &eos); ASSERT_TRUE(status.ok()); if (!eos) { @@ -139,21 +139,21 @@ TEST_F(SchemaScanNodeTest, normal_use) { } } - status = scan_node.close(&_runtim_state); + status = scan_node.close(&runtime_state); ASSERT_TRUE(status.ok()); } TEST_F(SchemaScanNodeTest, Prepare_fail_1) { SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); TableDescriptor* old = _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = NULL; - Status status = scan_node.prepare(&_runtim_state); + Status status = scan_node.prepare(&runtime_state); ASSERT_FALSE(status.ok()); _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = old; } TEST_F(SchemaScanNodeTest, Prepare_fail_2) { SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); scan_node._tuple_id = 1; - Status status = scan_node.prepare(&_runtim_state); + Status status = scan_node.prepare(&runtime_state); ASSERT_FALSE(status.ok()); } TEST_F(SchemaScanNodeTest, dummy) { @@ -161,9 +161,9 @@ TEST_F(SchemaScanNodeTest, dummy) { _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; t_desc->_schema_table_type = TSchemaTableType::SCH_EVENTS; SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); + Status status = scan_node.prepare(&runtime_state); ASSERT_TRUE(status.ok()); - status = scan_node.prepare(&_runtim_state); + status = scan_node.prepare(&runtime_state); ASSERT_TRUE(status.ok()); std::vector scan_ranges; status = scan_node.set_scan_ranges(scan_ranges); @@ -172,13 +172,13 @@ TEST_F(SchemaScanNodeTest, dummy) { scan_node.debug_string(1, &out); LOG(WARNING) << out.str(); - status = scan_node.open(&_runtim_state); + status = scan_node.open(&runtime_state); ASSERT_TRUE(status.ok()); RowBatch row_batch(scan_node._row_descriptor, 100); bool eos = false; while (!eos) { - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + status = scan_node.get_next(&runtime_state, &row_batch, &eos); ASSERT_TRUE(status.ok()); if (!eos) { @@ -189,25 +189,25 @@ TEST_F(SchemaScanNodeTest, dummy) { } } - status = scan_node.close(&_runtim_state); + status = scan_node.close(&runtime_state); ASSERT_TRUE(status.ok()); t_desc->_schema_table_type = TSchemaTableType::SCH_AUTHORS; } TEST_F(SchemaScanNodeTest, get_dest_desc_fail) { SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); scan_node._tuple_id = 1; - Status status = scan_node.prepare(&_runtim_state); + 
Status status = scan_node.prepare(&runtime_state); ASSERT_FALSE(status.ok()); } TEST_F(SchemaScanNodeTest, invalid_param) { SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); Status status = scan_node.prepare(NULL); ASSERT_FALSE(status.ok()); - status = scan_node.prepare(&_runtim_state); + status = scan_node.prepare(&runtime_state); ASSERT_TRUE(status.ok()); status = scan_node.open(NULL); ASSERT_FALSE(status.ok()); - status = scan_node.open(&_runtim_state); + status = scan_node.open(&runtime_state); ASSERT_TRUE(status.ok()); RowBatch row_batch(scan_node._row_descriptor, 100); bool eos; @@ -217,13 +217,13 @@ TEST_F(SchemaScanNodeTest, invalid_param) { TEST_F(SchemaScanNodeTest, no_init) { SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - //Status status = scan_node.prepare(&_runtim_state); + //Status status = scan_node.prepare(&runtime_state); //ASSERT_TRUE(status.ok()); - Status status = scan_node.open(&_runtim_state); + Status status = scan_node.open(&runtime_state); ASSERT_FALSE(status.ok()); RowBatch row_batch(scan_node._row_descriptor, 100); bool eos; - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + status = scan_node.get_next(&runtime_state, &row_batch, &eos); ASSERT_FALSE(status.ok()); } diff --git a/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp b/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp index e45f747f7e7ebc..8be96fb311449a 100644 --- a/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp @@ -44,7 +44,7 @@ class SchemaAuthorScannerTest : public testing::Test { std::string _wild; }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaAuthorScannerTest, normal_use) { SchemaAuthorsScanner scanner; Status status = scanner.init(&_param, &_obj_pool); diff --git a/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp b/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp index 89db0c7adf3cb4..ab118aefa06fa6 100644 --- a/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp @@ -45,7 +45,7 @@ class SchemaCharsetsScannerTest : public testing::Test { std::string _wild; }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaCharsetsScannerTest, normal_use) { SchemaCharsetsScanner scanner; Status status = scanner.init(&_param, &_obj_pool); diff --git a/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp b/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp index 3ae63a846a22d2..934f65527b3a26 100644 --- a/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp @@ -45,7 +45,7 @@ class SchemaCollationsScannerTest : public testing::Test { std::string _wild; }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaCollationsScannerTest, normal_use) { SchemaCollationsScanner scanner; Status status = scanner.init(&_param, &_obj_pool); diff --git a/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp b/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp index b9cfc173e3c0c5..d21008798c269b 100644 --- a/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp @@ -91,7 +91,7 @@ class SchemaColumnsScannerTest : public testing::Test { std::string 
_wild; }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaColumnsScannerTest, normal_use) { SchemaColumnsScanner scanner; Status status = scanner.init(&_param, &_obj_pool); diff --git a/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp b/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp index af06cd43cc965a..fd1f7d892e3072 100644 --- a/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp @@ -91,7 +91,7 @@ class SchemaCreateTableScannerTest : public testing::Test { std::string _wild; }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaCreateTableScannerTest, normal_use) { SchemaCreateTableScanner scanner; Status status = scanner.init(&_param, &_obj_pool); diff --git a/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp b/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp index 3304aae43b0091..9f8d88904a340e 100644 --- a/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp @@ -45,7 +45,7 @@ class SchemaEnginesScannerTest : public testing::Test { std::string _wild; }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaEnginesScannerTest, normal_use) { SchemaEnginesScanner scanner; Status status = scanner.init(&_param, &_obj_pool); diff --git a/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp b/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp index 387afacffa994c..5e45e4a2ad90c9 100644 --- a/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp @@ -91,7 +91,7 @@ class SchemaOpenTablesScannerTest : public testing::Test { std::string _wild; }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaOpenTablesScannerTest, normal_use) { SchemaOpenTablesScanner scanner; Status status = scanner.init(&_param, &_obj_pool); diff --git a/be/test/exec/schema_scanner_test.cpp b/be/test/exec/schema_scanner_test.cpp index f0505b4efd695b..46a63f41c834cb 100644 --- a/be/test/exec/schema_scanner_test.cpp +++ b/be/test/exec/schema_scanner_test.cpp @@ -52,7 +52,7 @@ SchemaScanner::ColumnDesc s_test_columns[] = { { "is_null", TYPE_VARCHAR, sizeof(StringValue), true }, }; -char g_tuple_buf[10000];// enougth for tuple +char g_tuple_buf[10000];// enough for tuple TEST_F(SchemaScannerTest, normal_use) { SchemaScanner scanner(s_test_columns, sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); diff --git a/be/test/exec/set_executor_test.cpp b/be/test/exec/set_executor_test.cpp index b19afc06dd23ea..b1bfd5167f013c 100644 --- a/be/test/exec/set_executor_test.cpp +++ b/be/test/exec/set_executor_test.cpp @@ -27,13 +27,13 @@ namespace doris { class SetExecutorTest : public testing::Test { public: SetExecutorTest() : - _runtim_state("tmp") { + _runtime_state("tmp") { } virtual void SetUp() { } private: - RuntimeState _runtim_state; + RuntimeState _runtime_state; }; TEST_F(SetExecutorTest, normal_case) { @@ -84,7 +84,7 @@ TEST_F(SetExecutorTest, normal_case) { } SetExecutor executor(&doris_server, params); RowDescriptor row_desc; - Status status = executor.prepare((RuntimeState*)&_runtim_state, row_desc); + Status status = executor.prepare((RuntimeState*)&_runtime_state, row_desc); 
ASSERT_TRUE(status.ok()); LOG(INFO) << executor.debug_string(); } @@ -107,7 +107,7 @@ TEST_F(SetExecutorTest, failed_case) { } SetExecutor executor(&doris_server, params); RowDescriptor row_desc; - Status status = executor.prepare((RuntimeState*)&_runtim_state, row_desc); + Status status = executor.prepare((RuntimeState*)&_runtime_state, row_desc); ASSERT_FALSE(status.ok()); LOG(INFO) << executor.debug_string(); } diff --git a/be/test/exec/tablet_sink_test.cpp b/be/test/exec/tablet_sink_test.cpp index ca18445a8c9e0c..54e3fea9a3f786 100644 --- a/be/test/exec/tablet_sink_test.cpp +++ b/be/test/exec/tablet_sink_test.cpp @@ -99,7 +99,7 @@ TDataSink get_data_sink(TDescriptorTable* desc_tbl) { tsink.db_name = "testDb"; tsink.table_name = "testTable"; - // cosntruct schema + // construct schema TOlapTableSchemaParam& tschema = tsink.schema; tschema.db_id = 1; tschema.table_id = 2; @@ -223,7 +223,7 @@ TDataSink get_decimal_sink(TDescriptorTable* desc_tbl) { tsink.db_name = "testDb"; tsink.table_name = "testTable"; - // cosntruct schema + // construct schema TOlapTableSchemaParam& tschema = tsink.schema; tschema.db_id = 1; tschema.table_id = 2; diff --git a/be/test/exprs/CMakeLists.txt b/be/test/exprs/CMakeLists.txt index ba210a0d8cb45d..fef817236478fa 100644 --- a/be/test/exprs/CMakeLists.txt +++ b/be/test/exprs/CMakeLists.txt @@ -25,7 +25,7 @@ ADD_BE_TEST(json_function_test) #ADD_BE_TEST(binary_predicate_test) #ADD_BE_TEST(in_predicate_test) #ADD_BE_TEST(expr-test) -#ADD_BE_TEST(hybird_set_test) +#ADD_BE_TEST(hybrid_set_test) ADD_BE_TEST(string_functions_test) ADD_BE_TEST(timestamp_functions_test) ADD_BE_TEST(percentile_approx_test) diff --git a/be/test/exprs/binary_predicate_test.cpp b/be/test/exprs/binary_predicate_test.cpp index 9c056fddde1beb..6b58af09e172d9 100644 --- a/be/test/exprs/binary_predicate_test.cpp +++ b/be/test/exprs/binary_predicate_test.cpp @@ -200,30 +200,30 @@ TEST_F(BinaryOpTest, SimplePerformanceTest) { ASSERT_TRUE(expr != NULL); ASSERT_TRUE(expr->prepare(runtime_state(), *row_desc()).ok()); int size = 1024 * 1024 / capacity; - VectorizedRowBatch* vec_row_batchs[size]; + VectorizedRowBatch* vec_row_batches[size]; srand(time(NULL)); for (int i = 0; i < size; ++i) { - vec_row_batchs[i] = object_pool()->add( + vec_row_batches[i] = object_pool()->add( new VectorizedRowBatch(_schema, capacity)); - MemPool* mem_pool = vec_row_batchs[i]->mem_pool(); + MemPool* mem_pool = vec_row_batches[i]->mem_pool(); int32_t* vec_data = reinterpret_cast( mem_pool->allocate(sizeof(int32_t) * capacity)); - vec_row_batchs[i]->column(0)->set_col_data(vec_data); + vec_row_batches[i]->column(0)->set_col_data(vec_data); for (int i = 0; i < capacity; ++i) { vec_data[i] = rand() % 20; } - vec_row_batchs[i]->set_size(capacity); + vec_row_batches[i]->set_size(capacity); } - RowBatch* row_batchs[size]; + RowBatch* row_batches[size]; for (int i = 0; i < size; ++i) { - row_batchs[i] = object_pool()->add(new RowBatch(*row_desc(), capacity)); - vec_row_batchs[i]->to_row_batch( - row_batchs[i], + row_batches[i] = object_pool()->add(new RowBatch(*row_desc(), capacity)); + vec_row_batches[i]->to_row_batch( + row_batches[i], *runtime_state()->desc_tbl().get_tuple_descriptor(0)); } @@ -231,7 +231,7 @@ TEST_F(BinaryOpTest, SimplePerformanceTest) { stopwatch.start(); for (int i = 0; i < size; ++i) { - expr->evaluate(vec_row_batchs[i]); + expr->evaluate(vec_row_batches[i]); } uint64_t vec_time = stopwatch.elapsed_time(); @@ -241,7 +241,7 @@ TEST_F(BinaryOpTest, SimplePerformanceTest) { for (int i = 0; i < size; ++i) { 
for (int j = 0; j < capacity; ++j) { - ExecNode::eval_conjuncts(&expr, 1, row_batchs[i]->get_row(j)); + ExecNode::eval_conjuncts(&expr, 1, row_batches[i]->get_row(j)); } } diff --git a/be/test/exprs/hybird_set_test.cpp b/be/test/exprs/hybrid_set_test.cpp similarity index 83% rename from be/test/exprs/hybird_set_test.cpp rename to be/test/exprs/hybrid_set_test.cpp index fc884c3f02f0bc..39e6224664f2c0 100644 --- a/be/test/exprs/hybird_set_test.cpp +++ b/be/test/exprs/hybrid_set_test.cpp @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -#include "exprs/hybird_set.h" +#include "exprs/hybrid_set.h" #include #include @@ -25,16 +25,16 @@ namespace doris { // mock -class HybirdSetTest : public testing::Test { +class HybridSetTest : public testing::Test { public: - HybirdSetTest() { + HybridSetTest() { } protected: }; -TEST_F(HybirdSetTest, bool) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_BOOLEAN); +TEST_F(HybridSetTest, bool) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_BOOLEAN); bool a = true; set->insert(&a); a = false; @@ -45,7 +45,7 @@ TEST_F(HybirdSetTest, bool) { set->insert(&a); ASSERT_EQ(2, set->size()); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << (*(bool*)base->get_value()); @@ -58,8 +58,8 @@ TEST_F(HybirdSetTest, bool) { ASSERT_TRUE(set->find(&a)); } -TEST_F(HybirdSetTest, tinyint) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_TINYINT); +TEST_F(HybridSetTest, tinyint) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_TINYINT); int8_t a = 0; set->insert(&a); a = 1; @@ -75,7 +75,7 @@ TEST_F(HybirdSetTest, tinyint) { ASSERT_EQ(5, set->size()); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << (*(int8_t*)base->get_value()); @@ -95,8 +95,8 @@ TEST_F(HybirdSetTest, tinyint) { a = 5; ASSERT_FALSE(set->find(&a)); } -TEST_F(HybirdSetTest, smallint) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_SMALLINT); +TEST_F(HybridSetTest, smallint) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_SMALLINT); int16_t a = 0; set->insert(&a); a = 1; @@ -111,7 +111,7 @@ TEST_F(HybirdSetTest, smallint) { set->insert(&a); ASSERT_EQ(5, set->size()); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << (*(int16_t*)base->get_value()); @@ -131,8 +131,8 @@ TEST_F(HybirdSetTest, smallint) { a = 5; ASSERT_FALSE(set->find(&a)); } -TEST_F(HybirdSetTest, int) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_INT); +TEST_F(HybridSetTest, int) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_INT); int32_t a = 0; set->insert(&a); a = 1; @@ -147,7 +147,7 @@ TEST_F(HybirdSetTest, int) { set->insert(&a); ASSERT_EQ(5, set->size()); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << (*(int32_t*)base->get_value()); @@ -167,8 +167,8 @@ TEST_F(HybirdSetTest, int) { a = 5; ASSERT_FALSE(set->find(&a)); } -TEST_F(HybirdSetTest, bigint) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_BIGINT); +TEST_F(HybridSetTest, bigint) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_BIGINT); int64_t a = 0; set->insert(&a); a = 1; @@ -183,7 +183,7 @@ TEST_F(HybirdSetTest, bigint) { set->insert(&a); ASSERT_EQ(5, set->size()); - 
HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << (*(int64_t*)base->get_value()); @@ -203,8 +203,8 @@ TEST_F(HybirdSetTest, bigint) { a = 5; ASSERT_FALSE(set->find(&a)); } -TEST_F(HybirdSetTest, float) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_FLOAT); +TEST_F(HybridSetTest, float) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_FLOAT); float a = 0; set->insert(&a); a = 1.1; @@ -219,7 +219,7 @@ TEST_F(HybirdSetTest, float) { set->insert(&a); ASSERT_EQ(5, set->size()); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << (*(float*)base->get_value()); @@ -239,8 +239,8 @@ TEST_F(HybirdSetTest, float) { a = 5.1; ASSERT_FALSE(set->find(&a)); } -TEST_F(HybirdSetTest, double) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_DOUBLE); +TEST_F(HybridSetTest, double) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_DOUBLE); double a = 0; set->insert(&a); a = 1.1; @@ -255,7 +255,7 @@ TEST_F(HybirdSetTest, double) { set->insert(&a); ASSERT_EQ(5, set->size()); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << (*(double*)base->get_value()); @@ -275,8 +275,8 @@ TEST_F(HybirdSetTest, double) { a = 5.1; ASSERT_FALSE(set->find(&a)); } -TEST_F(HybirdSetTest, string) { - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_VARCHAR); +TEST_F(HybridSetTest, string) { + HybridSetBase* set = HybridSetBase::create_set(TYPE_VARCHAR); StringValue a; char buf[100]; @@ -298,7 +298,7 @@ TEST_F(HybirdSetTest, string) { set->insert(&a); ASSERT_EQ(5, set->size()); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << ((StringValue*)base->get_value())->ptr; @@ -325,10 +325,10 @@ TEST_F(HybirdSetTest, string) { b.len = 5; ASSERT_FALSE(set->find(&b)); } -TEST_F(HybirdSetTest, timestamp) { +TEST_F(HybridSetTest, timestamp) { CpuInfo::init(); - HybirdSetBase* set = HybirdSetBase::create_set(TYPE_DATETIME); + HybridSetBase* set = HybridSetBase::create_set(TYPE_DATETIME); char s1[] = "2012-01-20 01:10:01"; char s2[] = "1990-10-20 10:10:10.123456 "; char s3[] = " 1990-10-20 10:10:10.123456"; @@ -346,7 +346,7 @@ TEST_F(HybirdSetTest, timestamp) { set->insert(&v2); set->insert(&v3); - HybirdSetBase::IteratorBase* base = set->begin(); + HybridSetBase::IteratorBase* base = set->begin(); while (base->has_next()) { LOG(INFO) << ((DateTimeValue*)base->get_value())->debug_string(); diff --git a/be/test/exprs/in_op_test.cpp b/be/test/exprs/in_op_test.cpp index caaebd0945b73c..a55ab20882fa8b 100644 --- a/be/test/exprs/in_op_test.cpp +++ b/be/test/exprs/in_op_test.cpp @@ -184,37 +184,37 @@ TEST_F(InOpTest, SimplePerformanceTest) { ASSERT_TRUE(expr != NULL); ASSERT_TRUE(expr->prepare(_runtime_state, *_row_desc).ok()); int size = 1024 * 1024 / capacity; - VectorizedRowBatch* vec_row_batchs[size]; + VectorizedRowBatch* vec_row_batches[size]; srand(time(NULL)); for (int i = 0; i < size; ++i) { - vec_row_batchs[i] = _object_pool->add( + vec_row_batches[i] = _object_pool->add( new VectorizedRowBatch( *_runtime_state->desc_tbl().get_tuple_descriptor(0), capacity)); - MemPool* mem_pool = vec_row_batchs[i]->mem_pool(); + MemPool* mem_pool = vec_row_batches[i]->mem_pool(); int32_t* vec_data = reinterpret_cast<int32_t*>( mem_pool->allocate(sizeof(int32_t) * capacity));
- vec_row_batchs[i]->column(0)->set_col_data(vec_data); + vec_row_batches[i]->column(0)->set_col_data(vec_data); for (int i = 0; i < capacity; ++i) { vec_data[i] = rand() % 256; } - vec_row_batchs[i]->set_size(capacity); + vec_row_batches[i]->set_size(capacity); } - RowBatch* row_batchs[size]; + RowBatch* row_batches[size]; for (int i = 0; i < size; ++i) { - row_batchs[i] = _object_pool->add(new RowBatch(*_row_desc, capacity)); - vec_row_batchs[i]->to_row_batch(row_batchs[i]); + row_batches[i] = _object_pool->add(new RowBatch(*_row_desc, capacity)); + vec_row_batches[i]->to_row_batch(row_batches[i]); } MonotonicStopWatch stopwatch; stopwatch.start(); for (int i = 0; i < size; ++i) { - expr->evaluate(vec_row_batchs[i]); + expr->evaluate(vec_row_batches[i]); } uint64_t vec_time = stopwatch.elapsed_time(); @@ -224,7 +224,7 @@ TEST_F(InOpTest, SimplePerformanceTest) { for (int i = 0; i < size; ++i) { for (int j = 0; j < capacity; ++j) { - ExecNode::eval_conjuncts(&expr, 1, row_batchs[i]->get_row(j)); + ExecNode::eval_conjuncts(&expr, 1, row_batches[i]->get_row(j)); } } diff --git a/be/test/olap/cumulative_compaction_policy_test.cpp b/be/test/olap/cumulative_compaction_policy_test.cpp index 3fab6f70c0af22..7819cc21b9dce9 100644 --- a/be/test/olap/cumulative_compaction_policy_test.cpp +++ b/be/test/olap/cumulative_compaction_policy_test.cpp @@ -232,7 +232,7 @@ TEST_F(TestNumBasedCumulativeCompactionPolicy, calculate_cumulative_point) { ASSERT_EQ(4, _tablet->cumulative_layer_point()); } -TEST_F(TestNumBasedCumulativeCompactionPolicy, pick_candicate_rowsets) { +TEST_F(TestNumBasedCumulativeCompactionPolicy, pick_candidate_rowsets) { std::vector<RowsetMetaSharedPtr> rs_metas; init_all_rs_meta_cal_point(&rs_metas); @@ -246,7 +246,7 @@ TEST_F(TestNumBasedCumulativeCompactionPolicy, pick_candicate_rowsets) { _tablet->calculate_cumulative_point(); std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); ASSERT_EQ(2, candidate_rowsets.size()); } @@ -267,7 +267,7 @@ TEST_F(TestNumBasedCumulativeCompactionPolicy, pick_input_rowsets_normal) { NumBasedCumulativeCompactionPolicy policy; std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -297,7 +297,7 @@ TEST_F(TestNumBasedCumulativeCompactionPolicy, pick_input_rowsets_delete) { NumBasedCumulativeCompactionPolicy policy; std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -710,7 +710,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, calculate_cumulative_point_overl ASSERT_EQ(2, _tablet->cumulative_layer_point()); } -TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_candicate_rowsets) { +TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_candidate_rowsets) { std::vector<RowsetMetaSharedPtr> rs_metas; init_all_rs_meta_cal_point(&rs_metas); @@ -724,12 +724,12 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_candicate_rowsets) { _tablet->calculate_cumulative_point(); std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); +
_tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); ASSERT_EQ(3, candidate_rowsets.size()); } -TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_candicate_rowsets_big_base) { +TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_candidate_rowsets_big_base) { std::vector<RowsetMetaSharedPtr> rs_metas; init_rs_meta_big_base(&rs_metas); @@ -743,7 +743,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_candicate_rowsets_big_base) _tablet->calculate_cumulative_point(); std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); ASSERT_EQ(3, candidate_rowsets.size()); } @@ -763,7 +763,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_input_rowsets_normal) { std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -792,7 +792,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_input_rowsets_big_base) { std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -821,7 +821,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_input_rowsets_promotion) { std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -850,7 +850,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_input_rowsets_not_same_leve std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -879,7 +879,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_input_rowsets_empty) { std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -908,7 +908,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_input_rowsets_not_reach_min std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; @@ -937,7 +937,7 @@ TEST_F(TestSizeBasedCumulativeCompactionPolicy, pick_input_rowsets_delete) { std::vector<RowsetSharedPtr> candidate_rowsets; - _tablet->pick_candicate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); + _tablet->pick_candidate_rowsets_to_cumulative_compaction(1000, &candidate_rowsets); std::vector<RowsetSharedPtr> input_rowsets; Version last_delete_version{-1, -1}; diff --git a/be/test/olap/push_handler_test.cpp b/be/test/olap/push_handler_test.cpp index 39e0108fc776f0..3b71b960052095 100644 --- a/be/test/olap/push_handler_test.cpp +++ b/be/test/olap/push_handler_test.cpp @@ -65,12 +65,12 @@ Schema
PushHandlerTest::create_schema() { #define TUPLE_ID_DST 0 #define TUPLE_ID_SRC 1 -#define CLOMN_NUMBERS 4 +#define COLUMN_NUMBERS 4 #define DST_TUPLE_SLOT_ID_START 1 #define SRC_TUPLE_SLOT_ID_START 5 int PushHandlerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_slot_id) { - const char *clomnNames[] = {"k1_int", "k2_smallint", "k3_varchar", "v_bigint"}; - for (int i = 0; i < CLOMN_NUMBERS; i++) + const char *columnNames[] = {"k1_int", "k2_smallint", "k3_varchar", "v_bigint"}; + for (int i = 0; i < COLUMN_NUMBERS; i++) { TSlotDescriptor slot_desc; @@ -90,7 +90,7 @@ int PushHandlerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_s slot_desc.columnPos = i; slot_desc.byteOffset = i*16+8; // 8 bytes for null slot_desc.nullIndicatorBit = i%8; - slot_desc.colName = clomnNames[i]; + slot_desc.colName = columnNames[i]; slot_desc.slotIdx = i + 1; slot_desc.isMaterialized = true; @@ -100,7 +100,7 @@ int PushHandlerTest::create_src_tuple(TDescriptorTable& t_desc_table, int next_s // TTupleDescriptor source TTupleDescriptor t_tuple_desc; t_tuple_desc.id = TUPLE_ID_SRC; - t_tuple_desc.byteSize = CLOMN_NUMBERS*16+8; // 8 bytes for null + t_tuple_desc.byteSize = COLUMN_NUMBERS*16+8; // 8 bytes for null t_tuple_desc.numNullBytes = 1; t_tuple_desc.tableId = 0; t_tuple_desc.__isset.tableId = true; diff --git a/be/test/olap/run_length_byte_test.cpp b/be/test/olap/run_length_byte_test.cpp index 155f3b24180a48..9e68e7edba9ef5 100755 --- a/be/test/olap/run_length_byte_test.cpp +++ b/be/test/olap/run_length_byte_test.cpp @@ -205,7 +205,7 @@ TEST(TestStream, UncompressInStream) { } } -// the length after compress must be smaller than origal stream, then the compressor will be called. +// the length after compress must be smaller than original stream, then the compressor will be called. 
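The CompressOutStream tests below hinge on the rule in the comment above: a block is written in compressed form only when the codec output is strictly smaller than the raw input, so compression can never inflate the stream. A minimal sketch of that decision follows, using hypothetical names (Buffer, encode_block, codec) rather than Doris's actual OutStream API:

#include <cstdint>
#include <functional>
#include <vector>

using Buffer = std::vector<uint8_t>;

// Illustrative sketch only, not the OutStream implementation: keep the
// compressed form only when it actually shrinks the block; otherwise fall
// back to the raw bytes, so a poorly compressing block costs nothing extra.
Buffer encode_block(const Buffer& raw,
                    const std::function<Buffer(const Buffer&)>& codec) {
    Buffer compressed = codec(raw);
    return compressed.size() < raw.size() ? compressed : raw;
}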
TEST(TestStream, CompressOutStream) { // write data OutStream *out_stream = @@ -255,7 +255,7 @@ TEST(TestStream, CompressOutStream2) { std::vector<uint64_t> offsets; offsets.push_back(0); - offsets.push_back(59); // if lzo, this shoudl be 57 + offsets.push_back(59); // if lzo, this should be 57 InStream *in_stream = new (std::nothrow) InStream(&inputs, offsets, @@ -772,7 +772,7 @@ TEST_F(TestRunLengthByte, ReadWriteMultiBytes) { _writer->flush(); - // the stream contain head, contral byte and four byte literal + // the stream contain head, control byte and four byte literal ASSERT_EQ(_out_stream->get_stream_length(), sizeof(StreamHead) + 1 + 4); // read data @@ -798,7 +798,7 @@ TEST_F(TestRunLengthByte, ReadWriteSameBytes) { _writer->flush(); - // the stream contain head, contral byte(4-3) and one byte literal + // the stream contain head, control byte(4-3) and one byte literal ASSERT_EQ(_out_stream->get_stream_length(), sizeof(StreamHead) + 1 + 1); // read data diff --git a/be/test/olap/tablet_test.cpp b/be/test/olap/tablet_test.cpp index e0cd77d2b8d1b3..698c77d6fa5b78 100755 --- a/be/test/olap/tablet_test.cpp +++ b/be/test/olap/tablet_test.cpp @@ -120,8 +120,7 @@ class TestTablet : public testing::Test { init_rs_meta(ptr5, 10, 11); rs_metas->push_back(ptr5); } - - void fetch_expried_row_rs_meta(std::vector<RowsetMetaSharedContainerPtr>* rs_metas) { + void fetch_expired_row_rs_meta(std::vector<RowsetMetaSharedContainerPtr>* rs_metas) { RowsetMetaSharedContainerPtr v2(new std::vector<RowsetMetaSharedPtr>()); RowsetMetaSharedPtr ptr1(new RowsetMeta()); @@ -173,10 +172,10 @@ class TestTablet : public testing::Test { TEST_F(TestTablet, delete_expired_stale_rowset) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta(&expried_rs_metas); + fetch_expired_row_rs_meta(&expired_rs_metas); for (auto &rowset : rs_metas) { _tablet_meta->add_rs_meta(rowset); @@ -185,7 +184,7 @@ TEST_F(TestTablet, delete_expired_stale_rowset) { TabletSharedPtr _tablet(new Tablet(_tablet_meta, nullptr)); _tablet->init(); - for(auto ptr: expried_rs_metas) { + for(auto ptr: expired_rs_metas) { for (auto rs : *ptr) { _tablet->_timestamped_version_tracker.add_version(rs->version()); } diff --git a/be/test/olap/timestamped_version_tracker_test.cpp b/be/test/olap/timestamped_version_tracker_test.cpp index 3fb3f9ed39c4f4..458cc6c6fba24b 100644 --- a/be/test/olap/timestamped_version_tracker_test.cpp +++ b/be/test/olap/timestamped_version_tracker_test.cpp @@ -144,7 +144,7 @@ class TestTimestampedVersionTracker : public testing::Test { rs_metas->push_back(ptr5); } - void init_expried_row_rs_meta(std::vector<RowsetMetaSharedPtr>* rs_metas) { + void init_expired_row_rs_meta(std::vector<RowsetMetaSharedPtr>* rs_metas) { RowsetMetaSharedPtr ptr1(new RowsetMeta()); init_rs_meta(ptr1, 2, 3); @@ -176,7 +176,7 @@ class TestTimestampedVersionTracker : public testing::Test { } - void init_expried_row_rs_meta_with_same_rowset(std::vector<RowsetMetaSharedPtr>* rs_metas) { + void init_expired_row_rs_meta_with_same_rowset(std::vector<RowsetMetaSharedPtr>* rs_metas) { RowsetMetaSharedPtr ptr0(new RowsetMeta()); init_rs_meta(ptr0, 1, 1); @@ -212,7 +212,7 @@ class TestTimestampedVersionTracker : public testing::Test { } - void fetch_expried_row_rs_meta(std::vector<RowsetMetaSharedContainerPtr>* rs_metas) { + void fetch_expired_row_rs_meta(std::vector<RowsetMetaSharedContainerPtr>* rs_metas) { RowsetMetaSharedContainerPtr v2(new std::vector<RowsetMetaSharedPtr>()); RowsetMetaSharedPtr ptr1(new RowsetMeta()); @@ -252,7 +252,7 @@ class TestTimestampedVersionTracker : public testing::Test { rs_metas->push_back(v5); } - void fetch_expried_row_rs_meta_with_same_rowset(std::vector<RowsetMetaSharedContainerPtr>* rs_metas) { + void
fetch_expired_row_rs_meta_with_same_rowset(std::vector<RowsetMetaSharedContainerPtr>* rs_metas) { RowsetMetaSharedContainerPtr v1(new std::vector<RowsetMetaSharedPtr>()); RowsetMetaSharedPtr ptr0(new RowsetMeta()); @@ -321,14 +321,14 @@ TEST_F(TestTimestampedVersionTracker, construct_version_graph) { TEST_F(TestTimestampedVersionTracker, construct_version_graph_with_same_version) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; VersionGraph version_graph; init_all_rs_meta(&rs_metas); - rs_metas.insert(rs_metas.end(), expried_rs_metas.begin(), - expried_rs_metas.end()); + rs_metas.insert(rs_metas.end(), expired_rs_metas.begin(), + expired_rs_metas.end()); int64_t max_version = 0; version_graph.construct_version_graph(rs_metas, &max_version); @@ -412,16 +412,16 @@ TEST_F(TestTimestampedVersionTracker, add_version_to_graph_with_same_version) { TEST_F(TestTimestampedVersionTracker, capture_consistent_versions) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - init_expried_row_rs_meta(&expried_rs_metas); + init_expired_row_rs_meta(&expired_rs_metas); VersionGraph version_graph; int64_t max_version = 0; - rs_metas.insert(rs_metas.end(), expried_rs_metas.begin(), - expried_rs_metas.end()); + rs_metas.insert(rs_metas.end(), expired_rs_metas.begin(), + expired_rs_metas.end()); version_graph.construct_version_graph(rs_metas, &max_version); @@ -438,16 +438,16 @@ TEST_F(TestTimestampedVersionTracker, capture_consistent_versions_with_same_rows TEST_F(TestTimestampedVersionTracker, capture_consistent_versions_with_same_rowset) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - init_expried_row_rs_meta_with_same_rowset(&expried_rs_metas); + init_expired_row_rs_meta_with_same_rowset(&expired_rs_metas); VersionGraph version_graph; int64_t max_version = 0; - rs_metas.insert(rs_metas.end(), expried_rs_metas.begin(), - expried_rs_metas.end()); + rs_metas.insert(rs_metas.end(), expired_rs_metas.begin(), + expired_rs_metas.end()); version_graph.construct_version_graph(rs_metas, &max_version); @@ -464,14 +464,14 @@ TEST_F(TestTimestampedVersionTracker, capture_consistent_versions_with_same_rows TEST_F(TestTimestampedVersionTracker, construct_versioned_tracker) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - init_expried_row_rs_meta(&expried_rs_metas); + init_expired_row_rs_meta(&expired_rs_metas); - rs_metas.insert(rs_metas.end(), expried_rs_metas.begin(), - expried_rs_metas.end()); + rs_metas.insert(rs_metas.end(), expired_rs_metas.begin(), + expired_rs_metas.end()); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); @@ -483,14 +483,14 @@ TEST_F(TestTimestampedVersionTracker, construct_version_tracker_by_stale_meta) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - init_expried_row_rs_meta(&expried_rs_metas); + init_expired_row_rs_meta(&expired_rs_metas); TimestampedVersionTracker tracker; - tracker.construct_versioned_tracker(rs_metas, expried_rs_metas); + tracker.construct_versioned_tracker(rs_metas, expired_rs_metas); ASSERT_EQ(10, tracker._version_graph._version_graph.size()); ASSERT_EQ(4, tracker._stale_version_path_map.size()); @@ -500,14 +500,14 @@
TEST_F(TestTimestampedVersionTracker, construct_version_tracker_by_stale_meta) { TEST_F(TestTimestampedVersionTracker, construct_versioned_tracker_with_same_rowset) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - init_expried_row_rs_meta_with_same_rowset(&expried_rs_metas); + init_expired_row_rs_meta_with_same_rowset(&expired_rs_metas); - rs_metas.insert(rs_metas.end(), expried_rs_metas.begin(), - expried_rs_metas.end()); + rs_metas.insert(rs_metas.end(), expired_rs_metas.begin(), + expired_rs_metas.end()); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); @@ -519,13 +519,13 @@ TEST_F(TestTimestampedVersionTracker, construct_versioned_tracker_with_same_rows TEST_F(TestTimestampedVersionTracker, recover_versioned_tracker) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - init_expried_row_rs_meta(&expried_rs_metas); - rs_metas.insert(rs_metas.end(), expried_rs_metas.begin(), - expried_rs_metas.end()); + init_expired_row_rs_meta(&expired_rs_metas); + rs_metas.insert(rs_metas.end(), expired_rs_metas.begin(), + expired_rs_metas.end()); const std::map<int64_t, PathVersionListSharedPtr> stale_version_path_map; TimestampedVersionTracker tracker; @@ -569,15 +569,15 @@ TEST_F(TestTimestampedVersionTracker, add_version_with_same_rowset) { TEST_F(TestTimestampedVersionTracker, add_stale_path_version) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedPtr> expried_rs_metas; + std::vector<RowsetMetaSharedPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - init_expried_row_rs_meta(&expried_rs_metas); - tracker.add_stale_path_version(expried_rs_metas); + init_expired_row_rs_meta(&expired_rs_metas); + tracker.add_stale_path_version(expired_rs_metas); ASSERT_EQ(1, tracker._stale_version_path_map.size()); ASSERT_EQ(7, tracker._stale_version_path_map.begin()->second->timestamped_versions().size()); @@ -586,15 +586,15 @@ TEST_F(TestTimestampedVersionTracker, add_stale_path_version_with_same_rowset) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - fetch_expried_row_rs_meta_with_same_rowset(&expried_rs_metas); - for(auto ptr:expried_rs_metas) { + fetch_expired_row_rs_meta_with_same_rowset(&expired_rs_metas); + for(auto ptr:expired_rs_metas) { tracker.add_stale_path_version(*ptr); } @@ -605,15 +605,15 @@ TEST_F(TestTimestampedVersionTracker, add_stale_path_version_with_same_rowset) { TEST_F(TestTimestampedVersionTracker, capture_consistent_versions_tracker) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta(&expried_rs_metas); + fetch_expired_row_rs_meta(&expired_rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - for(auto ptr:expried_rs_metas) { + for(auto ptr:expired_rs_metas) { for (auto rs : *ptr) { tracker.add_version(rs->version()); } @@ -633,15 +633,15 @@ TEST_F(TestTimestampedVersionTracker, capture_consistent_versions_tracker) { TEST_F(TestTimestampedVersionTracker, capture_consistent_versions_tracker_with_same_rowset) { std::vector<RowsetMetaSharedPtr> rs_metas; -
std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta_with_same_rowset(&expried_rs_metas); + fetch_expired_row_rs_meta_with_same_rowset(&expired_rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - for(auto ptr:expried_rs_metas) { + for(auto ptr:expired_rs_metas) { for (auto rs : *ptr) { tracker.add_version(rs->version()); } @@ -661,14 +661,14 @@ TEST_F(TestTimestampedVersionTracker, capture_consistent_versions_tracker_with_s TEST_F(TestTimestampedVersionTracker, fetch_and_delete_path_version) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta(&expried_rs_metas); + fetch_expired_row_rs_meta(&expired_rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - for(auto ptr:expried_rs_metas) { + for(auto ptr:expired_rs_metas) { for (auto rs : *ptr) { tracker.add_version(rs->version()); } @@ -708,14 +708,14 @@ TEST_F(TestTimestampedVersionTracker, fetch_and_delete_path_version) { TEST_F(TestTimestampedVersionTracker, fetch_and_delete_path_version_with_same_rowset) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta_with_same_rowset(&expried_rs_metas); + fetch_expired_row_rs_meta_with_same_rowset(&expired_rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - for(auto ptr:expried_rs_metas) { + for(auto ptr:expired_rs_metas) { for (auto rs : *ptr) { tracker.add_version(rs->version()); } @@ -758,15 +758,15 @@ TEST_F(TestTimestampedVersionTracker, fetch_and_delete_path_version_with_same_ro TEST_F(TestTimestampedVersionTracker, capture_expired_path_version) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; std::vector<int64_t> path_version; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta(&expried_rs_metas); + fetch_expired_row_rs_meta(&expired_rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - for(auto ptr:expried_rs_metas) { + for(auto ptr:expired_rs_metas) { for (auto rs : *ptr) { tracker.add_version(rs->version()); } @@ -783,15 +783,15 @@ TEST_F(TestTimestampedVersionTracker, capture_expired_path_version) { TEST_F(TestTimestampedVersionTracker, get_stale_version_path_json_doc) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta(&expried_rs_metas); + fetch_expired_row_rs_meta(&expired_rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); - for(auto ptr:expried_rs_metas) { + for(auto ptr:expired_rs_metas) { for (auto rs : *ptr) { tracker.add_version(rs->version()); } @@ -840,11 +840,11 @@ TEST_F(TestTimestampedVersionTracker, get_stale_version_path_json_doc) { TEST_F(TestTimestampedVersionTracker, get_stale_version_path_json_doc_empty) { std::vector<RowsetMetaSharedPtr> rs_metas; - std::vector<RowsetMetaSharedContainerPtr> expried_rs_metas; + std::vector<RowsetMetaSharedContainerPtr> expired_rs_metas; std::vector<Version> version_path; init_all_rs_meta(&rs_metas); - fetch_expried_row_rs_meta(&expried_rs_metas); + fetch_expired_row_rs_meta(&expired_rs_metas); TimestampedVersionTracker tracker; tracker.construct_versioned_tracker(rs_metas); diff --git a/be/test/runtime/buffered_block_mgr2_test.cpp b/be/test/runtime/buffered_block_mgr2_test.cpp index
31cae391a58a27..709413b9707205 100644 --- a/be/test/runtime/buffered_block_mgr2_test.cpp +++ b/be/test/runtime/buffered_block_mgr2_test.cpp @@ -54,7 +54,7 @@ const static string SCRATCH_DIR = "/tmp/doris-scratch"; // This suffix is appended to a tmp dir const static string SCRATCH_SUFFIX = "/doris-scratch"; -// Number of millieconds to wait to ensure write completes +// Number of milliseconds to wait to ensure write completes const static int WRITE_WAIT_MILLIS = 500; // How often to check for write completion diff --git a/be/test/runtime/datetime_value_test.cpp b/be/test/runtime/datetime_value_test.cpp index e69535eb357fbe..19822df4d4127e 100644 --- a/be/test/runtime/datetime_value_test.cpp +++ b/be/test/runtime/datetime_value_test.cpp @@ -1364,7 +1364,7 @@ TEST_F(DateTimeValueTest, to_int64) { } } -TEST_F(DateTimeValueTest, operatro_minus) { +TEST_F(DateTimeValueTest, operator_minus) { { DateTimeValue v1; ASSERT_TRUE(v1.from_date_int64(19880201)); diff --git a/be/test/runtime/decimalv2_value_test.cpp b/be/test/runtime/decimalv2_value_test.cpp index b4da49e5220dae..b6062fd67ff134 100644 --- a/be/test/runtime/decimalv2_value_test.cpp +++ b/be/test/runtime/decimalv2_value_test.cpp @@ -147,7 +147,7 @@ TEST_F(DecimalV2ValueTest, int_to_decimal) { std::cout << "value8: " << value8.get_debug_info() << std::endl; ASSERT_EQ("11", value8.to_string(3)); - // more than 9digit, fraction will be trancated to 999999999 + // more than 9digit, fraction will be truncated to 999999999 DecimalV2Value value9(1230123456789, 1230123456789); std::cout << "value9: " << value9.get_debug_info() << std::endl; ASSERT_EQ("1230123456789.999999999", value9.to_string(10)); diff --git a/be/test/runtime/etl_job_mgr_test.cpp b/be/test/runtime/etl_job_mgr_test.cpp index 6a256b3a41a5fe..f51d174f60893e 100644 --- a/be/test/runtime/etl_job_mgr_test.cpp +++ b/be/test/runtime/etl_job_mgr_test.cpp @@ -205,7 +205,7 @@ TEST_F(EtlJobMgrTest, CancelJob) { ASSERT_EQ(TStatusCode::OK, res.status.status_code); } -TEST_F(EtlJobMgrTest, FinishUnknowJob) { +TEST_F(EtlJobMgrTest, FinishUnknownJob) { EtlJobMgr mgr(&_exec_env); TUniqueId id; id.lo = 1; diff --git a/be/test/runtime/export_task_mgr_test.cpp b/be/test/runtime/export_task_mgr_test.cpp index 9d67751f54f91c..917d3b9113fe53 100644 --- a/be/test/runtime/export_task_mgr_test.cpp +++ b/be/test/runtime/export_task_mgr_test.cpp @@ -201,7 +201,7 @@ TEST_F(ExportTaskMgrTest, CancelJob) { ASSERT_EQ(TStatusCode::OK, res.status.status_code); } -TEST_F(ExportTaskMgrTest, FinishUnknowJob) { +TEST_F(ExportTaskMgrTest, FinishUnknownJob) { ExportTaskMgr mgr(&_exec_env); TUniqueId id; id.lo = 1; diff --git a/be/test/util/doris_metrics_test.cpp b/be/test/util/doris_metrics_test.cpp index 1ccd6544ae00c8..0c2b6c31cfe784 100644 --- a/be/test/util/doris_metrics_test.cpp +++ b/be/test/util/doris_metrics_test.cpp @@ -148,7 +148,7 @@ TEST_F(DorisMetricsTest, Normal) { ASSERT_TRUE(metric != nullptr); ASSERT_STREQ("22", metric->to_string().c_str()); } - // comapction + // compaction { DorisMetrics::instance()->base_compaction_deltas_total->increment(30); auto metric = server_entity->get_metric("base_compaction_deltas_total", "compaction_deltas_total"); diff --git a/be/test/util/new_metrics_test.cpp b/be/test/util/new_metrics_test.cpp index c1b4af8eea9455..ea2937b2d6c4a5 100644 --- a/be/test/util/new_metrics_test.cpp +++ b/be/test/util/new_metrics_test.cpp @@ -340,32 +340,32 @@ test_registry_cpu{mode="idle"} 18 { // Register one common metric to an entity with label - auto entity = 
registry.register_entity("test_entity", {{"name", "lable_test"}}); + auto entity = registry.register_entity("test_entity", {{"name", "label_test"}}); MetricPrototype cpu_idle_type(MetricType::GAUGE, MetricUnit::PERCENT, "cpu_idle"); IntCounter* cpu_idle = (IntCounter*)entity->register_metric(&cpu_idle_type); cpu_idle->increment(28); ASSERT_EQ(R"(# TYPE test_registry_cpu_idle gauge -test_registry_cpu_idle{name="lable_test"} 28 +test_registry_cpu_idle{name="label_test"} 28 )", registry.to_prometheus()); - ASSERT_EQ(R"([{"tags":{"metric":"cpu_idle","name":"lable_test"},"unit":"percent","value":28}])", registry.to_json()); + ASSERT_EQ(R"([{"tags":{"metric":"cpu_idle","name":"label_test"},"unit":"percent","value":28}])", registry.to_json()); ASSERT_EQ("", registry.to_core_string()); registry.deregister_entity(entity); } { // Register one common metric with group name to an entity with label - auto entity = registry.register_entity("test_entity", {{"name", "lable_test"}}); + auto entity = registry.register_entity("test_entity", {{"name", "label_test"}}); MetricPrototype cpu_idle_type(MetricType::GAUGE, MetricUnit::PERCENT, "cpu_idle", "", "cpu", {{"mode", "idle"}}); IntCounter* cpu_idle = (IntCounter*)entity->register_metric(&cpu_idle_type); cpu_idle->increment(38); ASSERT_EQ(R"(# TYPE test_registry_cpu gauge -test_registry_cpu{name="lable_test",mode="idle"} 38 +test_registry_cpu{name="label_test",mode="idle"} 38 )", registry.to_prometheus()); - ASSERT_EQ(R"([{"tags":{"metric":"cpu","mode":"idle","name":"lable_test"},"unit":"percent","value":38}])", registry.to_json()); + ASSERT_EQ(R"([{"tags":{"metric":"cpu","mode":"idle","name":"label_test"},"unit":"percent","value":38}])", registry.to_json()); ASSERT_EQ("", registry.to_core_string()); registry.deregister_entity(entity); } diff --git a/be/test/util/path_trie_test.cpp b/be/test/util/path_trie_test.cpp index b9fac1412f23cf..b001771bd18b5a 100644 --- a/be/test/util/path_trie_test.cpp +++ b/be/test/util/path_trie_test.cpp @@ -113,7 +113,7 @@ TEST_F(PathTrieTest, MultiTemplateTest) { std::string path = "/db/{table}"; ASSERT_TRUE(root.insert(path, 100)); - // Dumplicate template + // Duplicate template path = "/db/{rollup}/abc"; ASSERT_FALSE(root.insert(path, 110)); @@ -133,7 +133,7 @@ TEST_F(PathTrieTest, MultiPlayTest) { std::string path = "/db/abc"; ASSERT_TRUE(root.insert(path, 100)); - // Dumplicate template + // Duplicate template path = "/db"; ASSERT_TRUE(root.insert(path, 110)); @@ -154,7 +154,7 @@ TEST_F(PathTrieTest, EmptyTest) { std::string path = "/"; ASSERT_TRUE(root.insert(path, 100)); - // Dumplicate template + // Duplicate template path = "/"; ASSERT_FALSE(root.insert(path, 110)); diff --git a/be/test/util/tdigest_test.cpp b/be/test/util/tdigest_test.cpp index 4535a841800a6c..21ab5e46968bfb 100644 --- a/be/test/util/tdigest_test.cpp +++ b/be/test/util/tdigest_test.cpp @@ -242,7 +242,7 @@ TEST_F(TDigestTest, Montonicity) { } } -} // namespace stesting +} // namespace testing int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv);