Skip to content

Commit 8f46f2e

Browse files
committed
fix lint
1 parent 2b3ef79 commit 8f46f2e

File tree

9 files changed

+29
-25
lines changed

9 files changed

+29
-25
lines changed

include/tvm/auto_scheduler/search_policy.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,7 @@
6565
#include <tvm/auto_scheduler/search_task.h>
6666
#include <tvm/node/node.h>
6767

68+
#include <string>
6869
#include <unordered_set>
6970
#include <vector>
7071

src/auto_scheduler/search_policy/sketch_policy.cc

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
#include <algorithm>
3232
#include <iomanip>
3333
#include <limits>
34+
#include <memory>
3435
#include <queue>
3536
#include <set>
3637
#include <string>
@@ -88,7 +89,7 @@ SketchPolicy::SketchPolicy(SearchTask task, CostModel program_cost_model,
8889
}
8990

9091
// NOTE: There are strong dependency among the rules below,
91-
// so the order to push them into the vector should be consid carefully.
92+
// so the order to push them into the vector should be considered carefully.
9293
if (IsCPUTask(node->search_task)) {
9394
// Sketch Generation Rules
9495
node->sketch_rules.push_back(&rule_always_inline);

src/auto_scheduler/search_policy/sketch_policy.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
#include <tvm/auto_scheduler/cost_model.h>
3535
#include <tvm/auto_scheduler/search_policy.h>
3636

37+
#include <memory>
3738
#include <set>
3839
#include <string>
3940
#include <unordered_set>

src/auto_scheduler/search_policy/sketch_policy_rules.cc

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -492,8 +492,8 @@ PopulationGenerationRule::ResultKind InitChangeComputeLocation::Apply(SketchPoli
492492
continue;
493493
}
494494

495-
std::vector<std::pair<int, int>> candidates
496-
= GetComputeLocationCandidates(policy->search_task, *state, stage_id);
495+
std::vector<std::pair<int, int>> candidates =
496+
GetComputeLocationCandidates(policy->search_task, *state, stage_id);
497497

498498
int choice = (policy->rand_gen)() % (candidates.size() + 2);
499499

@@ -958,23 +958,23 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol
958958
PopulationGenerationRule::ResultKind MutateAutoUnroll::Apply(SketchPolicyNode* policy,
959959
State* state) const {
960960
// Extract all auto_unroll_max_step pragma steps.
961-
std::vector<int> annotate_steps;
961+
std::vector<int> pragma_steps;
962962
for (size_t i = 0; i < (*state)->transform_steps.size(); ++i) {
963963
if (auto ps = (*state)->transform_steps[i].as<PragmaStepNode>()) {
964964
if (StrStartsWith(ps->pragma_type, "auto_unroll_max_step")) {
965-
annotate_steps.push_back(i);
965+
pragma_steps.push_back(i);
966966
}
967967
}
968968
}
969-
if (annotate_steps.empty()) {
969+
if (pragma_steps.empty()) {
970970
return ResultKind::kInvalid;
971971
}
972972

973973
std::vector<int>& auto_unroll_configs =
974974
IsGPUTask(policy->search_task) ? auto_unroll_configs_gpu : auto_unroll_configs_cpu;
975975

976-
// Randomly pick up an unroll step
977-
auto step_id = annotate_steps[(policy->rand_gen)() % annotate_steps.size()];
976+
// Randomly pick up an auto unroll pragma step
977+
auto step_id = pragma_steps[(policy->rand_gen)() % pragma_steps.size()];
978978
auto ps = (*state)->transform_steps[step_id].as<PragmaStepNode>();
979979
CHECK(ps);
980980

@@ -1018,8 +1018,8 @@ PopulationGenerationRule::ResultKind MutateComputeLocation::Apply(SketchPolicyNo
10181018
int stage_inc = GetTargetStageIDInState(*state, step_id) - ps->stage_id;
10191019
CHECK(ps != nullptr);
10201020

1021-
std::vector<std::pair<int, int>> candidates
1022-
= GetComputeLocationCandidates(policy->search_task, *state, ps->stage_id + stage_inc);
1021+
std::vector<std::pair<int, int>> candidates =
1022+
GetComputeLocationCandidates(policy->search_task, *state, ps->stage_id + stage_inc);
10231023

10241024
if (candidates.empty()) {
10251025
return PopulationGenerationRule::ResultKind::kInvalid;
@@ -1039,8 +1039,8 @@ PopulationGenerationRule::ResultKind MutateComputeLocation::Apply(SketchPolicyNo
10391039
tmp_s.CopyOnWrite()->transform_steps.push_back((*state)->transform_steps[s]);
10401040
}
10411041
try {
1042-
StepApplyToState(tmp_s->transform_steps.back(), &tmp_s, policy->search_task->compute_dag);
1043-
} catch (dmlc::Error &e) {
1042+
StepApplyToState(tmp_s->transform_steps.back(), &tmp_s, policy->search_task->compute_dag);
1043+
} catch (dmlc::Error& e) {
10441044
return PopulationGenerationRule::ResultKind::kInvalid;
10451045
}
10461046
}

src/auto_scheduler/search_policy/sketch_policy_rules.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ class PopulationMutationRule : public PopulationGenerationRule {
178178
* \param selection_weight the probability of applying this rule is
179179
* proportional to this weight
180180
*/
181-
PopulationMutationRule(double selection_weight) : weight(selection_weight) {}
181+
explicit PopulationMutationRule(double selection_weight) : weight(selection_weight) {}
182182

183183
/* \brief The weight of this rule */
184184
double weight;
@@ -188,7 +188,7 @@ class PopulationMutationRule : public PopulationGenerationRule {
188188
#define DEFINE_MUTATE_POPULATION_RULE(rule_name) \
189189
class rule_name : public PopulationMutationRule { \
190190
public: \
191-
rule_name(double weight) : PopulationMutationRule(weight) {} \
191+
explicit rule_name(double weight) : PopulationMutationRule(weight) {} \
192192
ResultKind Apply(SketchPolicyNode* policy, State* state) const final; \
193193
};
194194

src/auto_scheduler/search_policy/utils.cc

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -67,9 +67,8 @@ Array<Integer> GetSpatialSplitStepIds(const State& s, int stage_id) {
6767
return spatial_split_step_ids;
6868
}
6969

70-
71-
std::vector<std::pair<int, int>> GetComputeLocationCandidates(
72-
const SearchTask& task, const State& state, int stage_id) {
70+
std::vector<std::pair<int, int>> GetComputeLocationCandidates(const SearchTask& task,
71+
const State& state, int stage_id) {
7372
int target_stage_id = GetSingleConsumerId(task, state, stage_id);
7473
if (target_stage_id < 0) {
7574
return {};

src/auto_scheduler/search_policy/utils.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -645,7 +645,7 @@ inline void ComputePrefixSumProb(const std::vector<float>& weights,
645645
for (size_t i = 0; i < weights.size(); ++i) {
646646
(*prefix_sum_probs)[i] /= sum;
647647
}
648-
};
648+
}
649649

650650
/*! \brief Randomly choose an index according to a prefix sum probability. */
651651
inline int RandomChoose(const std::vector<double>& prefix_sum_probs, std::mt19937* random_gen) {
@@ -692,8 +692,8 @@ class SplitFactorizationMemo {
692692
Array<Integer> GetSpatialSplitStepIds(const State& s, int stage_id);
693693

694694
/*! \brief Get the possible compute locations for a stage. */
695-
std::vector<std::pair<int, int>> GetComputeLocationCandidates(
696-
const SearchTask& task, const State& state, int stage_id);
695+
std::vector<std::pair<int, int>> GetComputeLocationCandidates(const SearchTask& task,
696+
const State& state, int stage_id);
697697

698698
// Apply multi-level tiling structure according to a string format,
699699
// where "S" stands for a space level, "R" stands for a reduction level.

tutorials/auto_scheduler/tune_conv2d_layer_cuda.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -74,13 +74,13 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
7474

7575
######################################################################
7676
# Next, we set parameters for the auto-scheduler. These parameters
77-
# mainly specify how we do the measurement.
77+
# mainly specify how we do the measurement during the search and auto-tuning.
7878
#
7979
# * `measure_ctx` launches a different process for measurement. This
80-
# provides an isolation. It can protect the master process from any crashes
80+
# provides an isolation. It can protect the master process from GPU crashes
8181
# happened during measurement and avoid other runtime conflicts.
8282
# * `min_repeat_ms` defines the minimum duration of one "repeat" in every measurement.
83-
# This can warmup the GPU, which is necessary to get reliable measurement results.
83+
# This can warmup the GPU, which is necessary to get accurate measurement results.
8484
# * `num_measure_trials` is the number of measurement trials we can use during the search.
8585
# We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a
8686
# good value for the search to converge. You can do more trials according to your time budget.
@@ -109,7 +109,7 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
109109
######################################################################
110110
# We can lower the schedule to see the IR after auto-scheduling.
111111
# The auto-scheduler correctly performs optimizations including multi-level tiling,
112-
# parallelization, vectorization, unrolling and fusion.
112+
# cooperative fetching, unrolling and operator fusion.
113113

114114
print(tvm.lower(sch, args, simple_mode=True))
115115

@@ -172,6 +172,7 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
172172
# and resume the status of search policy and cost model with the log file.
173173
# In the example below we resume the status and do 5 more trials.
174174

175+
175176
log_file = "conv2d.json"
176177
cost_model = auto_scheduler.XGBModel()
177178
cost_model.update_from_file(log_file)
@@ -185,4 +186,5 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
185186
)
186187
sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
187188

189+
# kill the measurement process
188190
del measure_ctx

tutorials/auto_scheduler/tune_matmul_x86.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ def matmul_add(N, L, M, dtype):
9696
######################################################################
9797
# We can lower the schedule to see the IR after auto-scheduling.
9898
# The auto-scheduler correctly performs optimizations including multi-level tiling,
99-
# parallelization, vectorization, unrolling and fusion.
99+
# parallelization, vectorization, unrolling and operator fusion.
100100

101101
print(tvm.lower(sch, args, simple_mode=True))
102102

0 commit comments

Comments
 (0)