Commit 31a9f7c

add debug message
1 parent 7fd372b commit 31a9f7c
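
The debug messages added by this commit go through the LOG_1(verbose_, ...) macro that already appears in this header. As a rough, hypothetical sketch of that pattern (the macro definition below is an assumption for illustration, not the repository's actual implementation), a verbosity-gated log macro can be written so that call sites omit the trailing semicolon, matching the calls in the diff:

    #include <iostream>

    // Hypothetical stand-in for the repository's LOG_1 macro: print the
    // message only when the verbosity level is at least 1. The block form
    // lets call sites omit a trailing semicolon, as the calls in this diff do.
    #define LOG_1(verbose, msg)                             \
        if ((verbose) >= 1) {                               \
            std::cout << "[DEBUG] " << (msg) << std::endl;  \
        }

    int main()
    {
        int verbose_ = 1;
        LOG_1(verbose_, "start build non local interfaces")
        LOG_1(verbose_, "done build non local interfaces")
        return 0;
    }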

File tree

1 file changed: Repartitioner/Repartitioner.H (+27 −24 lines)


Repartitioner/Repartitioner.H

Lines changed: 27 additions & 24 deletions
@@ -87,27 +87,28 @@ public:
 };
 
     /* returns the owner rank for a given rank */
-    label get_owner_rank(
-        const ExecutorHandler& exec_handler
-    ) const { return get_owner_rank(get_rank(exec_handler)); };
+    label get_owner_rank(const ExecutorHandler &exec_handler) const
+    {
+        return get_owner_rank(get_rank(exec_handler));
+    };
 
     /* returns if current rank is an owner */
-    bool is_owner(
-        const ExecutorHandler& exec_handler
-    ) const { return get_rank(exec_handler) == get_owner_rank(get_rank(exec_handler)); };
+    bool is_owner(const ExecutorHandler &exec_handler) const
+    {
+        return get_rank(exec_handler) == get_owner_rank(get_rank(exec_handler));
+    };
 
     /* @brief check if the given rank gets local after repartitioning
      *
      * */
-    bool reparts_to_local(
-        const ExecutorHandler& exec_handler,
-        label rank) const
+    bool reparts_to_local(const ExecutorHandler &exec_handler, label rank) const
     {
-        return get_owner_rank(exec_handler) == compute_owner_rank(rank, ranks_per_gpu_);
+        return get_owner_rank(exec_handler) ==
+               compute_owner_rank(rank, ranks_per_gpu_);
     };
 
     /* shortcut to current rank */
-    label get_rank(const ExecutorHandler& exec_handler) const
+    label get_rank(const ExecutorHandler &exec_handler) const
     {
         return exec_handler.get_communicator().get()->rank();
     };
@@ -153,11 +154,11 @@ public:
      * the interface was originally from this rank
      */
     std::pair<SparsityPatternVector, std::vector<bool>>
-    build_non_local_interfaces(
-        const ExecutorHandler& exec_handler,
-        SparsityPatternVector &loc,
+    build_non_local_interfaces(const ExecutorHandler &exec_handler,
+                               SparsityPatternVector &loc,
                                const SparsityPatternVector &non_loc) const
     {
+        LOG_1(verbose_, "start build non local interfaces")
         std::vector<label> rows, cols, ldu_mapping, ranks, begins, ends;
         std::vector<bool> is_local;
         label merged_ranks_size = non_loc.ranks.size();
@@ -199,6 +200,7 @@ public:
                 ranks.push_back(get_owner_rank(non_loc.ranks[i]));
             }
         }
+        LOG_1(verbose_, "done build non local interfaces")
         return std::make_pair(
             SparsityPatternVector{rows, cols, ldu_mapping, begins, ends, ranks},
             is_local);
@@ -216,11 +218,11 @@ public:
         std::shared_ptr<SparsityPattern>,
         std::vector<std::pair<bool, label>>>
     repartition_sparsity(
-        const ExecutorHandler& exec_handler,
+        const ExecutorHandler &exec_handler,
         std::shared_ptr<SparsityPattern> src_local_pattern,
         std::shared_ptr<SparsityPattern> src_non_local_pattern) const
     {
-        LOG_1(verbose_, "start repartition sparsity pattern")
+        LOG_1(verbose_, "start repartition sparsity pattern")
         // 1. obtain send recv sizes vector
         // here we can reuse code from repartition_comm_pattern
         //
@@ -266,7 +268,8 @@ public:
                 local_comm_pattern, merged_local.mapping, rank, ranks_per_gpu);
         }
 
-        label rows = (is_owner(exec_handler)) ? merged_local.rows.back() + 1 : 0;
+        label rows =
+            (is_owner(exec_handler)) ? merged_local.rows.back() + 1 : 0;
         gko::dim<2> merged_local_dim{rows, rows};
 
         auto non_local_comm_pattern = compute_send_recv_counts(
@@ -319,8 +322,8 @@ public:
                 rank, ranks_per_gpu);
         }
 
-        auto [gathered_non_local, is_local] =
-            build_non_local_interfaces(exec_handler, merged_local, merged_non_local);
+        auto [gathered_non_local, is_local] = build_non_local_interfaces(
+            exec_handler, merged_local, merged_non_local);
 
         // build vector with locality information
         std::vector<std::pair<bool, label>> locality;
@@ -344,14 +347,14 @@ public:
         // pattern where a particular face is in the send idxs
         // since we have already the row id of the other side
         // it should be doable. Or alternatively we know that we
-        // keep an interface together thus we can just count the idx up to the size.
-        // But we have to make sure that the interfaces are in the same order
-        // on both communication sides.
+        // keep an interface together thus we can just count the idx up to the
+        // size. But we have to make sure that the interfaces are in the same
+        // order on both communication sides.
         for (int i = 0; i < gathered_non_local.cols.size(); i++) {
             gathered_non_local.cols[i] = i;
         }
 
-        LOG_1(verbose_, "done repartition sparsity pattern")
+        LOG_1(verbose_, "done repartition sparsity pattern")
         if (is_owner(exec_handler)) {
             auto new_local_spars_pattern = std::make_shared<SparsityPattern>(
                 exec, merged_local_dim, merged_local);
@@ -406,7 +409,7 @@ public:
     }
 
     std::shared_ptr<const CommunicationPattern> repartition_comm_pattern(
-        const ExecutorHandler& exec_handler,
+        const ExecutorHandler &exec_handler,
         std::shared_ptr<const CommunicationPattern> src_comm_pattern,
         std::shared_ptr<
             const gko::experimental::distributed::Partition<label, label>>
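
For orientation, the ownership helpers touched above (get_owner_rank, is_owner, reparts_to_local) all reduce to compute_owner_rank(rank, ranks_per_gpu_), which this diff does not show. The standalone sketch below is therefore only an assumption about the convention, namely that each consecutive block of ranks_per_gpu ranks is owned by the first rank of that block:

    #include <cassert>
    #include <iostream>

    using label = int;

    // Hypothetical sketch of the owner-rank mapping: the first rank of each
    // block of ranks_per_gpu consecutive ranks owns the block. The actual
    // compute_owner_rank in Repartitioner.H may use a different convention.
    label compute_owner_rank(label rank, label ranks_per_gpu)
    {
        return (rank / ranks_per_gpu) * ranks_per_gpu;
    }

    int main()
    {
        const label ranks_per_gpu = 4;
        // Ranks 0-3 map to owner 0, ranks 4-7 to owner 4, and so on; in the
        // diff above only the owner rank keeps rows after repartitioning.
        assert(compute_owner_rank(5, ranks_per_gpu) == 4);
        for (label rank = 0; rank < 8; ++rank) {
            std::cout << "rank " << rank << " -> owner "
                      << compute_owner_rank(rank, ranks_per_gpu) << "\n";
        }
        return 0;
    }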
