Commit f696428

graph : add clamping to ffn_moe_weights_sum to avoid div-by-zero (#16655)
* add missing norm topk bias
* use clamping instead, update number and add comment
1 parent 7cce4f8 commit f696428

File tree

1 file changed: +3 −4 lines changed

src/llama-graph.cpp

Lines changed: 3 additions & 4 deletions
@@ -1009,10 +1009,9 @@ ggml_tensor * llm_graph_context::build_moe_ffn(
         ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
         cb(weights_sum, "ffn_moe_weights_sum", il);

-        if (arch == LLM_ARCH_BAILINGMOE2) {
-            weights_sum = ggml_scale_bias(ctx0, weights_sum, 1.0, 1e-20);
-            cb(weights_sum, "ffn_moe_weights_sum_biased", il);
-        }
+        // Avoid division by zero, clamp to smallest number representable by F16
+        weights_sum = ggml_clamp(ctx0, weights_sum, 6.103515625e-5, INFINITY);
+        cb(weights_sum, "ffn_moe_weights_sum_clamped", il);

         weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
         cb(weights, "ffn_moe_weights_norm", il);
