glm4moe
1 parent 5ecb83a commit 17d3714
src/llama-model.cpp
@@ -14209,10 +14209,6 @@ struct llm_build_glm4_moe : public llm_graph_context {
                 LLM_FFN_SILU, LLM_FFN_PAR, il);
             cb(cur, "ffn_out", il);
         } else {
-            // MoE layer with shared experts
-            const int64_t n_expert = hparams.n_expert;
-            const int64_t n_expert_used = hparams.n_expert_used;
-
             // Process routed experts using existing MoE infrastructure
             ggml_tensor * routed_out = build_moe_ffn(cur,
                     model.layers[il].ffn_gate_inp,
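
Why the four removed lines are safe to drop (a reading of the surrounding code, not something the commit states): llm_build_glm4_moe derives from llm_graph_context, which already carries expert counts initialized from hparams, so the branch-local const declarations only shadowed values already in scope. A minimal, self-contained sketch of that shadowing pattern follows; llm_hparams, llm_graph_context_sketch, and build_moe_branch are hypothetical stand-in names for illustration, not llama.cpp's actual types.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the hyperparameter struct.
struct llm_hparams {
    int64_t n_expert      = 8;
    int64_t n_expert_used = 2;
};

// Hypothetical stand-in for the graph-builder context: the expert
// counts are captured once from hparams and are visible to every
// build method, which is what makes per-branch locals redundant.
struct llm_graph_context_sketch {
    llm_hparams hparams;

    const int64_t n_expert      = hparams.n_expert;
    const int64_t n_expert_used = hparams.n_expert_used;

    void build_moe_branch() const {
        // Pre-commit, the branch redeclared the counts, e.g.
        //   const int64_t n_expert = hparams.n_expert;  // shadows member
        // Post-commit, the members already in scope are used directly.
        std::printf("routing %lld of %lld experts\n",
                    (long long) n_expert_used, (long long) n_expert);
    }
};

int main() {
    llm_graph_context_sketch ctx;
    ctx.build_moe_branch();
    return 0;
}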