
Commit 87b180e

Fix non-MoE regression

Authored by pwilkin and CISC
Co-authored-by: Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
1 parent 542f36b · commit 87b180e

File tree

1 file changed: +1 −1 lines changed


src/llama-model.cpp

Lines changed: 1 addition & 1 deletion
@@ -4789,7 +4789,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

                     layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

-                    if (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
+                    if (arch == LLM_ARCH_ERNIE4_5_MOE && static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
                         int n_ff_exp = hparams.n_ff_exp;

                         layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
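
Why the one-line guard fixes the regression: the dense ERNIE 4.5 and ERNIE 4.5 MoE architectures appear to share this tensor-loading loop, and a dense model presumably leaves hparams.n_layer_dense_lead at its default of 0, so the old condition static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead held for every layer and routed dense models into the MoE branch, where expert tensors such as ffn_gate_inp do not exist. Below is a minimal, self-contained sketch of that branch logic, using simplified stand-ins for the llama.cpp types rather than the actual source:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the llama.cpp identifiers seen in the diff.
    enum llm_arch { LLM_ARCH_ERNIE4_5, LLM_ARCH_ERNIE4_5_MOE };
    struct hparams_t {
        uint32_t n_layer_dense_lead = 0; // assumption: dense models leave this at 0
    };

    // The fixed guard: the layer-index test alone is not enough, because the
    // dense variant runs the same loading loop; the added arch check keeps it
    // on the dense path so no nonexistent expert tensors are requested.
    static bool is_moe_layer(llm_arch arch, int i, const hparams_t & hparams) {
        return arch == LLM_ARCH_ERNIE4_5_MOE
            && static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead;
    }

    int main() {
        hparams_t hp;
        // The old guard (index test only) would print 1 for both lines;
        // the fixed guard keeps the dense model out of the MoE branch.
        printf("dense, layer 0 -> MoE branch? %d\n", is_moe_layer(LLM_ARCH_ERNIE4_5,     0, hp)); // prints 0
        printf("MoE,   layer 0 -> MoE branch? %d\n", is_moe_layer(LLM_ARCH_ERNIE4_5_MOE, 0, hp)); // prints 1
    }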
