From 57e2a7a52a819883f40dada8a2edc24ecf48186b Mon Sep 17 00:00:00 2001
From: John <78893154+cmp-nct@users.noreply.github.com>
Date: Thu, 18 Jan 2024 23:12:15 +0100
Subject: [PATCH] llama : fix falcon arch for tied output embeddings (#4978)

* falcon arch fix for tied output embeddings

* Update llama.cpp

Co-authored-by: Georgi Gerganov

* Update llama.cpp

* Update llama.cpp

Co-authored-by: Georgi Gerganov

* Update llama.cpp

---------

Co-authored-by: Georgi Gerganov
---
 llama.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index f1d00a96c667c..47b4384a8b88b 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3438,7 +3438,12 @@ static bool llm_load_tensors(
                 {
                     model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                     model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd});
-                    model.output        = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                    if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_OUTPUT, "weight").c_str()) >= 0) {
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                    } else {
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // needs to be on GPU
+                        ml.n_created--; // artificial tensor
+                    }
                 }

                 for (int i = 0; i < n_layer; ++i) {
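
For context: the patch handles Falcon GGUF files whose LM head is tied to the token embedding, i.e. no separate `output.weight` tensor is stored in the file. The loader checks for `output.weight` with `gguf_find_tensor` and, if it is absent, reuses `token_embd.weight` as the output projection. Below is a minimal, self-contained sketch of that fallback pattern; the types and helper names (`tensor`, `model_file`, `resolve_output`) are hypothetical and simplified, not the actual llama.cpp loader API:

```cpp
#include <string>
#include <unordered_map>

struct tensor { /* weight data omitted in this sketch */ };

struct model_file {
    // tensors actually stored in the GGUF file, keyed by name
    std::unordered_map<std::string, tensor> tensors;

    // returns nullptr when the tensor is not present in the file
    const tensor * find(const std::string & name) const {
        auto it = tensors.find(name);
        return it == tensors.end() ? nullptr : &it->second;
    }
};

// Pick the LM-head weights: use "output.weight" when the file provides it,
// otherwise fall back to the token embedding matrix (tied embeddings),
// mirroring what the patch does for the Falcon arch.
const tensor * resolve_output(const model_file & f) {
    if (const tensor * t = f.find("output.weight")) {
        return t;
    }
    return f.find("token_embd.weight"); // reuse the input embedding as the output head
}
```

The `ml.n_created--` in the patch presumably compensates for the extra `create_tensor` call: the fallback creates a tensor that has no counterpart in the file, so without the decrement the loader's created-versus-expected tensor count check would fail (hence the "artificial tensor" comment).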