From 0d8999a24fab9102bfaaac293687eac009664bef Mon Sep 17 00:00:00 2001
From: Slaren <2141330+slaren@users.noreply.github.com>
Date: Mon, 10 Apr 2023 21:52:10 +0200
Subject: [PATCH] Add compatibility with #801

---
 examples/common.cpp |  3 ++-
 llama.cpp           | 10 ++++++++--
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index db01fb23ccd20..040d3a51028b7 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -151,6 +151,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.lora_adapter = argv[i];
+            params.use_mmap = false;
         } else if (arg == "-i" || arg == "--interactive") {
             params.interactive = true;
         } else if (arg == "--embedding") {
@@ -254,7 +255,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     }
     fprintf(stderr, "  --mtest               compute maximum memory usage\n");
     fprintf(stderr, "  --verbose-prompt      print prompt before generation\n");
-    fprintf(stderr, "  --lora FNAME          apply LoRA adapter\n");
+    fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
     fprintf(stderr, "  -m FNAME, --model FNAME\n");
     fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
     fprintf(stderr, "\n");
diff --git a/llama.cpp b/llama.cpp
index cb5e5d0622925..9ead5ccf98254 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1800,6 +1800,12 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
     ggml_context* lora_ctx = ggml_init(params);
     std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;

+    // create a name -> tensor map of the model to accelerate lookups
+    std::unordered_map<std::string, struct ggml_tensor *> model_tensors;
+    for (auto & kv: model.tensors_by_name) {
+        model_tensors.insert(kv);
+    }
+
     fprintf(stderr, "%s: ", __func__);

     // read tensors and apply
@@ -1839,7 +1845,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
             base_name.erase(pos);
             // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());

-            if (model.tensors.find(base_name.data()) == model.tensors.end()) {
+            if (model_tensors.find(base_name.data()) == model_tensors.end()) {
                 fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
                 return 1;
             }
@@ -1878,7 +1884,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
             if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
                 lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {

-                ggml_tensor * tensor = model.tensors[base_name];
+                ggml_tensor * tensor = model_tensors[base_name];
                 ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
                 ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
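
The llama.cpp hunks above adapt LoRA loading to the mmap loader from #801, which keeps model tensors as a flat list of (name, tensor) pairs rather than the old map, so the patch builds a std::unordered_map up front and every per-tensor lookup becomes O(1) on average instead of a linear scan. A minimal standalone sketch of that technique follows, assuming FakeTensor and the sample tensor names as hypothetical stand-ins; it is not llama.cpp's actual API.

#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct FakeTensor { int id; };  // hypothetical stand-in for ggml_tensor

int main() {
    // The model stores tensors as a flat list of (name, tensor) pairs;
    // finding one by name would otherwise mean a linear scan per query.
    FakeTensor wq{0}, wk{1};
    std::vector<std::pair<std::string, FakeTensor *>> tensors_by_name = {
        {"layers.0.attention.wq.weight", &wq},
        {"layers.0.attention.wk.weight", &wk},
    };

    // Build the lookup map once, as the patch does, so every later
    // lookup by base_name is an average O(1) hash probe.
    std::unordered_map<std::string, FakeTensor *> model_tensors;
    for (auto & kv : tensors_by_name) {
        model_tensors.insert(kv);
    }

    // Mirror of the patch's "unknown tensor" check.
    std::string base_name = "layers.0.attention.wq.weight";
    auto it = model_tensors.find(base_name);
    if (it == model_tensors.end()) {
        fprintf(stderr, "unknown tensor '%s'\n", base_name.c_str());
        return 1;
    }
    printf("found tensor '%s' (id %d)\n", base_name.c_str(), it->second->id);
    return 0;
}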