Skip to content

Commit 172e79c

Browse files
committed
add a safety guard before reshaping LoRA tensors
1 parent cd0b599 commit 172e79c

File tree

1 file changed

+15
-0
lines changed

1 file changed

+15
-0
lines changed

lora.hpp

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -827,6 +827,21 @@ struct LoraModel : public GGMLRunner {
827827

828828
updown = ggml_merge_lora(compute_ctx, lora_down, lora_up, lora_mid);
829829
}
830+
if (updown == NULL) {
831+
continue;
832+
}
833+
834+
const int64_t model_elems = ggml_nelements(model_tensor);
835+
const int64_t lora_elems = ggml_nelements(updown);
836+
if (model_elems != lora_elems) {
837+
LOG_WARN("LoRA '%s' tensor '%s' is unsupported: element count mismatch (LoRA=%lld, model=%lld); skipping",
838+
file_path.c_str(),
839+
model_tensor_name.c_str(),
840+
(long long) lora_elems,
841+
(long long) model_elems);
842+
continue;
843+
}
844+
830845
scale_value *= multiplier;
831846
ggml_tensor* original_tensor = model_tensor;
832847
if (!ggml_backend_is_cpu(runtime_backend) && ggml_backend_buffer_is_host(original_tensor->buffer)) {

0 commit comments

Comments
 (0)