From c7ae9b7f169be4f70f448bc7d7263750ba11645f Mon Sep 17 00:00:00 2001
From: grauho
Date: Sun, 12 May 2024 14:21:46 -0400
Subject: [PATCH] Adjusted model loading such that LoRA alpha tensors are not
 skipped for having an encoded dim of zero

---
 model.cpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/model.cpp b/model.cpp
index 684317d2..a7cdc68c 100644
--- a/model.cpp
+++ b/model.cpp
@@ -796,6 +796,10 @@ ggml_type str_to_ggml_type(const std::string& dtype) {
     return ttype;
 }
 
+// True when `name` ends with `suffix` (C++17 has no std::string::ends_with).
+// Parameters are taken by const reference: this runs once per tensor during
+// model load, and by-value std::string parameters would copy on every call.
+static bool name_ends_with(const std::string& name, const std::string& suffix) {
+    return ((name.size() >= suffix.size()) && (name.compare(name.size() - suffix.size(), suffix.size(), suffix) == 0));
+}
+
 // https://huggingface.co/docs/safetensors/index
 bool ModelLoader::init_from_safetensors_file(const std::string& file_path, const std::string& prefix) {
     LOG_DEBUG("init from '%s'", file_path.c_str());
@@ -889,7 +893,7 @@ bool ModelLoader::init_from_safetensors_file(const std::string& file_path, const
         }
 
         // ggml/src/ggml.c:2745
-        if (n_dims < 1 || n_dims > GGML_MAX_DIMS) {
+        if ((n_dims < 1 || n_dims > GGML_MAX_DIMS) && (name_ends_with(name, ".alpha") == false)) {
             LOG_ERROR("skip tensor '%s' with n_dims %d", name.c_str(), n_dims);
             continue;
         }