Skip to content

Commit

Permalink
backend: rebase llama.cpp submodule on latest upstream (#2694)
Browse files Browse the repository at this point in the history
* Adds support for GPT-NeoX, Gemma 2, OpenELM, ChatGLM, and Jais architectures (all with Kompute support)
* Also enables Kompute support for StarCoder2, XVERSE, Command R, and OLMo
* Includes a number of Kompute resource management fixes

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
  • Loading branch information
cebtenzzre committed Jul 19, 2024
1 parent 398ef34 commit 290c629
Show file tree
Hide file tree
Showing 4 changed files with 268 additions and 213 deletions.
28 changes: 14 additions & 14 deletions gpt4all-backend/CMakeLists.txt
@@ -90,25 +90,25 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
# NOTE(review): this span is a GitHub diff rendering whose +/- markers were
# stripped by extraction, so removed (pre-commit) and added (post-commit)
# lines appear interleaved. It is NOT valid standalone CMake: each option is
# set twice, once under the old LLAMA_* name and once under its GGML_*
# replacement — presumably reflecting upstream llama.cpp's rename of its
# build options from LLAMA_* to GGML_* (TODO: confirm against the upstream
# commit). The hunk sits inside a foreach(BUILD_VARIANT ...) loop that
# starts before this excerpt (at original line ~90 per the hunk header).
else()
set(GPT4ALL_ALLOW_NON_AVX ON)
endif()
# Removed lines (old LLAMA_*-prefixed CPU-feature and backend defaults):
set(LLAMA_AVX2 ${GPT4ALL_ALLOW_NON_AVX})
set(LLAMA_F16C ${GPT4ALL_ALLOW_NON_AVX})
set(LLAMA_FMA ${GPT4ALL_ALLOW_NON_AVX})

set(LLAMA_METAL OFF)
set(LLAMA_KOMPUTE OFF)
set(LLAMA_VULKAN OFF)
set(LLAMA_CUDA OFF)
set(LLAMA_ROCM OFF)
# Added lines (new GGML_*-prefixed equivalents of the block above):
set(GGML_AVX2 ${GPT4ALL_ALLOW_NON_AVX})
set(GGML_F16C ${GPT4ALL_ALLOW_NON_AVX})
set(GGML_FMA ${GPT4ALL_ALLOW_NON_AVX})

set(GGML_METAL OFF)
set(GGML_KOMPUTE OFF)
set(GGML_VULKAN OFF)
set(GGML_CUDA OFF)
set(GGML_ROCM OFF)
# In each branch below, the condition line is unchanged diff context; the
# first set() is the removed LLAMA_* line and the second is the added
# GGML_* replacement. Note the rocm branch uses *_HIPBLAS, not *_ROCM.
if (BUILD_VARIANT MATCHES metal)
set(LLAMA_METAL ON)
set(GGML_METAL ON)
elseif (BUILD_VARIANT MATCHES kompute)
set(LLAMA_KOMPUTE ON)
set(GGML_KOMPUTE ON)
elseif (BUILD_VARIANT MATCHES vulkan)
set(LLAMA_VULKAN ON)
set(GGML_VULKAN ON)
elseif (BUILD_VARIANT MATCHES cuda)
set(LLAMA_CUDA ON)
set(GGML_CUDA ON)
elseif (BUILD_VARIANT MATCHES rocm)
set(LLAMA_HIPBLAS ON)
set(GGML_HIPBLAS ON)
endif()

# Include GGML
Expand Down
2 changes: 1 addition & 1 deletion gpt4all-backend/llama.cpp-mainline
Loading

0 comments on commit 290c629

Please sign in to comment.