TARGET?=--target grpc-server

# Disable shared libs: we link against static gRPC and can't mix shared and static.
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF

# Absolute directory of this Makefile ($(lastword $(MAKEFILE_LIST)) is the
# file currently being parsed). Lets recursive $(MAKE) invocations address
# the sibling per-variant build trees regardless of the caller's cwd.
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(BUILD_TYPE ) ,cublas)
15
17
CMAKE_ARGS+=-DGGML_CUDA=ON
@@ -59,61 +61,49 @@ ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
59
61
PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
60
62
CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
61
63
LLAMA_VERSION=$(LLAMA_VERSION) \
62
- $(MAKE) grpc-server
64
+ $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
63
65
else
64
66
echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
65
- LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) grpc-server
67
+ LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
66
68
endif
67
69
68
- # This target is for manually building a variant with-auto detected flags
69
- llama-cpp : llama.cpp purge
70
- $(info ${GREEN}I llama-cpp build info:avx2${RESET})
71
- $(MAKE ) VARIANT=" llama-cpp-copy" build-llama-cpp-grpc-server
72
- cp -rfv grpc-server llama-cpp
73
-
74
70
# Build the AVX2 variant of grpc-server in a dedicated sibling build tree,
# so each variant's CMake cache stays isolated from the others.
llama-cpp-avx2: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build purge
	$(info ${GREEN}I llama-cpp build info:avx2${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx2-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build/grpc-server llama-cpp-avx2
81
76
82
77
# Build the AVX512 variant of grpc-server in its own sibling build tree.
llama-cpp-avx512: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build purge
	$(info ${GREEN}I llama-cpp build info:avx512${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx512-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build/grpc-server llama-cpp-avx512
89
83
90
84
# Build the AVX-only variant (AVX2/AVX512/FMA/F16C disabled) of grpc-server.
llama-cpp-avx: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
	$(info ${GREEN}I llama-cpp build info:avx${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
97
90
98
91
# Build the fallback variant with every SIMD extension disabled, for CPUs
# without AVX support.
llama-cpp-fallback: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
	$(info ${GREEN}I llama-cpp build info:fallback${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
105
97
106
98
# Build the RPC-enabled variant: GGML_RPC=ON, SIMD off, and the CMake
# invocation asked for both the grpc-server and rpc-server targets
# (TARGET override consumed by build-llama-cpp-grpc-server).
llama-cpp-grpc: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
	$(info ${GREEN}I llama-cpp build info:grpc${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc
113
104
114
105
# Copy the rpc-server binary produced as a side effect of the llama-cpp-grpc
# build (it is built from the same grpc-build tree, hence the prerequisite).
llama-cpp-rpc-server: llama-cpp-grpc
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/llama.cpp/build/bin/rpc-server llama-cpp-rpc-server
117
107
118
108
llama.cpp :
119
109
mkdir -p llama.cpp
0 commit comments