Commit b67c0df

Added llama_supports_rpc function to test for RPC support at runtime.

1 parent 3e2ee44

2 files changed: +9 -0 lines changed

llama.cpp

Lines changed: 8 additions & 0 deletions
@@ -15937,6 +15937,14 @@ bool llama_supports_mlock(void) {
     return llama_mlock::SUPPORTED;
 }
 
+bool llama_supports_rpc(void) {
+#if defined(GGML_USE_RPC)
+    return true;
+#else
+    return false;
+#endif
+}
+
 bool llama_supports_gpu_offload(void) {
 #if defined(GGML_USE_CUDA) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
     defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC)

llama.h

Lines changed: 1 addition & 0 deletions
@@ -430,6 +430,7 @@ extern "C" {
 
     LLAMA_API bool llama_supports_mmap       (void);
     LLAMA_API bool llama_supports_mlock      (void);
+    LLAMA_API bool llama_supports_rpc        (void);
     LLAMA_API bool llama_supports_gpu_offload(void);
 
     LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
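With this change, a program linked against llama.cpp can probe at runtime whether the library was built with the RPC backend (GGML_USE_RPC) instead of failing when remote offload is requested. A minimal sketch of such a check; the surrounding program structure is illustrative and not part of the commit:

    #include <stdio.h>
    #include "llama.h"

    int main(void) {
        // llama_supports_rpc() returns true only when the library was
        // compiled with GGML_USE_RPC defined (the function added here).
        if (llama_supports_rpc()) {
            printf("RPC backend available; remote offload can be attempted.\n");
        } else {
            printf("RPC backend not compiled in; using local backends only.\n");
        }
        return 0;
    }

This mirrors the existing capability probes (llama_supports_mmap, llama_supports_mlock, llama_supports_gpu_offload): the same binary interface works whether or not the feature was compiled in, and callers branch on the result.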
