File tree Expand file tree Collapse file tree 2 files changed +9
-0
lines changed Expand file tree Collapse file tree 2 files changed +9
-0
lines changed Original file line number Diff line number Diff line change @@ -15937,6 +15937,14 @@ bool llama_supports_mlock(void) {
15937
15937
return llama_mlock::SUPPORTED;
15938
15938
}
15939
15939
15940
// Report whether this build of the library was compiled with the RPC
// backend enabled (GGML_USE_RPC defined at compile time).
// Returns: true when RPC support is compiled in, false otherwise.
// Mirrors the style of the sibling capability queries
// (llama_supports_mmap / llama_supports_mlock / llama_supports_gpu_offload).
bool llama_supports_rpc(void) {
#if defined(GGML_USE_RPC)
    return true;
#else
    return false;
#endif
}
15940
15948
bool llama_supports_gpu_offload(void) {
15941
15949
#if defined(GGML_USE_CUDA) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
15942
15950
defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC)
Original file line number Diff line number Diff line change @@ -430,6 +430,7 @@ extern "C" {
430
430
431
431
LLAMA_API bool llama_supports_mmap (void );
432
432
LLAMA_API bool llama_supports_mlock (void );
433
+ LLAMA_API bool llama_supports_rpc (void );
433
434
LLAMA_API bool llama_supports_gpu_offload (void );
434
435
435
436
LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx);
You can’t perform that action at this time.
0 commit comments