We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 6071787 commit a79da45 — Copy full SHA for a79da45
llama.h
@@ -430,7 +430,7 @@ extern "C" {
430
431
LLAMA_API bool llama_supports_mmap (void);
432
LLAMA_API bool llama_supports_mlock (void);
433
- LLAMA_API bool llama_supports_rpc (void);
+ LLAMA_API bool llama_supports_rpc (void); // TMP: https://github.com/ggerganov/llama.cpp/pull/7647#issuecomment-2140234367
434
LLAMA_API bool llama_supports_gpu_offload(void);
435
436
LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
0 commit comments