@@ -1370,8 +1370,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, int value) {
             params.n_gpu_layers = value;
             if (!llama_supports_gpu_offload()) {
-                fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers option will be ignored\n");
-                fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+                fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
+                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
+                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
             }
         }
     ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
@@ -2104,8 +2105,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, int value) {
             params.speculative.n_gpu_layers = value;
             if (!llama_supports_gpu_offload()) {
-                fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers-draft option will be ignored\n");
-                fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+                fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n");
+                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
+                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
             }
         }
     ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
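
Both hunks follow the same pattern: the handler records the requested layer count unconditionally and only warns, rather than failing, when llama_supports_gpu_offload() reports that no usable GPU backend is available at runtime. A minimal standalone sketch of that pattern, with a hypothetical stub standing in for llama_supports_gpu_offload() (in the real tree it is declared in llama.h):

#include <cstdio>

// Hypothetical stand-in for llama_supports_gpu_offload() from llama.h,
// which reports at runtime whether a usable GPU backend is available.
static bool gpu_offload_available() { return false; }

// Mirrors the handlers in the diff above: record the value unconditionally,
// then warn (without aborting) if GPU offload cannot take effect.
static void set_n_gpu_layers(int & n_gpu_layers, int value) {
    n_gpu_layers = value;
    if (!gpu_offload_available()) {
        fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
        fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
        fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
    }
}

int main() {
    int n_gpu_layers = 0;
    set_n_gpu_layers(n_gpu_layers, 99); // value is kept; warnings go to stderr
    return 0;
}

The flag is warned about rather than rejected, so scripts that pass --gpu-layers or --gpu-layers-draft keep working unchanged on CPU-only builds.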