Skip to content

Commit 25f8378

Browse files
authored
Merge branch 'ggml-org:master' into master
2 parents 0bc9df3 + 009b709 commit 25f8378

File tree

116 files changed

+8272
-2312
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

116 files changed

+8272
-2312
lines changed

.devops/vulkan.Dockerfile

Lines changed: 23 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,30 @@ ARG UBUNTU_VERSION=24.04
22

33
FROM ubuntu:$UBUNTU_VERSION AS build
44

5-
# Install build tools
6-
RUN apt update && apt install -y git build-essential cmake wget
5+
# Ref: https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
76

8-
# Install Vulkan SDK and cURL
9-
RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
10-
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list https://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \
11-
apt update -y && \
12-
apt-get install -y vulkan-sdk libcurl4-openssl-dev curl
7+
# Install build tools
8+
RUN apt update && apt install -y git build-essential cmake wget xz-utils
9+
10+
# Install Vulkan SDK
11+
ARG VULKAN_VERSION=1.4.321.1
12+
RUN ARCH=$(uname -m) && \
13+
wget -qO /tmp/vulkan-sdk.tar.xz https://sdk.lunarg.com/sdk/download/${VULKAN_VERSION}/linux/vulkan-sdk-linux-${ARCH}-${VULKAN_VERSION}.tar.xz && \
14+
mkdir -p /opt/vulkan && \
15+
tar -xf /tmp/vulkan-sdk.tar.xz -C /tmp --strip-components=1 && \
16+
mv /tmp/${ARCH}/* /opt/vulkan/ && \
17+
rm -rf /tmp/*
18+
19+
# Install cURL and Vulkan SDK dependencies
20+
RUN apt install -y libcurl4-openssl-dev curl \
21+
libxcb-xinput0 libxcb-xinerama0 libxcb-cursor-dev
22+
23+
# Set environment variables
24+
ENV VULKAN_SDK=/opt/vulkan
25+
ENV PATH=$VULKAN_SDK/bin:$PATH
26+
ENV LD_LIBRARY_PATH=$VULKAN_SDK/lib:$LD_LIBRARY_PATH
27+
ENV CMAKE_PREFIX_PATH=$VULKAN_SDK:$CMAKE_PREFIX_PATH
28+
ENV PKG_CONFIG_PATH=$VULKAN_SDK/lib/pkgconfig:$PKG_CONFIG_PATH
1329

1430
# Build it
1531
WORKDIR /app

common/arg.cpp

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1106,7 +1106,7 @@ static void common_params_print_completion(common_params_context & ctx_arg) {
11061106
printf("\"\n\n");
11071107

11081108
printf(" case \"$prev\" in\n");
1109-
printf(" --model)\n");
1109+
printf(" --model|-m)\n");
11101110
printf(" COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
11111111
printf(" return 0\n");
11121112
printf(" ;;\n");
@@ -2555,15 +2555,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
25552555
{"--lora"}, "FNAME",
25562556
"path to LoRA adapter (can be repeated to use multiple adapters)",
25572557
[](common_params & params, const std::string & value) {
2558-
params.lora_adapters.push_back({ std::string(value), 1.0, nullptr });
2558+
params.lora_adapters.push_back({ std::string(value), 1.0, "", "", nullptr });
25592559
}
25602560
// we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
25612561
).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
25622562
add_opt(common_arg(
25632563
{"--lora-scaled"}, "FNAME", "SCALE",
25642564
"path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
25652565
[](common_params & params, const std::string & fname, const std::string & scale) {
2566-
params.lora_adapters.push_back({ fname, std::stof(scale), nullptr });
2566+
params.lora_adapters.push_back({ fname, std::stof(scale), "", "", nullptr });
25672567
}
25682568
// we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
25692569
).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
@@ -3538,6 +3538,22 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
35383538
}
35393539
).set_examples({LLAMA_EXAMPLE_SERVER}));
35403540

3541+
add_opt(common_arg(
3542+
{"--fim-qwen-30b-default"},
3543+
string_format("use default Qwen 3 Coder 30B A3B Instruct (note: can download weights from the internet)"),
3544+
[](common_params & params) {
3545+
params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
3546+
params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
3547+
params.port = 8012;
3548+
params.n_gpu_layers = 99;
3549+
params.flash_attn = true;
3550+
params.n_ubatch = 1024;
3551+
params.n_batch = 1024;
3552+
params.n_ctx = 0;
3553+
params.n_cache_reuse = 256;
3554+
}
3555+
).set_examples({LLAMA_EXAMPLE_SERVER}));
3556+
35413557
add_opt(common_arg(
35423558
{ "--diffusion-steps" }, "N",
35433559
string_format("number of diffusion steps (default: %d)", params.diffusion.steps),

common/chat.cpp

Lines changed: 21 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1361,6 +1361,26 @@ static common_chat_params common_chat_params_init_gpt_oss(const common_chat_temp
13611361
"<|end|>",
13621362
};
13631363

1364+
if (!inputs.json_schema.is_null()) {
1365+
data.grammar_lazy = false;
1366+
data.grammar = build_grammar([&](const common_grammar_builder & builder) {
1367+
auto schema = inputs.json_schema;
1368+
builder.resolve_refs(schema);
1369+
1370+
auto not_end = builder.add_rule("not-end",
1371+
"[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
1372+
auto analysis = builder.add_rule("analysis",
1373+
"\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
1374+
auto constraint = builder.add_rule("constraint", "\"<|constrain|>\"? [a-zA-Z0-9_-]+");
1375+
auto final = builder.add_rule("final",
1376+
"\"<|channel|>final\" ( \" \" " + constraint + " )? \"<|message|>\" " +
1377+
builder.add_schema("response", schema)
1378+
);
1379+
1380+
builder.add_rule("root", "( " + analysis + " \"<|start|>assistant\" )? " + final);
1381+
});
1382+
}
1383+
13641384
if (inputs.tools.is_array() && !inputs.tools.empty()) {
13651385
data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
13661386
data.grammar = build_grammar([&](const common_grammar_builder & builder) {
@@ -2121,7 +2141,7 @@ static common_chat_params common_chat_templates_apply_jinja(
21212141
}
21222142

21232143
// GPT-OSS
2124-
if (src.find("<|channel|>") != std::string::npos && params.json_schema.is_null()) {
2144+
if (src.find("<|channel|>") != std::string::npos) {
21252145
return common_chat_params_init_gpt_oss(tmpl, params);
21262146
}
21272147

common/common.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -988,7 +988,12 @@ struct common_init_result common_init_from_params(common_params & params) {
988988
return iparams;
989989
}
990990

991+
char buf[1024];
991992
la.ptr = lora.get();
993+
llama_adapter_meta_val_str(la.ptr, "adapter.lora.task_name", buf, sizeof(buf));
994+
la.task_name = buf;
995+
llama_adapter_meta_val_str(la.ptr, "adapter.lora.prompt_prefix", buf, sizeof(buf));
996+
la.prompt_prefix = buf;
992997
iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters
993998
}
994999

common/common.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,9 @@ struct common_adapter_lora_info {
3434
std::string path;
3535
float scale;
3636

37+
std::string task_name;
38+
std::string prompt_prefix;
39+
3740
struct llama_adapter_lora * ptr;
3841
};
3942

0 commit comments

Comments (0)