Commit 4373bbe

Remove extra semicolons
1 parent 9a5e1c1 commit 4373bbe

File tree: 15 files changed, +94 -94 lines changed

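The change is purely mechanical: each stray `;` after a closing brace is dropped. For background (a minimal sketch, not code from this commit): in C++, a semicolon after a compound statement is a separate empty statement. It compiles, but it is dead syntax that compilers can flag (e.g. Clang's -Wextra-semi-stmt):

    #include <cstdio>

    // Hypothetical example showing the pattern being removed.
    void do_work(bool cond) {
        if (cond) {
            std::puts("working");
        };  // <- this ';' is an extra empty statement after the block
    }       //    (Clang warns under -Wextra-semi-stmt)

    int main() {
        do_work(true);
    }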

src/models/command-r.cpp

Lines changed: 10 additions & 10 deletions

@@ -37,34 +37,34 @@ llm_build_command_r::llm_build_command_r(const llama_model & model, const llm_gr
             if (model.layers[il].bq) {
                 Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                 cb(Qcur, "Qcur", il);
-            };
+            }
             ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
             cb(Kcur, "Kcur", il);
             if (model.layers[il].bk) {
                 Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                 cb(Kcur, "Kcur", il);
-            };
+            }
             ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
             cb(Vcur, "Vcur", il);
             if (model.layers[il].bv) {
                 Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                 cb(Vcur, "Vcur", il);
-            };
+            }
             Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
             Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
             Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
 
             if (model.layers[il].attn_q_norm) {
                 Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM, il);
                 cb(Qcur, "Qcur", il);
-            };
+            }
             Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                  ext_factor, attn_factor, beta_fast, beta_slow);
 
             if (model.layers[il].attn_k_norm) {
                 Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM, il);
                 cb(Kcur, "Kcur", il);
-            };
+            }
             Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                  ext_factor, attn_factor, beta_fast, beta_slow);
 
@@ -75,12 +75,12 @@ llm_build_command_r::llm_build_command_r(const llama_model & model, const llm_gr
             cur = build_attn(inp_attn,
                     model.layers[il].wo, model.layers[il].bo,
                     Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
-        };
+        }
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
             ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
-        };
+        }
         ggml_tensor * attn_out = cur;
 
         // feed-forward network
@@ -91,7 +91,7 @@ llm_build_command_r::llm_build_command_r(const llama_model & model, const llm_gr
                     model.layers[il].ffn_down, NULL, NULL,
                     NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
             cb(cur, "ffn_out", il);
-        };
+        }
         // add together residual + FFN + self-attention
         cur = ggml_add(ctx0, cur, inpL);
         cur = ggml_add(ctx0, cur, attn_out);
@@ -101,7 +101,7 @@ llm_build_command_r::llm_build_command_r(const llama_model & model, const llm_gr
 
         // input for next layer
         inpL = cur;
-    };
+    }
     cur = inpL;
 
     cur = build_norm(cur, model.output_norm, NULL, LLM_NORM, -1);
@@ -114,7 +114,7 @@ llm_build_command_r::llm_build_command_r(const llama_model & model, const llm_gr
 
     if (f_logit_scale) {
         cur = ggml_scale(ctx0, cur, f_logit_scale);
-    };
+    }
     cb(cur, "result_output", -1);
     res->t_logits = cur;
 
src/models/deci.cpp

Lines changed: 10 additions & 10 deletions

@@ -36,7 +36,7 @@ llm_build_deci::llm_build_deci(const llama_model & model, const llm_graph_params
             // norm
             cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
             cb(cur, "attn_norm", il);
-        };
+        }
         if (n_head > 0 && n_head_kv == 0) {
             // "linear attention" of Llama-3_1-Nemotron-51B
             cur = build_lora_mm(model.layers[il].wo, cur);
@@ -52,19 +52,19 @@ llm_build_deci::llm_build_deci(const llama_model & model, const llm_graph_params
             if (model.layers[il].bq) {
                 Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                 cb(Qcur, "Qcur", il);
-            };
+            }
             ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
             cb(Kcur, "Kcur", il);
             if (model.layers[il].bk) {
                 Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                 cb(Kcur, "Kcur", il);
-            };
+            }
             ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
             cb(Vcur, "Vcur", il);
             if (model.layers[il].bv) {
                 Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                 cb(Vcur, "Vcur", il);
-            };
+            }
             Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
             Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
             Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
@@ -82,21 +82,21 @@ llm_build_deci::llm_build_deci(const llama_model & model, const llm_graph_params
             cur = build_attn(inp_attn,
                     model.layers[il].wo, model.layers[il].bo,
                     Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
-        };
+        }
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-        };
+        }
         // FFN-free layer of Llama-3_1-Nemotron-Ultra-253B
         if (n_ff == 0) {
             continue;
-        };
+        }
        // modified to support attention-free layer of Llama-3_1-Nemotron-51B
        ggml_tensor * ffn_inp = cur;
        if (n_head > 0) {
            ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
-        };
+        }
        // feed-forward network
        if (model.layers[il].ffn_gate_inp == nullptr) {
            cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
@@ -108,7 +108,7 @@ llm_build_deci::llm_build_deci(const llama_model & model, const llm_graph_params
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                    NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
-        };
+        }
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "ffn_out", il);
 
@@ -117,7 +117,7 @@ llm_build_deci::llm_build_deci(const llama_model & model, const llm_graph_params
 
        // input for next layer
        inpL = cur;
-    };
+    }
    cur = inpL;
 
    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

src/models/deepseek.cpp

Lines changed: 7 additions & 7 deletions

@@ -42,19 +42,19 @@ llm_build_deepseek::llm_build_deepseek(const llama_model & model, const llm_grap
             if (model.layers[il].bq) {
                 Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                 cb(Qcur, "Qcur", il);
-            };
+            }
             ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
             cb(Kcur, "Kcur", il);
             if (model.layers[il].bk) {
                 Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                 cb(Kcur, "Kcur", il);
-            };
+            }
             ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
             cb(Vcur, "Vcur", il);
             if (model.layers[il].bv) {
                 Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                 cb(Vcur, "Vcur", il);
-            };
+            }
             Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
             Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
             Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
@@ -72,11 +72,11 @@ llm_build_deepseek::llm_build_deepseek(const llama_model & model, const llm_grap
             cur = build_attn(inp_attn,
                     model.layers[il].wo, model.layers[il].bo,
                     Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
-        };
+        }
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-        };
+        }
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);
 
@@ -118,15 +118,15 @@ llm_build_deepseek::llm_build_deepseek(const llama_model & model, const llm_grap
                 cur = ggml_add(ctx0, moe_out, ffn_shexp);
                 cb(cur, "ffn_out", il);
             }
-        };
+        }
         cur = ggml_add(ctx0, cur, ffn_inp);
 
         cur = build_cvec(cur, il);
         cb(cur, "l_out", il);
 
         // input for next layer
         inpL = cur;
-    };
+    }
     cur = inpL;
 
     cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

src/models/deepseek2.cpp

Lines changed: 5 additions & 5 deletions

@@ -58,7 +58,7 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
             } else {
                 q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                 cb(q, "q", il);
-            };
+            }
             // split into {n_embd_head_qk_nope, n_head, n_tokens}
             ggml_tensor * q_nope =
                 ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, ggml_row_size(q->type, n_embd_head_k),
@@ -164,11 +164,11 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
                     model.layers[il].wo, NULL,
                     Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
             }
-        };
+        }
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-        };
+        }
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);
 
@@ -210,15 +210,15 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
                 cur = ggml_add(ctx0, moe_out, ffn_shexp);
                 cb(cur, "ffn_out", il);
             }
-        };
+        }
         cur = ggml_add(ctx0, cur, ffn_inp);
 
         cur = build_cvec(cur, il);
         cb(cur, "l_out", il);
 
         // input for next layer
         inpL = cur;
-    };
+    }
     cur = inpL;
 
     cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

src/models/dots1.cpp

Lines changed: 4 additions & 4 deletions

@@ -63,11 +63,11 @@ llm_build_dots1::llm_build_dots1(const llama_model & model, const llm_graph_para
             cur = build_attn(inp_attn,
                     model.layers[il].wo, model.layers[il].bo,
                     Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
-        };
+        }
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-        };
+        }
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);
 
@@ -108,15 +108,15 @@ llm_build_dots1::llm_build_dots1(const llama_model & model, const llm_graph_para
                 cur = ggml_add(ctx0, moe_out, ffn_shexp);
                 cb(cur, "ffn_out", il);
             }
-        };
+        }
         cur = ggml_add(ctx0, cur, ffn_inp);
 
         cur = build_cvec(cur, il);
         cb(cur, "l_out", il);
 
         // input for next layer
         inpL = cur;
-    };
+    }
     cur = inpL;
 
     cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

src/models/dream.cpp

Lines changed: 3 additions & 3 deletions

@@ -61,11 +61,11 @@ llm_build_dream::llm_build_dream(const llama_model & model, const llm_graph_para
             cur = build_attn(inp_attn,
                     model.layers[il].wo, model.layers[il].bo,
                     Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
-        };
+        }
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-        };
+        }
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);
 
@@ -87,7 +87,7 @@ llm_build_dream::llm_build_dream(const llama_model & model, const llm_graph_para
 
         // input for next layer
         inpL = cur;
-    };
+    }
     cur = inpL;
 
     cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

src/models/ernie4-5-moe.cpp

Lines changed: 8 additions & 8 deletions

@@ -28,7 +28,7 @@ llm_build_ernie4_5_moe::llm_build_ernie4_5_moe(const llama_model & model, const
         {
             cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
             cb(cur, "attn_norm", il);
-        };
+        }
         // self-attention
         {
             // compute Q and K and RoPE them
@@ -37,19 +37,19 @@ llm_build_ernie4_5_moe::llm_build_ernie4_5_moe(const llama_model & model, const
             if (model.layers[il].bq) {
                 Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                 cb(Qcur, "Qcur", il);
-            };
+            }
             ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
             cb(Kcur, "Kcur", il);
             if (model.layers[il].bk) {
                 Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                 cb(Kcur, "Kcur", il);
-            };
+            }
             ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
             cb(Vcur, "Vcur", il);
             if (model.layers[il].bv) {
                 Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                 cb(Vcur, "Vcur", il);
-            };
+            }
             Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
             Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
             Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
@@ -68,11 +68,11 @@ llm_build_ernie4_5_moe::llm_build_ernie4_5_moe(const llama_model & model, const
                     model.layers[il].wo, NULL,
                     Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
             cb(cur, "attn_out", il);
-        };
+        }
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-        };
+        }
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);
 
@@ -123,7 +123,7 @@ llm_build_ernie4_5_moe::llm_build_ernie4_5_moe(const llama_model & model, const
                 cur = moe_out;
             }
             cb(cur, "ffn_out", il);
-        };
+        }
         cur = ggml_add(ctx0, cur, ffn_inp);
         cb(cur, "ffn_out", il);
 
@@ -132,7 +132,7 @@ llm_build_ernie4_5_moe::llm_build_ernie4_5_moe(const llama_model & model, const
 
         // input for next layer
         inpL = cur;
-    };
+    }
     cur = inpL;
 
     cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);
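Worth noting (general C++ behavior, sketched below; none of the hunks above hit this case): a stray `;` after an if-block is more than noise when an `else` follows, because the empty statement terminates the `if` before the `else` is seen:

    // Hypothetical sketch: this does NOT compile.
    if (cond) {
        step_a();
    };          // extra ';' ends the if-statement here...
    else {      // ...so this 'else' has nothing to attach to: syntax error
        step_b();
    }

Dropping the semicolon restores the intended if/else pairing.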
