Commit b8acb6c

swift : fix build

ggml-ci

1 parent: b5554b9

1 file changed (+5 −3)

examples/batched.swift/Sources/main.swift

Lines changed: 5 additions & 3 deletions

@@ -69,7 +69,7 @@ for id: llama_token in tokens {
 
 print("\n")
 
-var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0)
+var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1)
 defer {
     llama_batch_free(batch)
 }
@@ -80,7 +80,8 @@ batch.n_tokens = Int32(tokens.count)
 for (i, token) in tokens.enumerated() {
     batch.token[i] = token
     batch.pos[i] = Int32(i)
-    batch.seq_id[i] = 0
+    batch.n_seq_id[i] = 1
+    batch.seq_id[i][0] = 0
     batch.logits[i] = 0
 }
 
@@ -169,7 +170,8 @@ while n_cur <= n_len {
         // push this new token for next evaluation
         batch.token[Int(batch.n_tokens)] = new_token_id
         batch.pos[Int(batch.n_tokens)] = n_cur
-        batch.seq_id[Int(batch.n_tokens)] = Int32(i)
+        batch.n_seq_id[Int(batch.n_tokens)] = 1
+        batch.seq_id[Int(batch.n_tokens)][0] = Int32(i)
         batch.logits[Int(batch.n_tokens)] = 1
 
         i_batch[i] = batch.n_tokens
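
For context, this commit follows the updated C batch API: each batch position now carries a count of sequence ids (n_seq_id) plus a per-token array of ids (seq_id[i][0]) instead of a single scalar seq_id[i], and llama_batch_init takes a third argument, assumed here to be the maximum number of sequence ids per token. Below is a minimal sketch, not part of the commit, of how the population pattern from this diff could be wrapped in a helper; the name batch_add is hypothetical, and only the field accesses shown above come from the source.

// Sketch only: appends one token to a llama_batch using the updated fields
// from the diff above. Assumes the llama C module is imported, as in
// examples/batched.swift; batch_add is a hypothetical helper, not library API.
func batch_add(_ batch: inout llama_batch, token: llama_token, pos: Int32, seq: Int32, logits: Bool) {
    let i = Int(batch.n_tokens)
    batch.token[i]     = token
    batch.pos[i]       = pos
    batch.n_seq_id[i]  = 1       // one sequence id for this token, as in the diff
    batch.seq_id[i][0] = seq     // seq_id is now an array of ids per token; depending
                                 // on how the header is imported, this may need an
                                 // unwrap: batch.seq_id[i]![0] = seq
    batch.logits[i]    = logits ? 1 : 0
    batch.n_tokens += 1
}

// Usage sketch, mirroring the prompt loop above (third llama_batch_init
// argument assumed to be the max number of sequence ids per token):
//
//     var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1)
//     for (i, token) in tokens.enumerated() {
//         batch_add(&batch, token: token, pos: Int32(i), seq: 0, logits: false)
//     }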
