Skip to content

Commit e5cc811

Browse files
CUDA: fix crash on uneven context
1 parent 1f5accb commit e5cc811

File tree

5 files changed

+24
-12
lines changed

5 files changed

+24
-12
lines changed

ggml/src/ggml-cuda/ggml-cuda.cu

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2113,7 +2113,7 @@ static bool ggml_cuda_should_fuse_mul_mat_vec_f(const ggml_tensor * tensor) {
21132113
src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
21142114

21152115
const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
2116-
use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, is_mul_mat_id ? src1->ne[2] : src1->ne[1]);
2116+
use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src0->nb, is_mul_mat_id ? src1->ne[2] : src1->ne[1]);
21172117

21182118
const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft) ||
21192119
ggml_backend_buft_is_cuda_split(src1->buffer->buft);
@@ -2207,16 +2207,16 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
22072207
const int cc = ggml_cuda_info().devices[id].cc;
22082208
const int warp_size = ggml_cuda_info().devices[id].warp_size;
22092209
use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]);
2210-
use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src1->ne[1], /*mul_mat_id=*/false);
2211-
use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src1->ne[1]);
2210+
use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src0->nb, src1->ne[1], /*mul_mat_id=*/false);
2211+
use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src0->nb, src1->ne[1]);
22122212
any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc);
22132213
}
22142214
} else {
22152215
const int cc = ggml_cuda_info().devices[ctx.device].cc;
22162216
const int warp_size = ggml_cuda_info().devices[ctx.device].warp_size;
22172217
use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]);
2218-
use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src1->ne[1], /*mul_mat_id=*/false);
2219-
use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src1->ne[1]);
2218+
use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src0->nb, src1->ne[1], /*mul_mat_id=*/false);
2219+
use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src0->nb, src1->ne[1]);
22202220
any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc);
22212221
}
22222222

@@ -2287,7 +2287,7 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor *
22872287
return;
22882288
}
22892289

2290-
if (ggml_cuda_should_use_mmf(src0->type, cc, WARP_SIZE, src0->ne, src1->ne[2], /*mul_mat_id=*/true)) {
2290+
if (ggml_cuda_should_use_mmf(src0->type, cc, WARP_SIZE, src0->ne, src0->nb, src1->ne[2], /*mul_mat_id=*/true)) {
22912291
ggml_cuda_mul_mat_f(ctx, src0, src1, ids, dst);
22922292
return;
22932293
}

ggml/src/ggml-cuda/mmf.cu

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -119,15 +119,21 @@ void ggml_cuda_mul_mat_f(ggml_backend_cuda_context & ctx, const ggml_tensor * sr
119119
}
120120
}
121121

122-
bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * src0_ne, const int src1_ncols, bool mul_mat_id) {
123-
122+
bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * src0_ne,
123+
const size_t * src0_nb, const int src1_ncols, bool mul_mat_id) {
124124
if (ggml_is_quantized(type)) {
125125
return false;
126126
}
127127

128-
if (src0_ne[0] % (warp_size * (4/ggml_type_size(type))) != 0) {
128+
const size_t ts = ggml_type_size(type);
129+
if (src0_ne[0] % (warp_size * (4/ts)) != 0) {
129130
return false;
130131
}
132+
for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
133+
if (src0_nb[i] % (2*ts) != 0) {
134+
return false;
135+
}
136+
}
131137
if (src0_ne[1] % MMF_ROWS_PER_BLOCK != 0) {
132138
return false;
133139
}

ggml/src/ggml-cuda/mmf.cuh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ struct mmf_ids_data {
1717

1818
void ggml_cuda_mul_mat_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst);
1919

20-
bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * scr0_ne, const int src1_ncols, bool mul_mat_id);
20+
bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * scr0_ne, const size_t * src0_nb, const int src1_ncols, bool mul_mat_id);
2121

2222
template <typename T, int rows_per_block, int cols_per_block, int nwarps, bool has_ids>
2323
__launch_bounds__(ggml_cuda_get_physical_warp_size()*nwarps, 1)

ggml/src/ggml-cuda/mmvf.cu

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -716,10 +716,16 @@ void ggml_cuda_op_mul_mat_vec_f(
716716
GGML_UNUSED_VARS(ctx, src1, dst, src1_ddq_i, src1_ncols, src1_padded_row_size);
717717
}
718718

719-
bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, int64_t ne11) {
719+
bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, const size_t * src0_nb, int64_t ne11) {
720720
if (src0_ne[0] % 2 != 0) {
721721
return false;
722722
}
723+
const size_t ts = ggml_type_size(type);
724+
for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
725+
if (src0_nb[i] % (2*ts) != 0) {
726+
return false;
727+
}
728+
}
723729
switch (type) {
724730
case GGML_TYPE_F32:
725731
if (GGML_CUDA_CC_IS_NVIDIA(cc)) {

ggml/src/ggml-cuda/mmvf.cuh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,4 +9,4 @@ void ggml_cuda_op_mul_mat_vec_f(
99
const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
1010
const int64_t src1_padded_row_size, cudaStream_t stream);
1111

12-
bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, int64_t ne11);
12+
bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, const size_t * src0_nb, int64_t ne11);

0 commit comments

Comments (0)