#include "ggml-cuda/common.cuh"
#include "roll.cuh"

// Wrap an index that has been shifted out of [0, ne) back into range.
// A single correction is sufficient as long as the shift magnitude is
// smaller than the dimension size.
static __forceinline__ __device__ int64_t wrap_index(const int64_t idx, const int64_t ne) {
    if (idx < 0) {
        return idx + ne;
    }
    if (idx >= ne) {
        return idx - ne;
    }
    return idx;
}

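// Illustration of the roll implemented by the kernel below, derived purely
// from its index math: with ne00 = 4, a source row [a, b, c, d] and a shift
// of s0 = 1, destination index i0 reads from wrap_index(i0 - 1, 4), so the
// output row is [d, a, b, c]; the data is rotated right by one position with
// wraparound. Negative shifts rotate in the opposite direction.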
static __global__ void roll_f32_cuda(const float * __restrict__ src,
                                     float * __restrict__ dst,
                                     const int64_t ne00,
                                     const int64_t ne01,
                                     const int64_t ne02,
                                     const int64_t ne03,
                                     const int     s0,
                                     const int     s1,
                                     const int     s2,
                                     const int     s3) {
    // One thread per element of the (contiguous) destination tensor.
    const int64_t idx        = int64_t(blockDim.x) * blockIdx.x + threadIdx.x;
    const int64_t n_elements = ne00 * ne01 * ne02 * ne03;

    if (idx >= n_elements) {
        return;
    }

    // Decompose the flat index into per-dimension destination coordinates.
    const int64_t i0 = idx % ne00;
    const int64_t i1 = (idx / ne00) % ne01;
    const int64_t i2 = (idx / (ne00 * ne01)) % ne02;
    const int64_t i3 = (idx / (ne00 * ne01 * ne02)) % ne03;

    // The source coordinate is the destination coordinate shifted back by the
    // per-dimension roll amount, wrapped into range.
    const int64_t d0 = wrap_index(i0 - s0, ne00);
    const int64_t d1 = wrap_index(i1 - s1, ne01);
    const int64_t d2 = wrap_index(i2 - s2, ne02);
    const int64_t d3 = wrap_index(i3 - s3, ne03);

    dst[i3 * (ne00 * ne01 * ne02) + i2 * (ne01 * ne00) + i1 * ne00 + i0] =
        src[d3 * (ne00 * ne01 * ne02) + d2 * (ne01 * ne00) + d1 * ne00 + d0];
}

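// Hedged note on how this op is typically produced (not defined in this file;
// treat the exact builder name and signature as an assumption): a graph-side
// helper such as ggml_roll(ctx, a, s0, s1, s2, s3) records the four
// per-dimension shifts in op_params[0..3], and the CUDA backend then
// dispatches the resulting node to ggml_cuda_op_roll below, e.g.
//
//     // roll each row of x right by one element, other dims unchanged
//     struct ggml_tensor * y = ggml_roll(ctx, x, 1, 0, 0, 0);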
void ggml_cuda_op_roll(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    // Per-dimension shift amounts, stored in the op parameters.
    const int s0 = dst->op_params[0];
    const int s1 = dst->op_params[1];
    const int s2 = dst->op_params[2];
    const int s3 = dst->op_params[3];

    const ggml_tensor * src0   = dst->src[0];
    const float *       src0_d = (const float *) src0->data;
    float *             dst_d  = (float *) dst->data;

    GGML_TENSOR_UNARY_OP_LOCALS;

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    cudaStream_t stream = ctx.stream();

    // Launch one thread per element, rounding the grid size up.
    const int64_t sz         = ne00 * ne01 * ne02 * ne03;
    const int64_t num_blocks = (sz + CUDA_ROLL_BLOCK_SIZE - 1) / CUDA_ROLL_BLOCK_SIZE;

    roll_f32_cuda<<<num_blocks, CUDA_ROLL_BLOCK_SIZE, 0, stream>>>(
        src0_d, dst_d, ne00, ne01, ne02, ne03, s0, s1, s2, s3);
}