@@ -4476,6 +4476,13 @@ static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) {
44764476    *dsti = __float2half (*xi);
44774477}
44784478
// Element copier for the generic cpy kernel: move a single f16 value from the
// raw source byte pointer to the raw destination byte pointer, unchanged.
static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) {
    *(half *) cdsti = *(const half *) cxi;
}
4485+ 
44794486template  <cpy_kernel_t  cpy_1>
44804487static  __global__  void  cpy_f32_f16 (const  char  * cx, char  * cdst, const  int  ne,
44814488                                   const  int  ne00, const  int  ne01, const  int  nb00, const  int  nb01, const  int  nb02,
@@ -4729,6 +4736,25 @@ static __global__ void clamp_f32(const float * x, float * dst, const float min,
47294736    dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
47304737}
47314738
// im2col: each thread writes exactly one element of the unrolled destination
// matrix — the f32 input value converted to f16, or 0 when the sampled input
// coordinate falls inside the padding region.
//
// Launch layout (set up in im2col_f32_f16_cuda):
//   gridDim  = (IC, OH, OW) -> blockIdx.x  = input channel, blockIdx.y  = output row, blockIdx.z  = output col
//   blockDim = (N,  KH, KW) -> threadIdx.x = batch index,   threadIdx.y = kernel row, threadIdx.z = kernel col
//
// CHW = IC*KH*KW is the width of one destination row; ofs0/ofs1 are the
// per-batch and per-channel source strides, in floats.
// NOTE(review): this mapping needs N*KH*KW <= the device's max threads per
// block (1024) — confirm callers never exceed that for large batch/kernel.
static __global__ void im2col_f32_f16(
        const float * x, half * dst,
        int ofs0, int ofs1, int IW, int IH, int CHW,
        int s0, int s1, int p0, int p1, int d0, int d1) {
    // Input pixel sampled by this (output position, kernel tap) pair; may be
    // negative or >= IW/IH when the tap lands in the padding.
    const int iiw = blockIdx.z * s0 + threadIdx.z * d0 - p0;
    const int iih = blockIdx.y * s1 + threadIdx.y * d1 - p1;

    // Destination row  = (batch, oh, ow) flattened  -> first parenthesized term * CHW
    // Destination col  = (ic, kh, kw)   flattened  -> second parenthesized term
    const int offset_dst =
        (threadIdx.x * gridDim.y * gridDim.z + blockIdx.y * gridDim.z + blockIdx.z) * CHW +
        (blockIdx.x * (blockDim.y * blockDim.z) + threadIdx.y * blockDim.z + threadIdx.z);

    if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) {
        // In-bounds: index the source by (batch, channel) strides plus the 2D pixel offset.
        const int offset_src = threadIdx.x * ofs0 + blockIdx.x * ofs1;
        dst[offset_dst] = __float2half(x[offset_src + iih * IW + iiw]);
    } else {
        // Out of bounds: zero padding.
        dst[offset_dst] = __float2half(0.0f);
    }
}
4757+ 
47324758template <int  qk, int  qr, dequantize_kernel_t  dq>
47334759static  void  get_rows_cuda (const  void  * x, const  int32_t  * y, float  * dst, const  int  nrows, const  int  ncols, cudaStream_t stream) {
47344760    const  dim3  block_dims (CUDA_GET_ROWS_BLOCK_SIZE, 1 , 1 );
@@ -5618,6 +5644,16 @@ static void ggml_cpy_f32_f16_cuda(
56185644        (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
56195645}
56205646
// Launch the generic strided-copy kernel instantiated with the f16 -> f16
// element copier; one thread per element, CUDA_CPY_BLOCK_SIZE threads per block.
static void ggml_cpy_f16_f16_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) {

    const int n_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; // ceil-div over elements
    cpy_f32_f16<cpy_1_f16_f16><<<n_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
}
5656+ 
// Host wrapper: scale k f32 elements by a constant factor on the given stream.
static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) {
    const int blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; // ceil-div
    scale_f32<<<blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k);
}
@@ -5701,6 +5737,15 @@ static void soft_max_f32_cuda(const float * x, float * dst, const int ncols_x, c
57015737    soft_max_f32<<<block_nums, block_dims, 0 , stream>>> (x, dst, ncols_x);
57025738}
57035739
// Host wrapper for the im2col kernel: one block per (channel, output row,
// output col) and one thread per (batch, kernel row, kernel col).
static void im2col_f32_f16_cuda(const float * x, half * dst,
    int OH, int IW, int IH, int OW, int IC,
    int KH, int KW, int N, int ofs0, int ofs1,
    int s0, int s1, int p0, int p1, int d0, int d1, cudaStream_t stream) {
    const int CHW = IC * KH * KW; // width of one row of the unrolled matrix
    const dim3 grid(IC, OH, OW);
    const dim3 block(N, KH, KW);
    im2col_f32_f16<<<grid, block, 0, stream>>>(x, dst, ofs0, ofs1, IW, IH, CHW, s0, s1, p0, p1, d0, d1);
}
5748+ 
57045749//  buffer pool for cuda
57055750#define  MAX_CUDA_BUFFERS  256 
57065751
@@ -6483,7 +6528,7 @@ inline void ggml_cuda_op_mul_mat_cublas(
64836528            src1_as_f16 = (half *) ggml_cuda_pool_malloc_async (ne * sizeof (half), &src1_as, id, stream);
64846529            to_fp16_cuda (src1_ddf_i, src1_as_f16, ne, stream);
64856530        }
6486-         const  half * src1_ptr = src1->type  == GGML_TYPE_F16 ? (const  half *) src1_ddq_i  : src1_as_f16;
6531+         const  half * src1_ptr = src1->type  == GGML_TYPE_F16 ? (const  half *) src1_ddf_i  : src1_as_f16;
64876532        size_t  dst_f16_as = 0 ;
64886533        half * dst_f16 = (half *) ggml_cuda_pool_malloc_async (row_diff*src1_ncols * sizeof (half), &dst_f16_as, id, stream);
64896534
@@ -6659,6 +6704,45 @@ inline void ggml_cuda_op_alibi(
66596704    (void ) src1_dd;
66606705}
66616706
// GGML_OP_IM2COL: unroll patches of src1 (the f32 input) into rows of dst
// (f16), so a following matrix multiply implements convolution. src0 (the
// convolution kernel tensor) contributes only its dimensions KH/KW here.
//
// Operator parameters are packed into dst->op_params as int32:
//   [0..1] stride s0/s1, [2..3] padding p0/p1, [4..5] dilation d0/d1,
//   [6]    1 for the 2D case, otherwise treated as 1D.
inline void ggml_cuda_op_im2col(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
    const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F16);

    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];

    const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1;

    // Input geometry: batch, channels, height (1 in the 1D case), width.
    const int64_t N  = src1->ne[is_2D ? 3 : 2];
    const int64_t IC = src1->ne[is_2D ? 2 : 1];
    const int64_t IH = is_2D ? src1->ne[1] : 1;
    const int64_t IW =         src1->ne[0];

    // Convolution kernel geometry taken from src0's shape.
    const int64_t KH = is_2D ? src0->ne[1] : 1;
    const int64_t KW =         src0->ne[0];

    // Output geometry taken from dst's shape.
    const int64_t OH = is_2D ? dst->ne[2] : 1;
    const int64_t OW =         dst->ne[1];

    const size_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32
    const size_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32

    // Launch the kernel; dst_dd is reinterpreted as half since dst is f16.
    // NOTE(review): the int64_t extents narrow to int at this call — confirm
    // tensors stay small enough that this cannot overflow.
    im2col_f32_f16_cuda(src1_dd, (half*) dst_dd,
        OH, IW, IH, OW, IC, KH, KW, N,
        ofs0, ofs1, s0, s1, p0, p1, d0, d1, main_stream);

    // Suppress unused-parameter warnings; the f16 kernel data itself is never
    // read — only src0's shape matters for im2col.
    (void) src0;
    (void) src0_dd;
}
6745+ 
66626746inline  void  ggml_cuda_op_diag_mask_inf (
66636747    const  ggml_tensor * src0, const  ggml_tensor * src1, ggml_tensor * dst,
66646748    const  float  * src0_dd, const  float  * src1_dd, float  * dst_dd, const  cudaStream_t & main_stream) {
@@ -7549,6 +7633,9 @@ static void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, gg
75497633    } else  if  (src0->type  == GGML_TYPE_F32 && src1->type  == GGML_TYPE_F16) {
75507634        ggml_cpy_f32_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
75517635                              ne10, ne11, nb10, nb11, nb12, main_stream);
7636+     } else  if  (src0->type  == GGML_TYPE_F16 && src1->type  == GGML_TYPE_F16) {
7637+         ggml_cpy_f16_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
7638+                               ne10, ne11, nb10, nb11, nb12, main_stream);
75527639    } else  {
75537640        fprintf (stderr, " %s: unsupported type combination (%s to %s)\n "  , __func__,
75547641                ggml_type_name (src0->type ), ggml_type_name (src1->type ));
@@ -7580,6 +7667,10 @@ static void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1,
75807667    ggml_cuda_op_flatten (src0, src1, dst, ggml_cuda_op_alibi);
75817668}
75827669
// Dispatch wrapper for GGML_OP_IM2COL: flatten over the non-contiguous dims
// and run ggml_cuda_op_im2col on each slice.
// Marked static for consistency with the other dispatch wrappers in this file
// (ggml_cuda_alibi, ggml_cuda_nop, ...) — it is only referenced from
// ggml_cuda_compute_forward, so it should not have external linkage.
static void ggml_cuda_im2col(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_im2col);
}
7673+ 
75837674static  void  ggml_cuda_nop (const  ggml_tensor * src0, const  ggml_tensor * src1, ggml_tensor * dst) {
75847675    (void ) src0;
75857676    (void ) src1;
@@ -7943,6 +8034,9 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_
79438034        case  GGML_OP_ALIBI:
79448035            func = ggml_cuda_alibi;
79458036            break ;
8037+         case  GGML_OP_IM2COL:
8038+             func = ggml_cuda_im2col;
8039+             break ;
79468040        default :
79478041            return  false ;
79488042    }
0 commit comments