k-quants #1684

Merged · 32 commits · Jun 5, 2023
Changes from 1 commit

Commits
8673a41
Starting to add k-quantization to ggml
Kawrakow May 27, 2023
b4f7134
Adding Q3_K and Q8_K (de)-quantization
Kawrakow May 27, 2023
c93cce3
Q3_K now working on CUDA and AVX2/scalar
Kawrakow May 28, 2023
a3c0673
Some improvement for Q3_K on CUDA
Kawrakow May 28, 2023
3d8b1de
Some more CUDA optimizations for Q3_K
Kawrakow May 29, 2023
a0b8e9f
Adding Q4_K - scalar, AVX2, CUDA
Kawrakow May 29, 2023
cf221af
Adding Q6_K - scalar, AVX2, CUDA
Kawrakow May 29, 2023
b835d0f
Adding Q5_K - scalar, AVX2, CUDA
Kawrakow May 29, 2023
5c5191a
Per convention, all QX_K quantizations use Q5_K for output.weight
Kawrakow May 29, 2023
d537b97
Adding quantization mixes
Kawrakow May 29, 2023
54f808d
Quantization mixes: didn't quite get what I wanted in the last commit
Kawrakow May 29, 2023
a2533a7
Q4_K dot product for ARM_NEON
Kawrakow May 30, 2023
5ca15ce
Q6_K dot product for ARM_NEON
Kawrakow May 30, 2023
a197eb5
Q5_K dot product for ARM_NEON
Kawrakow May 30, 2023
13264fa
Adding Q3_K dot for ARM_NEON
Kawrakow May 30, 2023
4faa040
A very slightly faster ARM_NEON Q3_K dot
Kawrakow May 31, 2023
b439efb
Adding Q2_K - just CUDA for now
Kawrakow May 31, 2023
8516fdf
Adding scalar and AVX2 Q2_K dot
Kawrakow May 31, 2023
6ec7057
Adding ARM_NEON Q2_K dot
Kawrakow May 31, 2023
7bcc376
A slightly faster ARM_NEON Q2_K dot
Kawrakow Jun 1, 2023
e51ce72
Fixed bug in Q2_K CUDA dot product kernel
Kawrakow Jun 1, 2023
c5959d5
Don't print zeros/NaNs when no count histogram has been collected
Kawrakow Jun 1, 2023
9a9c5a0
A 10% faster CUDA vector dot kernel for Q3_K
Kawrakow Jun 1, 2023
894210a
A slightly faster Q4_K AVX2 dot product
Kawrakow Jun 2, 2023
abd99a8
A slightly faster ARM_NEON Q4_K dot product
Kawrakow Jun 3, 2023
8f5d42d
Minor
Kawrakow Jun 3, 2023
6ef1382
Fix quantization error test
Kawrakow Jun 3, 2023
0a71a4e
Fix docker build
Kawrakow Jun 3, 2023
431693c
Added forgotten ggml.o dependence on k_quants.h to the Makefile
Kawrakow Jun 4, 2023
32a5f3a
Had unintentionally committed the Makefile with -Ofast enabled
Kawrakow Jun 4, 2023
12d4344
ggml : rename k_quants -> ggml-quants-k, use lowercase in code
ggerganov Jun 5, 2023
af275fa
Merge branch 'master' into ik/k_quants
ggerganov Jun 5, 2023
Adding ARM_NEON Q2_K dot
About the same performance as Q4_K.
Kawrakow committed Jun 3, 2023
commit 6ec70579cb266fbac560bac8dc053a176cab381c
96 changes: 95 additions & 1 deletion k_quants.c
@@ -1009,7 +1009,101 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri

const int nb = n / QK_K;

-#ifdef __AVX2__
+#ifdef __ARM_NEON

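// m3 (0x3) extracts one 2-bit quant from a byte; m4 (0xF) splits the packed
// scale/min nibbles; vzero seeds the vdotq_s32 accumulators.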
const uint8x16_t m3 = vdupq_n_u8(0x3);
const uint8x16_t m4 = vdupq_n_u8(0xF);
const int32x4_t vzero = vdupq_n_s32(0);

int8x16x4_t q2bytes;
uint8_t aux[16];

float sum = 0;

for (int i = 0; i < nb; ++i) {

const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);

const uint8_t * restrict q2 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
const uint8_t * restrict sc = x[i].scales;

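// Each byte of x[i].scales packs a 4-bit scale (low nibble) and a 4-bit min
// (high nibble) for one group of 16 quants. The mins are handled here in one
// shot against y[i].bsums, the precomputed per-group sums of the q8 quants,
// so the main loop below only has to apply the scales.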
const uint8x16_t mins_and_scales = vld1q_u8(sc);
const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
vst1q_u8(aux, scales);
const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums);
const int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));

int isum = 0;
int is = 0;

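// Each pass consumes 128 quants: 32 q2 bytes, each holding four 2-bit
// values at bit offsets 0, 2, 4 and 6, against 128 q8 bytes.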
for (int j = 0; j < QK_K/128; ++j) {

const uint8x16x2_t q2bits = vld1q_u8_x2(q2); q2 += 32;
const int8x16x4_t q8bytes_1 = vld1q_s8_x4(q8); q8 += 64;
const int8x16x4_t q8bytes_2 = vld1q_s8_x4(q8); q8 += 64;

q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], 2), m3));
q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], 2), m3));

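// With SDOT (__ARM_FEATURE_DOTPROD) a single vdotq_s32 performs 16 int8
// multiply-accumulates; the fallback widens with vmull_s8 and reduces
// with horizontal adds.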
#if defined(__ARM_FEATURE_DOTPROD)
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes_1.val[0])) * aux[is+0];
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes_1.val[1])) * aux[is+1];
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[2], q8bytes_1.val[2])) * aux[is+2];
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[3], q8bytes_1.val[3])) * aux[is+3];
#else
int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes_1.val[0])),
vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes_1.val[0])));
int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes_1.val[1])),
vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes_1.val[1])));
int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[2]), vget_low_s8 (q8bytes_1.val[2])),
vmull_s8(vget_high_s8(q2bytes.val[2]), vget_high_s8(q8bytes_1.val[2])));
int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[3]), vget_low_s8 (q8bytes_1.val[3])),
vmull_s8(vget_high_s8(q2bytes.val[3]), vget_high_s8(q8bytes_1.val[3])));
isum += vaddvq_s16(p0) * aux[is+0] + vaddvq_s16(p1) * aux[is+1] + vaddvq_s16(p2) * aux[is+2] + vaddvq_s16(p3) * aux[is+3];
#endif
is += 4;

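// Same again for the upper half of each q2 byte (bit offsets 4 and 6),
// against the second batch of 64 q8 bytes.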
q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], 4), m3));
q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], 4), m3));
q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], 6), m3));
q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], 6), m3));

#if defined(__ARM_FEATURE_DOTPROD)
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes_2.val[0])) * aux[is+0];
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes_2.val[1])) * aux[is+1];
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[2], q8bytes_2.val[2])) * aux[is+2];
isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[3], q8bytes_2.val[3])) * aux[is+3];
#else
p0 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes_2.val[0])),
vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes_2.val[0])));
p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes_2.val[1])),
vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes_2.val[1])));
p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[2]), vget_low_s8 (q8bytes_2.val[2])),
vmull_s8(vget_high_s8(q2bytes.val[2]), vget_high_s8(q8bytes_2.val[2])));
p3 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[3]), vget_low_s8 (q8bytes_2.val[3])),
vmull_s8(vget_high_s8(q2bytes.val[3]), vget_high_s8(q8bytes_2.val[3])));
isum += vaddvq_s16(p0) * aux[is+0] + vaddvq_s16(p1) * aux[is+1] + vaddvq_s16(p2) * aux[is+2] + vaddvq_s16(p3) * aux[is+3];
#endif
is += 4;

}
sum += d * isum;

}

*s = sum;

#elif defined __AVX2__

const __m256i m3 = _mm256_set1_epi8(3);
const __m128i m4 = _mm_set1_epi8(0xF);
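For orientation, here is a minimal scalar sketch of the same computation. It is illustration only, not part of this diff: the function name is hypothetical, and it assumes QK_K == 256, the block_q2_K / block_q8_K layouts declared in k_quants.h, and ggml_fp16_to_fp32 from ggml, mirroring the structure the NEON path above vectorizes.

static void vec_dot_q2_K_q8_K_scalar(const int n, float * restrict s,
        const block_q2_K * restrict x, const block_q8_K * restrict y) {

    const int nb = n / QK_K;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;
        const uint8_t * restrict sc = x[i].scales;

        // mins (high nibbles) against the precomputed per-group q8 sums
        int summs = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            summs += y[i].bsums[j] * (sc[j] >> 4);
        }

        const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d);
        const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin);

        int isum = 0;
        int is = 0;
        for (int k = 0; k < QK_K/128; ++k) {             // two 128-quant halves
            for (int shift = 0; shift < 8; shift += 2) { // four 2-bit planes per q2 byte
                for (int b = 0; b < 2; ++b) {            // two 16-quant groups per plane
                    const int d = sc[is++] & 0xF;        // scale (low nibble)
                    int isuml = 0;
                    for (int l = 16*b; l < 16*b + 16; ++l) {
                        isuml += q8[l] * ((q2[l] >> shift) & 3);
                    }
                    isum += d * isuml;
                }
                q8 += 32;
            }
            q2 += 32;
        }

        sumf += dall * isum - dmin * summs;
    }

    *s = sumf;
}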