diff --git a/ggml.c b/ggml.c
index 98bc8e6167b130..38b16496677eb7 100644
--- a/ggml.c
+++ b/ggml.c
@@ -10720,7 +10720,7 @@ enum ggml_opt_result ggml_opt(
 
 ////////////////////////////////////////////////////////////////////////////////
 
-size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t * hist) {
+size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int qk, int64_t * hist) {
     const int nb = k / qk;
     const size_t bs = (sizeof(float) + sizeof(uint8_t)*qk/2);
     const size_t row_size = nb*bs;
@@ -10750,7 +10750,7 @@ size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t
     return (n/k)*row_size;
 }
 
-size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t * hist) {
+size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int qk, int64_t * hist) {
     const int nb = k / qk;
     const size_t bs = (2*sizeof(float) + sizeof(uint8_t)*qk/2);
     const size_t row_size = nb*bs;
diff --git a/ggml.h b/ggml.h
index 48b6cc028f901d..c7e6814a8bfde7 100644
--- a/ggml.h
+++ b/ggml.h
@@ -745,8 +745,8 @@ enum ggml_opt_result ggml_opt(
 // quantization
 //
 
-size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t * hist);
-size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t * hist);
+size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int qk, int64_t * hist);
+size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int qk, int64_t * hist);
 
 //
 // system info
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 4990c34322af07..6a4170f8032489 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,4 +1,9 @@
-set(TEST_TARGET test-tokenizer-0)
-add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)
-target_link_libraries(${TEST_TARGET} PRIVATE llama ggml utils)
-add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
+function(llama_add_test source)
+  get_filename_component(TEST_TARGET ${source} NAME_WE)
+  add_executable(${TEST_TARGET} ${source})
+  target_link_libraries(${TEST_TARGET} PRIVATE llama ggml utils)
+  add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
+endfunction()
+
+llama_add_test(test-quantize.c)
+llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
diff --git a/tests/test-quantize.c b/tests/test-quantize.c
new file mode 100644
index 00000000000000..d59ecb8abde893
--- /dev/null
+++ b/tests/test-quantize.c
@@ -0,0 +1,42 @@
+#include "ggml.h"
+#undef NDEBUG
+#include <assert.h>
+#include <math.h>
+
+int main(void) {
+    #define QK 32
+    float src[QK];
+    uint8_t dst[24];
+    int64_t hist[16];
+
+    for (int i = 0; i < QK; i++) {
+        src[i] = (float)(i + 1);
+    }
+
+    size_t size = ggml_quantize_q4_0(src, dst, QK, QK, QK, hist);
+    assert(size == 20);
+    float max_result = ((float *)dst)[0];
+    float max_expected = src[31] / ((1 << 3) - 1);
+    assert(max_result == max_expected);
+    for (int i = 0; i < QK; i++) {
+        uint8_t q4_result = (i % 2) ? (dst[sizeof(float) + i/2] >> 4) : (dst[sizeof(float) + i/2] & 0xF);
+        uint8_t q4_expected = roundf(src[i] / max_expected) + 8;
+        assert(q4_result == q4_expected);
+    }
+
+    size = ggml_quantize_q4_1(src, dst, QK, QK, QK, hist);
+    assert(size == 24);
+    float delta_result = ((float *)dst)[0];
+    float delta_expected = (src[31] - src[0]) / ((1 << 4) - 1);
+    assert(delta_result == delta_expected);
+    float min_result = ((float *)dst)[1];
+    float min_expected = src[0];
+    assert(min_result == min_expected);
+    for (int i = 0; i < QK; i++) {
+        uint8_t q4_result = (i % 2) ? (dst[sizeof(float)*2 + i/2] >> 4) : (dst[sizeof(float)*2 + i/2] & 0xF);
+        uint8_t q4_expected = roundf((src[i] - min_expected) / delta_expected);
+        assert(q4_result == q4_expected);
+    }
+
+    return 0;
+}