Merged

23 commits
1d1866d  Vulkan: Fix fprintf format-security warning (llama/14770)  0cc4m, Jul 19, 2025
c5262c2  vulkan: Add logging for bf16 features to ggml_vk_print_gpu_info (#132…  Peter0x44, Jul 19, 2025
d45d283  ggml: adds CONV_2D op and direct GEMM Vulkan implementation (llama/14…  etasnadi, Jul 19, 2025
2743522  vulkan/cuda: Fix im2col when KW!=KH (llama/14789)  jeffbolznv, Jul 21, 2025
aa65fde  kleidiai: add support for get_rows (llama/14676)  chaxu01, Jul 21, 2025
de18e9a  sycl: Fix im2col (llama/14797)  Rbiessy, Jul 21, 2025
58f4832  opencl: add conv2d kernel (llama/14403)  rmatif, Jul 21, 2025
64088bb  opencl: fix `im2col` when `KW!=KH` (llama/14803)  CISC, Jul 21, 2025
5485663  cuda: remove linking to cublasLt (llama/14790)  yeahdongcn, Jul 21, 2025
5b97e8e  opencl: remove unreachable `return` (llama/14806)  lhez, Jul 22, 2025
1dad821  cuda : implement bf16 cpy ops and enable bf16 cont (llama/14763)  CISC, Jul 22, 2025
6bc9c97  vulkan: fix rms_norm_mul to handle broadcasting dim0 (llama/14817)  jeffbolznv, Jul 22, 2025
b06d9cb  CUDA: add fused rms norm (llama/14800)  am17an, Jul 23, 2025
0ed1969  CANN: weight format to NZ for Ascend310P3 (llama/14407)  tqgy6, Jul 23, 2025
a44689f  ggml: fix loongarch quantize_row_q8_1 error (llama/14827)  lixing-star, Jul 23, 2025
d495158  tests : add non-cont K,V FA tests  ggerganov, Jul 18, 2025
e27e2cd  CUDA: fix quantized KV cache + multiple sequences (llama/14822)  JohannesGaessler, Jul 23, 2025
21c3ebd  CUDA: fix compilation with GGML_CUDA_F16 (llama/14837)  JohannesGaessler, Jul 23, 2025
5d1cc39  CUDA: fix overflow in FA, tune performance (llama/14840)  JohannesGaessler, Jul 23, 2025
1d54f61  sycl: fix undefined variable in work group size check (llama/14843)  djeong20, Jul 24, 2025
e282986  metal : fix fusion across different encoders (llama/14849)  ggerganov, Jul 24, 2025
8dcd3dc  sycl: fixed semantics of block offset calculation (llama/14814)  Jul 24, 2025
ee456e8  sync : llama.cpp  ggerganov, Jul 24, 2025
2 changes: 1 addition & 1 deletion scripts/sync-llama.last
@@ -1 +1 @@
b17230917c18a25af9cd143a941001466af845a2
2df255da3cea108de0ae9b302ffdd31674b6d88d
23 changes: 21 additions & 2 deletions src/ggml-cann/aclnn_ops.cpp
@@ -1785,8 +1785,27 @@ static void ggml_cann_mat_mul_fp(ggml_backend_cann_context& ctx,
size_t transpose_nb[] = {bcast_weight_nb[1], bcast_weight_nb[0],
bcast_weight_nb[2], bcast_weight_nb[3],
bcast_weight_nb[4], bcast_weight_nb[5]};
aclTensor* acl_weight_tensor =
ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims);
aclTensor* acl_weight_tensor;

bool weightToNZ = false;
#ifdef ASCEND_310P
weightToNZ = (getenv("GGML_CANN_WEIGHT_NZ") != nullptr);
#endif
if (weightToNZ && is_matmul_weight(weight)) {
int64_t acl_stride[2] = {1, transpose_ne[1]};

// Reverse ne.
std::reverse(transpose_ne, transpose_ne + n_dims);

std::vector<int64_t> storageDims = {transpose_ne[0], transpose_ne[1]};

acl_weight_tensor = aclCreateTensor(
transpose_ne, n_dims, ggml_cann_type_mapping(weight->type), acl_stride,
0, ACL_FORMAT_FRACTAL_NZ, storageDims.data(), 2, weight->data);
} else {
acl_weight_tensor =
ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims, ACL_FORMAT_ND);
}
aclTensor* acl_dst =
ggml_cann_create_tensor(dst, bcast_dst_ne, bcast_dst_nb, n_dims);

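A note on the gating in this hunk: the FRACTAL_NZ path is compile-time limited to Ascend 310P builds and opt-in at runtime via GGML_CANN_WEIGHT_NZ. A minimal standalone sketch of that check (use_nz_weight_format is a hypothetical helper name; the flag and variable are the ones in the diff):

```cpp
#include <cstdlib>

// Minimal sketch, assuming the same build flag and environment variable as
// the diff above.
static bool use_nz_weight_format() {
#ifdef ASCEND_310P
    // Runtime opt-in, e.g. `GGML_CANN_WEIGHT_NZ=1 ./main ...` in a shell.
    return std::getenv("GGML_CANN_WEIGHT_NZ") != nullptr;
#else
    return false;  // other Ascend targets keep the ND format
#endif
}
```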
32 changes: 32 additions & 0 deletions src/ggml-cann/aclnn_ops.h
@@ -23,6 +23,7 @@
#ifndef CANN_ACLNN_OPS
#define CANN_ACLNN_OPS

#include <unordered_set>
#include <functional>
#include <aclnnop/aclnn_abs.h>
#include <aclnnop/aclnn_neg.h>
@@ -1020,6 +1021,37 @@ inline void ggml_cann_async_memset(ggml_backend_cann_context & ctx, void * buffe
*/
void ggml_cann_mul_mat_id(ggml_backend_cann_context& ctx, ggml_tensor* dst);

/**
* @brief Check whether a tensor is a weight tensor for matrix multiplication.
*
 * @details Checks whether the given tensor serves as a weight parameter in matrix multiplication operations,
 * typically within neural network layers. The function maintains a static set of canonical weight
 * naming suffixes from Transformer-based architectures and uses substring matching, so weight
 * tensors are identified even under hierarchical naming patterns.
 *
 * @param tensor Pointer to the target ggml_tensor object (const-qualified).
 * @return true if the tensor name contains one of the canonical weight suffixes, false otherwise.
 */
static bool is_matmul_weight(const ggml_tensor* tensor) {
std::string name = ggml_get_name(tensor);
static const std::unordered_set<std::string> weight_suffixes{
"output.weight",
"attn_q.weight",
"attn_k.weight",
"attn_v.weight",
"attn_output.weight",
"ffn_gate.weight",
"ffn_up.weight",
"ffn_down.weight"
};

for (const auto& suffix : weight_suffixes) {
if (name.find(suffix) != std::string::npos) {
return true;
}
}
return false;
}
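For illustration, a standalone replica of the matching rule above (matches_weight_suffix is a hypothetical name; the suffix set is copied from the diff). A hierarchical name such as "blk.7.attn_q.weight" matches, while a norm weight does not:

```cpp
#include <cassert>
#include <string>
#include <unordered_set>

// Hypothetical standalone copy of the rule above, for illustration only.
static bool matches_weight_suffix(const std::string & name) {
    static const std::unordered_set<std::string> suffixes{
        "output.weight", "attn_q.weight", "attn_k.weight", "attn_v.weight",
        "attn_output.weight", "ffn_gate.weight", "ffn_up.weight", "ffn_down.weight"
    };
    for (const auto & s : suffixes) {
        if (name.find(s) != std::string::npos) {
            return true;
        }
    }
    return false;
}

int main() {
    assert(matches_weight_suffix("blk.7.attn_q.weight"));     // hierarchical name matches
    assert(!matches_weight_suffix("blk.7.attn_norm.weight")); // not in the suffix set
    return 0;
}
```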

/**
 * @brief Applies an element-wise operation to two input tensors using the CANN
* backend.
65 changes: 65 additions & 0 deletions src/ggml-cann/ggml-cann.cpp
@@ -24,6 +24,7 @@

#include <acl/acl.h>
#include <stdarg.h>
#include <aclnnop/aclnn_trans_matmul_weight.h>

#include <cmath>
#include <cstdio>
@@ -1115,6 +1116,63 @@ static enum ggml_status ggml_backend_cann_buffer_init_tensor(
return GGML_STATUS_SUCCESS;
}

static int CreateAclTensorWeight(const void *hostData, const std::vector<int64_t> &shape, void **deviceAddr,
aclDataType dataType, aclTensor **tensor)
{
uint64_t size = 1;
for (auto i : shape) {
size *= i;
}

const aclIntArray *mat2Size = aclCreateIntArray(shape.data(), shape.size());
ACL_CHECK(aclnnCalculateMatmulWeightSizeV2(mat2Size, dataType, &size));

size *= sizeof(int16_t);

ACL_CHECK(aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST));
aclrtMemcpy(*deviceAddr, size, hostData, size, ACL_MEMCPY_HOST_TO_DEVICE);

std::vector<int64_t> strides(shape.size(), 1);
for (int64_t i = shape.size() - 2; i >= 0; i--) {
strides[i] = shape[i + 1] * strides[i + 1];
}

*tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0, aclFormat::ACL_FORMAT_ND,
shape.data(), shape.size(), *deviceAddr);
return 0;
}
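The stride loop in CreateAclTensorWeight computes standard row-major strides: each stride is the product of the dimensions to its right, so the innermost dimension is contiguous. A self-contained check (the 4096 x 11008 shape is illustrative, not taken from the PR):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Self-contained check of the row-major stride computation above.
int main() {
    std::vector<int64_t> shape = {4096, 11008};
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = (int64_t) shape.size() - 2; i >= 0; i--) {
        strides[i] = shape[i + 1] * strides[i + 1];
    }
    assert(strides[0] == 11008 && strides[1] == 1);  // last dim contiguous
    return 0;
}
```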

static void weight_format_to_nz(ggml_tensor *tensor, const void *data, size_t offset) {
aclrtStream stream;
ACL_CHECK(aclrtCreateStream(&stream));

std::vector<int64_t> weightTransposedShape = {tensor->ne[1], tensor->ne[0]};
void *weightTransposedDeviceAddr = nullptr;
aclTensor *weightTransposed = nullptr;
CreateAclTensorWeight(data, weightTransposedShape, &weightTransposedDeviceAddr,
ggml_cann_type_mapping(tensor->type), &weightTransposed);

uint64_t workspaceSize = 0;
aclOpExecutor *executor;
void *workspaceAddr = nullptr;

// TransMatmulWeight
ACL_CHECK(aclnnTransMatmulWeightGetWorkspaceSize(weightTransposed, &workspaceSize, &executor));
std::unique_ptr<void, aclError (*)(void *)> workspaceAddrPtrTrans(nullptr, aclrtFree);
if (workspaceSize > 0) {
ACL_CHECK(aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST));
workspaceAddrPtrTrans.reset(workspaceAddr);
}
ACL_CHECK(aclnnTransMatmulWeight(workspaceAddr, workspaceSize, executor, stream));

size_t size = ggml_nelements(tensor) * ggml_element_size(tensor);

aclrtMemcpy((char *)tensor->data + offset, size,
weightTransposedDeviceAddr, size, ACL_MEMCPY_HOST_TO_DEVICE);
ACL_CHECK(aclDestroyTensor(weightTransposed));
aclrtFree(weightTransposedDeviceAddr);
}
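weight_format_to_nz follows the usual two-phase aclnn idiom: query the workspace size (which also yields an executor), allocate the workspace, then launch on a stream. A hedged sketch of that idiom as a reusable helper, not part of this PR; run_aclnn_op is a hypothetical name, error handling uses return codes instead of ACL_CHECK, and the types come from the ACL headers this file already includes:

```cpp
#include <acl/acl.h>  // aclError, aclrtStream, aclrtMalloc/Free; aclOpExecutor
                      // comes from the aclnn op headers included in this file.

// Hedged sketch of the GetWorkspaceSize/launch pairing, under the assumption
// that every aclnn op splits into a size query and a launch taking
// (workspace, size, executor, stream).
template <typename GetSize, typename Launch>
static aclError run_aclnn_op(GetSize get_size, Launch launch, aclrtStream stream) {
    uint64_t ws_size = 0;
    aclOpExecutor * executor = nullptr;
    aclError err = get_size(&ws_size, &executor);
    if (err != ACL_SUCCESS) {
        return err;
    }
    void * ws = nullptr;
    if (ws_size > 0) {
        err = aclrtMalloc(&ws, ws_size, ACL_MEM_MALLOC_HUGE_FIRST);
        if (err != ACL_SUCCESS) {
            return err;
        }
    }
    err = launch(ws, ws_size, executor, stream);
    if (ws != nullptr) {
        aclrtFree(ws);
    }
    return err;
}
```

With such a helper, the call above would pass a lambda wrapping aclnnTransMatmulWeightGetWorkspaceSize as the size query and aclnnTransMatmulWeight as the launch.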

// TODO: need to handle tensors which have padding.
/**
* @brief Set tensor data in a CANN buffer.
@@ -1139,9 +1197,16 @@ static void ggml_backend_cann_buffer_set_tensor(
// For acl, synchronous functions use this default stream.
// Why aclrtSynchronizeDevice?

bool weightToNZ = false;
#ifdef ASCEND_310P
weightToNZ = (getenv("GGML_CANN_WEIGHT_NZ") != nullptr);
#endif
if (!need_transform(tensor->type)) {
ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size, data, size,
ACL_MEMCPY_HOST_TO_DEVICE));
if (weightToNZ && is_matmul_weight((const ggml_tensor*)tensor)) {
weight_format_to_nz(tensor, data, offset);
}
} else {
void *transform_buffer = malloc(size);
ggml_backend_cann_transform(tensor, data, transform_buffer);
4 changes: 2 additions & 2 deletions src/ggml-cpu/CMakeLists.txt
@@ -496,9 +496,9 @@ function(ggml_add_cpu_backend_variant_impl tag_name)

# Fetch KleidiAI sources:
include(FetchContent)
set(KLEIDIAI_COMMIT_TAG "v1.9.0")
set(KLEIDIAI_COMMIT_TAG "v1.11.0")
set(KLEIDIAI_DOWNLOAD_URL "https://github.com/ARM-software/kleidiai/archive/refs/tags/${KLEIDIAI_COMMIT_TAG}.tar.gz")
set(KLEIDIAI_ARCHIVE_MD5 "2a8e1bb55d201557553545536489a017")
set(KLEIDIAI_ARCHIVE_MD5 "3fe9e5ab964c375c53839296eb71eaa2")

if (POLICY CMP0135)
cmake_policy(SET CMP0135 NEW)
2 changes: 1 addition & 1 deletion src/ggml-cpu/arch/loongarch/quants.c
@@ -544,7 +544,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i
__m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs, 0) );
max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) );
__m128 tmp = max4;
max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vextrins_w((__m128i)tmp, (__m128i)max4, 0x10 ));
max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vextrins_w((__m128i)tmp, (__m128i)max4, 0x1 ));
const float max_scalar = ((v4f32)max4)[0];

// Quantize these floats
121 changes: 109 additions & 12 deletions src/ggml-cpu/kleidiai/kernels.cpp
@@ -22,9 +22,94 @@

#include "kai_common.h"

#include "simd-mappings.h"

#include "kernels.h"

#define NELEMS(x) sizeof(x) / sizeof(*x)

static const size_t INT4_PER_BYTE = 2;
static const size_t INT4_BITS = 4;
static const int Q4_0_ZERO_POINT = 8;
const size_t INT4_PER_UINT16 = 4;

static void dequantize_row_qsi4c32pscalef16(
const void *packed_data,
int32_t row_idx,
int64_t nc,
float *out,
size_t nr_pack,
size_t packed_row_stride,
size_t kr,
size_t bl,
size_t num_bytes_multiplier
) {
size_t group_idx = row_idx / nr_pack;
size_t row_in_group = row_idx % nr_pack;
const uint8_t *packed_group = (const uint8_t *)packed_data + group_idx * packed_row_stride;
size_t num_blocks = nc / bl;
const uint8_t *block_ptr = packed_group;

for (size_t b = 0; b < num_blocks; ++b) {
uint16_t scale_f16 = *((const uint16_t *)(block_ptr + row_in_group * num_bytes_multiplier));
float scale = GGML_CPU_FP16_TO_FP32(scale_f16);

const uint8_t *segment_ptr = block_ptr + nr_pack * num_bytes_multiplier;
size_t num_segments = bl / kr;
size_t num_bytes_per_segment = kr / INT4_PER_BYTE;

for (size_t s = 0; s < num_segments; ++s) {
const uint8_t *seg_base = segment_ptr + s * nr_pack * num_bytes_per_segment;
const uint8_t *qbytes = seg_base + row_in_group * num_bytes_per_segment;
for (size_t k = 0; k < num_bytes_per_segment; ++k) {
uint8_t byte = qbytes[k] ^ 0x88;
int x0 = (byte & 0x0F) - Q4_0_ZERO_POINT;
int x1 = (byte >> INT4_BITS) - Q4_0_ZERO_POINT;
out[b * bl + s * num_bytes_per_segment + k] = x0 * scale;
out[b * bl + s * num_bytes_per_segment + k + bl/2] = x1 * scale;
}
}
block_ptr += nr_pack * num_bytes_multiplier + num_segments * nr_pack * num_bytes_per_segment;
}
}
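The `qbytes[k] ^ 0x88` above flips the high bit of both nibbles in a byte, mapping two's-complement int4 values onto offset-binary so that subtracting Q4_0_ZERO_POINT (8) recovers the signed value. A short exhaustive check of the per-nibble identity (a standalone sketch, assuming the packed nibbles are two's-complement int4 as the qsi4 prefix suggests):

```cpp
#include <cassert>
#include <cstdint>

// Exhaustive check: for a two's-complement int4 nibble n, ((n ^ 0x8) & 0xF) - 8 == n.
// This is the per-nibble effect of `qbytes[k] ^ 0x88` in the kernel above.
int main() {
    for (int v = -8; v <= 7; ++v) {
        uint8_t nibble  = (uint8_t) v & 0x0F;           // two's-complement storage
        int     decoded = ((nibble ^ 0x8) & 0x0F) - 8;  // what the kernel computes
        assert(decoded == v);
    }
    return 0;
}
```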

static void dequantize_row_qsi4c32ps1s0scalef16(
const void *packed_data,
int32_t row_idx,
int64_t k,
float *out,
size_t nr,
size_t packed_row_stride,
size_t kr,
size_t bl,
size_t num_bytes_multiplier
) {
const size_t num_blocks = k / bl;
const size_t bl4 = bl / INT4_PER_UINT16;

size_t group_idx = row_idx / nr;
size_t row_in_group = row_idx % nr;

const uint8_t *packed_group = (const uint8_t *)packed_data + group_idx * packed_row_stride;
const uint16_t *qdata = (const uint16_t *)packed_group;
const uint16_t *scales = (const uint16_t *)(packed_group + packed_row_stride - (nr * num_blocks * num_bytes_multiplier));

for (size_t block_idx = 0; block_idx < num_blocks; ++block_idx) {
uint16_t scale_f16 = scales[row_in_group + block_idx * nr];
float scale = GGML_CPU_FP16_TO_FP32(scale_f16);

for (size_t bl4_idx = 0; bl4_idx < bl4; ++bl4_idx) {
uint16_t q = qdata[(block_idx * bl4 + bl4_idx) * nr + row_in_group];

for (size_t qidx = 0; qidx < INT4_PER_UINT16; ++qidx) {
int v = ((q >> (qidx * 4)) & 0xF) - Q4_0_ZERO_POINT;
out[block_idx * bl + bl4_idx * INT4_BITS + qidx] = v * scale;
}
}
}
GGML_UNUSED(kr);
}
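In the s1s0 layout above, each uint16_t carries four int4 values (INT4_PER_UINT16 == 4), extracted low-to-high 4 bits at a time and shifted by the zero point. A spot check of that extraction arithmetic, using an arbitrary test word:

```cpp
#include <cassert>
#include <cstdint>

// Spot check of the nibble extraction above; 0xCA83 is an arbitrary value.
int main() {
    const uint16_t q = 0xCA83;  // nibbles, low to high: 0x3, 0x8, 0xA, 0xC
    const int expected[4] = {0x3 - 8, 0x8 - 8, 0xA - 8, 0xC - 8};
    for (int i = 0; i < 4; ++i) {
        int v = ((q >> (i * 4)) & 0xF) - 8;  // same expression as the kernel
        assert(v == expected[i]);
    }
    return 0;
}
```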

static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
#if defined(__ARM_FEATURE_SME)
{
@@ -63,8 +148,10 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
/* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32_neon,
},
/* .rhs_info = */ {
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon,
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon,
/* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon,
/* .to_float = */ dequantize_row_qsi4c32ps1s0scalef16,
},
/* .required_cpu = */ CPU_FEATURE_SME,
/* .lhs_type = */ GGML_TYPE_F32,
@@ -107,8 +194,10 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
/* .pack_func = */ kai_run_lhs_pack_bf16p2vlx2_f32_sme,
},
/* .rhs_info = */ {
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme,
/* .pack_func = */ kai_run_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme,
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme,
/* .packed_stride = */ NULL,
/* .pack_func = */ kai_run_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme,
/* .to_float = */ NULL,
},
/* .required_cpu = */ CPU_FEATURE_SME,
/* .lhs_type = */ GGML_TYPE_F32,
@@ -154,8 +243,10 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
/* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32,
},
/* .rhs_info = */ {
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .to_float = */ dequantize_row_qsi4c32pscalef16,
},
/* .required_cpu = */ CPU_FEATURE_DOTPROD,
/* .lhs_type = */ GGML_TYPE_F32,
@@ -200,8 +291,10 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
/* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32,
},
/* .rhs_info = */ {
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .to_float = */ dequantize_row_qsi4c32pscalef16,
},
/* .required_cpu = */ CPU_FEATURE_DOTPROD | CPU_FEATURE_I8MM,
/* .lhs_type = */ GGML_TYPE_F32,
@@ -247,8 +340,10 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
/* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32,
},
/* .rhs_info = */ {
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .to_float = */ dequantize_row_qsi4c32pscalef16,
},
/* .required_cpu = */ CPU_FEATURE_DOTPROD | CPU_FEATURE_I8MM,
/* .lhs_type = */ GGML_TYPE_F32,
@@ -293,8 +388,10 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
/* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32,
},
/* .rhs_info = */ {
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .pack_func = */ kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
/* .to_float = */ dequantize_row_qsi4c32pscalef16,
},
/* .required_cpu = */ CPU_FEATURE_DOTPROD,
/* .lhs_type = */ GGML_TYPE_F32,
3 changes: 3 additions & 0 deletions src/ggml-cpu/kleidiai/kernels.h
@@ -71,12 +71,15 @@ struct rhs_packing_info {
std::function<size_t(size_t n, size_t k, size_t nr, size_t kr, size_t bl)>,
std::function<size_t(size_t n, size_t k)>
> packed_size;
size_t (*packed_stride)(size_t k, size_t nr, size_t kr, size_t bl);
std::variant<
std::function<void(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t bl, const uint8_t* rhs,
const float* bias, void* rhs_packed, size_t extra_bytes, const struct kai_rhs_pack_qs4cxs1s0_param* params)>,
std::function<void(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t rhs_stride, const void* rhs,
const void* bias, const void* scale, void* rhs_packed, size_t extra_bytes, const void* params)>
> pack_func;
void (*to_float)(const void *packed_data, int32_t row_idx, int64_t nc, float *out, size_t nr_pack, size_t packed_row_stride,
size_t kr, size_t bl, size_t num_bytes_multiplier);
};

struct ggml_kleidiai_kernels {
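The two new rhs_packing_info members land together: packed_stride reports the byte stride of one packed row group, and to_float dequantizes a packed row back to float. A hedged sketch of how a caller might combine them (not part of the PR; local names are illustrative, and the bf16 SME variant leaves both members NULL as shown above):

```cpp
#include <cstddef>
#include <cstdint>

#include "kernels.h"  // rhs_packing_info, from this repo

// Hedged sketch: dequantize one packed RHS row via the two new members.
// sizeof(uint16_t) matches the f16 scales these kernels store per block.
static bool dequantize_one_row(const rhs_packing_info & info,
                               const void * packed, int32_t row,
                               int64_t k, float * out,
                               size_t nr, size_t kr, size_t bl) {
    if (info.to_float == nullptr || info.packed_stride == nullptr) {
        return false;  // variant does not support dequantization (e.g. bf16 SME)
    }
    const size_t stride = info.packed_stride(k, nr, kr, bl);
    info.to_float(packed, row, k, out, nr, stride, kr, bl, sizeof(uint16_t));
    return true;
}
```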