16 changes: 16 additions & 0 deletions ggml/src/ggml-impl.h
@@ -682,6 +682,7 @@ static inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph,
#endif

#ifdef __cplusplus
#include <array>
#include <initializer_list>
#include <vector>

@@ -697,6 +698,21 @@ inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph,
    return ggml_can_fuse_subgraph(cgraph, start_idx, ops.size(), ops.begin(), outputs.begin(), outputs.size());
}

// Return true if the edges in the graph match expectations. Each edge is
// {dst_node, src_idx, src_node}, relative to start_idx: the node at
// start_idx + dst_node must have the node at start_idx + src_node as its
// src[src_idx] input.
inline bool ggml_check_edges(const struct ggml_cgraph * cgraph,
                             int start_idx,
                             std::initializer_list<std::array<int, 3>> edges) {
    for (const auto & edge : edges) {
        int dst_node = edge[0];
        int src_idx  = edge[1];
        int src_node = edge[2];
        if (cgraph->nodes[start_idx + dst_node]->src[src_idx] != cgraph->nodes[start_idx + src_node]) {
            return false;
        }
    }
    return true;
}
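
For illustration, a minimal hypothetical caller for the fusion this PR implements — the real check lives in the unrendered ggml-vulkan.cpp changes and presumably also runs `ggml_can_fuse_subgraph` first; the helper name below is invented:

```cpp
#include "ggml-impl.h"

// Sketch only: verify the wiring of a ROPE -> VIEW -> SET_ROWS chain that
// starts at cgraph->nodes[i], using the edge encoding defined above.
static bool rope_set_rows_wired(const struct ggml_cgraph * cgraph, int i) {
    return ggml_check_edges(cgraph, i, {
        { 1, 0, 0 },  // nodes[i+1]->src[0] == nodes[i+0]  (VIEW reads ROPE)
        { 2, 0, 1 },  // nodes[i+2]->src[0] == nodes[i+1]  (SET_ROWS reads VIEW)
    });
}
```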

// expose GGUF internals for test code
GGML_API size_t gguf_type_size(enum gguf_type type);
GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
332 changes: 246 additions & 86 deletions ggml/src/ggml-vulkan/ggml-vulkan.cpp

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions ggml/src/ggml-vulkan/vulkan-shaders/rope_head.glsl
@@ -10,6 +10,7 @@ layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) readonly buffer Y {int data_pos[];};
layout (binding = 2) readonly buffer Z {float data_ff[];};
layout (binding = 3) writeonly buffer D {D_TYPE data_d[];};
layout (binding = 4) readonly buffer I {uvec2 data_i[];}; // indices for set_rows

layout (push_constant) uniform parameter {
    uint ncols;
@@ -27,6 +28,7 @@ layout (push_constant) uniform parameter {
    uint s2;
    int sections[4];
    uint is_back;
    uint set_rows_stride;
} p;

float rope_yarn_ramp(const float low, const float high, const uint i0) {
13 changes: 10 additions & 3 deletions ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp
@@ -16,12 +16,19 @@ void main() {
     const uint row_x = row_dst % ne1;
     const uint channel_x = row_dst / ne1;

-    const uint idst = row_dst*ne0 + i0/2;
+    uint idst = row_dst*ne0 + i0/2;
     const uint ix = channel_x*p.s2 + row_x*p.s1 + i0/2;

+    // Fusion optimization: ROPE + VIEW + SET_ROWS.
+    // The rope output is viewed as a 1D tensor and offset based on a row index in data_i.
+    if (p.set_rows_stride != 0) {
+        idst  = row_x*ne0 + i0/2;
+        idst += data_i[channel_x].x * p.set_rows_stride;
+    }
+
     if (i0 >= p.n_dims) {
-        data_d[idst + i0/2 + 0] = data_a[ix + i0/2 + 0];
-        data_d[idst + i0/2 + 1] = data_a[ix + i0/2 + 1];
+        data_d[idst + i0/2 + 0] = D_TYPE(data_a[ix + i0/2 + 0]);
+        data_d[idst + i0/2 + 1] = D_TYPE(data_a[ix + i0/2 + 1]);

         return;
     }
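
To make the fused addressing concrete: instead of writing to its own row `row_dst`, each token's output lands at the destination row chosen by `data_i[channel_x].x`. A standalone mirror of that index math (sketch, not shader code; values illustrative):

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the shader's fused destination index: row offset within the token's
// slice, plus the SET_ROWS-selected row times the element stride between rows.
static uint32_t fused_idst(uint32_t row_x, uint32_t ne0, uint32_t i0,
                           uint32_t data_i_x, uint32_t set_rows_stride) {
    return row_x * ne0 + i0 / 2 + data_i_x * set_rows_stride;
}

int main() {
    // e.g. head size ne0 = 128 and 32 heads: one destination row spans
    // 128*32 = 4096 elements, so row id 7 starts at element 28672
    assert(fused_idst(3, 128, 10, 7, 128 * 32) == 3 * 128 + 5 + 7 * 4096);
    return 0;
}
```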
13 changes: 10 additions & 3 deletions ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp
@@ -16,12 +16,19 @@ void main() {
     const uint row_x = row_dst % ne1;
     const uint channel_x = row_dst / ne1;

-    const uint idst = row_dst*ne0 + i0;
+    uint idst = row_dst*ne0 + i0;
     const uint ix = channel_x*p.s2 + row_x*p.s1 + i0;

+    // Fusion optimization: ROPE + VIEW + SET_ROWS.
+    // The rope output is viewed as a 1D tensor and offset based on a row index in data_i.
+    if (p.set_rows_stride != 0) {
+        idst  = row_x*ne0 + i0;
+        idst += data_i[channel_x].x * p.set_rows_stride;
+    }
+
     if (i0 >= p.n_dims) {
-        data_d[idst + 0] = data_a[ix + 0];
-        data_d[idst + 1] = data_a[ix + 1];
+        data_d[idst + 0] = D_TYPE(data_a[ix + 0]);
+        data_d[idst + 1] = D_TYPE(data_a[ix + 1]);

         return;
     }
4 changes: 4 additions & 0 deletions ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
@@ -841,10 +841,14 @@ void process_shaders() {
    string_to_spv("rope_norm_f32", "rope_norm.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
    string_to_spv("rope_norm_f16", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
    string_to_spv("rope_norm_f16_rte", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}});
    string_to_spv("rope_norm_f32_f16", "rope_norm.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
    string_to_spv("rope_norm_f32_f16_rte", "rope_norm.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}});

    string_to_spv("rope_neox_f32", "rope_neox.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
    string_to_spv("rope_neox_f16", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
    string_to_spv("rope_neox_f16_rte", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}});
    string_to_spv("rope_neox_f32_f16", "rope_neox.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
    string_to_spv("rope_neox_f32_f16_rte", "rope_neox.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}});

    string_to_spv("rope_multi_f32", "rope_multi.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
    string_to_spv("rope_multi_f16", "rope_multi.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
122 changes: 97 additions & 25 deletions tests/test-backend-ops.cpp
@@ -2105,6 +2105,34 @@ struct test_get_rows_back : public test_case {
}
};

static void init_set_rows_row_ids(ggml_tensor * t, int num_rows) {
    std::random_device rd;
    std::default_random_engine rng(rd());
    for (int i2 = 0; i2 < t->ne[2]; i2++) {
        for (int i1 = 0; i1 < t->ne[1]; i1++) {
            // generate a shuffled subset of row indices
            std::vector<int64_t> data(num_rows);
            for (int i = 0; i < num_rows; i++) {
                data[i] = i;
            }
            std::shuffle(data.begin(), data.end(), rng);
            data.resize(t->ne[0]);

            const size_t offs = i1*t->nb[1] + i2*t->nb[2];
            if (t->type == GGML_TYPE_I32) {
                // TODO: Make a template or something
                std::vector<int32_t> data_i32(t->ne[0]);
                for (int i = 0; i < t->ne[0]; i++) {
                    data_i32[i] = static_cast<int32_t>(data[i]);
                }
                ggml_backend_tensor_set(t, data_i32.data(), offs, t->ne[0]*sizeof(int32_t));
            } else {
                ggml_backend_tensor_set(t, data.data(), offs, t->ne[0]*sizeof(int64_t));
            }
        }
    }
}
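
The TODO above could be resolved with a small template; a sketch (not part of this PR), assuming the same `ggml_backend_tensor_set` contract:

```cpp
// Possible templated replacement for the I32/I64 branches (sketch only):
// one code path that narrows the shuffled int64 row ids to the tensor's type.
template <typename TRow>
static void set_row_ids_as(ggml_tensor * t, const std::vector<int64_t> & rows, size_t offs) {
    std::vector<TRow> tmp(t->ne[0]);
    for (int64_t i = 0; i < t->ne[0]; i++) {
        tmp[i] = static_cast<TRow>(rows[i]);
    }
    ggml_backend_tensor_set(t, tmp.data(), offs, t->ne[0]*sizeof(TRow));
}

// usage: t->type == GGML_TYPE_I32 ? set_row_ids_as<int32_t>(t, data, offs)
//                                 : set_row_ids_as<int64_t>(t, data, offs);
```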

// GGML_OP_SET_ROWS
struct test_set_rows : public test_case {
const ggml_type type;
@@ -2148,37 +2176,13 @@ struct test_set_rows : public test_case {
}

     void initialize_tensors(ggml_context * ctx) override {
-        std::random_device rd;
-        std::default_random_engine rng(rd());
         for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
             if (t->type == GGML_TYPE_I64 || t->type == GGML_TYPE_I32) {
                 if (ggml_is_view_op(t->op)) {
                     continue;
                 }

-                for (int i2 = 0; i2 < t->ne[2]; i2++) {
-                    for (int i1 = 0; i1 < t->ne[1]; i1++) {
-                        // generate a shuffled subset of row indices
-                        std::vector<int64_t> data(ne[1]);
-                        for (int i = 0; i < ne[1]; i++) {
-                            data[i] = i;
-                        }
-                        std::shuffle(data.begin(), data.end(), rng);
-                        data.resize(t->ne[0]);
-
-                        const size_t offs = i1*t->nb[1] + i2*t->nb[2];
-                        if (t->type == GGML_TYPE_I32) {
-                            // TODO: Make a template or something
-                            std::vector<int32_t> data_i32(t->ne[0]);
-                            for (int i = 0; i < t->ne[0]; i++) {
-                                data_i32[i] = static_cast<int32_t>(data[i]);
-                            }
-                            ggml_backend_tensor_set(t, data_i32.data(), offs, t->ne[0]*sizeof(int32_t));
-                        } else {
-                            ggml_backend_tensor_set(t, data.data(), offs, t->ne[0]*sizeof(int64_t));
-                        }
-                    }
-                }
+                init_set_rows_row_ids(t, ne[1]);
             } else {
                 init_tensor_uniform(t);
             }
@@ -2207,6 +2211,67 @@
}
};

// GGML_OP_ROPE + GGML_OP_VIEW + GGML_OP_SET_ROWS
struct test_rope_set_rows : public test_case {
    const ggml_type type;
    const ggml_type type_idx;
    const std::array<int64_t, 4> ne;
    int mode;

    std::string vars() override {
        return VARS_TO_STR4(type, type_idx, ne, mode);
    }

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "ROPE_SET_ROWS";
    }

    bool run_whole_graph() override { return true; }

    test_rope_set_rows(ggml_type type,
            ggml_type type_idx,
            std::array<int64_t, 4> ne,
            int mode)
        : type(type), type_idx(type_idx), ne(ne), mode(mode) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, ne[0], ne[1], ne[2], 1);
        ggml_set_name(src, "src");

        ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne[2]);

        ggml_tensor * rope = ggml_rope(ctx, src, pos, ne[0], mode);

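        // view the rope output as ne[2] rows of ne[0]*ne[1] elements each, so
        // set_rows can scatter one row per token position into the larger dst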
        ggml_tensor * view = ggml_view_2d(ctx, rope, ne[0] * ne[1], ne[2], rope->nb[2], 0);

        ggml_tensor * dst = ggml_new_tensor_4d(ctx, type, ne[0] * ne[1], ne[2] * ne[3], 1, 1);
        ggml_set_name(dst, "dst");

        ggml_tensor * row_idxs = ggml_new_tensor_3d(ctx, type_idx, ne[2], 1, 1);
        ggml_set_name(row_idxs, "row_idxs");

        ggml_tensor * out = ggml_set_rows(ctx, dst, view, row_idxs);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I64 || t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) {
                    continue;
                }

                init_set_rows_row_ids(t, ne[2]);
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_ARGMAX
struct test_argmax : public test_case {
const ggml_type type;
@@ -6008,6 +6073,13 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
        }
    }

    for (int mode : { GGML_ROPE_TYPE_NORMAL, GGML_ROPE_TYPE_NEOX }) {
        for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) {
            test_cases.emplace_back(new test_rope_set_rows(type, GGML_TYPE_I64, { 128, 32, 1, 100 }, mode));
            test_cases.emplace_back(new test_rope_set_rows(type, GGML_TYPE_I64, { 128, 32, 512, 1 }, mode));
        }
    }

    for (ggml_type type_input : {GGML_TYPE_F32}) {
        for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) {
            for (int k0 : {1, 3}) {