ggml : fixed runtime bugs and compile errors related to GGML_PERF and GGML_DEBUG (ggerganov#2219)

* fixed runtime bugs and compile errors related to GGML_PERF and GGML_DEBUG

* remove ifdef GGML_PERF; update fmt
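
Note on "remove ifdef GGML_PERF": ggml.c gates its perf counters on that flag, and without it they compile to constant zeros, so the stats calls in the diff below can stay unconditional instead of being wrapped in #ifdef GGML_PERF. A paraphrase of the gating (assumed from the ggml.c of this era, not part of this diff):

    #ifdef GGML_PERF
    // real timers and cycle counters when perf instrumentation is enabled
    #define ggml_perf_time_ms()       ggml_time_ms()
    #define ggml_perf_time_us()       ggml_time_us()
    #define ggml_perf_cycles()        ggml_cycles()
    #define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
    #else
    // no-op counters otherwise: call sites still compile, stats stay zero
    #define ggml_perf_time_ms()       0
    #define ggml_perf_time_us()       0
    #define ggml_perf_cycles()        0
    #define ggml_perf_cycles_per_ms() 0
    #endif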
mqy authored Jul 16, 2023
1 parent 27ab66e commit 672dda1
Showing 1 changed file with 5 additions and 10 deletions.

ggml.c
@@ -4412,8 +4412,8 @@ void ggml_free(struct ggml_context * ctx) {
         if (&g_state.contexts[i].context == ctx) {
             g_state.contexts[i].used = false;

-            GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
-                    __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);
+            GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
+                    __func__, i, ggml_used_mem(ctx));

             if (ctx->mem_buffer_owned) {
                 GGML_ALIGNED_FREE(ctx->mem_buffer);
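
Note: the old debug print dereferenced ctx->objects_end unconditionally, which crashes when a context is freed before any object was allocated; ggml_used_mem() computes the same value behind a NULL check, which is presumably the runtime-bug fix here. Roughly (paraphrased from elsewhere in ggml.c):

    size_t ggml_used_mem(const struct ggml_context * ctx) {
        // objects form a linked list; the end offset of the last one is the
        // total memory used, and NULL means nothing was ever allocated
        return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
    }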
@@ -16317,8 +16317,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
                 if (GGML_OP_HAS_FINALIZE[node->op]) {
                     params.nth = n_tasks_arr[node_n];
                     ggml_compute_forward(&params, node);
-                    ggml_graph_compute_perf_stats_node(node, state->shared);
                 }
+                ggml_graph_compute_perf_stats_node(node, state->shared);
             }

             // distribute new work or execute it direct if 1T
@@ -16348,8 +16348,9 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
                 if (GGML_OP_HAS_FINALIZE[node->op]) {
                     params.type = GGML_TASK_FINALIZE;
                     ggml_compute_forward(&params, node);
-                    ggml_graph_compute_perf_stats_node(node, state->shared);
                 }
+
+                ggml_graph_compute_perf_stats_node(node, state->shared);
             } else {
                 break;
             }
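
Note: in this hunk and the previous one, the perf-stats call used to sit inside the GGML_OP_HAS_FINALIZE branch, so nodes whose op has no FINALIZE stage never accumulated timings; moving it after the branch records stats for every node. The helper itself (a paraphrase of the ggml.c definition) just accumulates per-node counters:

    static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node,
                                                   const struct ggml_compute_state_shared * st) {
        // elapsed counters since this node started computing
        int64_t cycles_cur  = ggml_perf_cycles()  - st->perf_node_start_cycles;
        int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;

        node->perf_runs++;
        node->perf_cycles  += cycles_cur;
        node->perf_time_us += time_us_cur;
    }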
@@ -16891,9 +16892,6 @@ static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char
 }

 void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
-    //assert(cgraph->work == NULL);
-    //assert(cgraph->work_size == 0);
-
     uint64_t size_eval = 0;

     // compute size of intermediate results
@@ -17332,9 +17330,6 @@ void ggml_graph_print(const struct ggml_cgraph * cgraph) {

     GGML_PRINT("=== GRAPH ===\n");

-    GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads);
-    GGML_PRINT_DEBUG("total work size = %zu bytes\n", cgraph->work_size);
-
     GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
     for (int i = 0; i < cgraph->n_nodes; i++) {
         struct ggml_tensor * node = cgraph->nodes[i];
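
Note: the two removed GGML_PRINT_DEBUG calls referenced cgraph->n_threads and cgraph->work_size, fields that no longer exist on ggml_cgraph after the compute-plan refactor moved the work buffer out of the graph, so the file failed to compile once GGML_DEBUG was enabled. By default the macro expands to nothing, which is why the breakage went unnoticed; the gating is roughly (paraphrased from ggml.c):

    #define GGML_DEBUG 0

    #if (GGML_DEBUG >= 1)
    #define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
    #else
    #define GGML_PRINT_DEBUG(...)   // compiled out unless GGML_DEBUG >= 1
    #endif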
