@@ -1405,6 +1405,7 @@ static bool whisper_encode(
         }
     }
 
+#ifndef WHISPER_USE_COREML
     struct ggml_tensor * cur;
 
     // convolution + gelu
@@ -1704,9 +1705,6 @@ static bool whisper_encode(
     wctx.use_buf(ctx0, -1);
 
     // run the computation
-#ifdef WHISPER_USE_COREML
-    whisper_coreml_encode(wctx.ctx_coreml, (float *) mel->data, (float *) cur->data);
-#else
     {
         struct ggml_cgraph gf = {};
         gf.n_threads = n_threads;
@@ -1716,6 +1714,12 @@ static bool whisper_encode(
 
         // ggml_graph_print(&gf);
     }
+#else
+    wctx.use_buf(ctx0, -1);
+
+    struct ggml_tensor * cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx);
+
+    whisper_coreml_encode(wctx.ctx_coreml, (float *) mel->data, (float *) cur->data);
 #endif
 
     // cur
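Taken together, the encoder hunks above decide at compile time which branch produces the encoder output. A minimal sketch of the resulting control flow inside whisper_encode, using only names visible in this diff; encode_with_ggml() is a hypothetical stand-in for the ggml graph construction that is compiled out when WHISPER_USE_COREML is defined:

// Sketch only, not the literal code from the diff.
#ifndef WHISPER_USE_COREML
    // default path: build the conv + transformer encoder graph with ggml,
    // run it, and leave the encoder output in `cur`
    struct ggml_tensor * cur = encode_with_ggml(wctx, ctx0, mel, n_threads); // hypothetical helper
#else
    // Core ML path: no ggml graph is built; allocate the output tensor and
    // let the Core ML encoder write into it directly from the mel data
    struct ggml_tensor * cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx);
    whisper_coreml_encode(wctx.ctx_coreml, (float *) mel->data, (float *) cur->data);
#endif
    // either way, `cur` feeds the cross-attention KV pre-computation that follows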
@@ -2569,12 +2573,15 @@ struct whisper_context * whisper_init_from_file(const char * path_model) {
 #ifdef WHISPER_USE_COREML
         const auto path_coreml = whisper_get_coreml_path(ctx->path_model);
         fprintf(stderr, "%s: loading Core ML model from '%s'\n", __func__, path_coreml.c_str());
+        fprintf(stderr, "%s: first run on a device may take a while ...\n", __func__);
 
         ctx->ctx_coreml = whisper_coreml_init(path_coreml.c_str());
         if (!ctx->ctx_coreml) {
             fprintf(stderr, "%s: failed to load Core ML model from '%s'\n", __func__, path_coreml.c_str());
             return nullptr;
         }
+
+        fprintf(stderr, "%s: Core ML model loaded\n", __func__);
 #endif
     }
 