From 8ced9f7e3225adb8501e9821ed1bbd92e3a5c7ae Mon Sep 17 00:00:00 2001
From: Neo Zhang Jianyu
Date: Wed, 6 Mar 2024 12:08:32 +0800
Subject: [PATCH] add wait() to make code stable (#5895)

---
 ci/run.sh     |  3 ++-
 ggml-sycl.cpp | 59 +++++++++++++++++++++++++++++++++++++++------------
 2 files changed, 48 insertions(+), 14 deletions(-)

diff --git a/ci/run.sh b/ci/run.sh
index 35eb3c7aa00ef..51f4c74cc2cf5 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -45,7 +45,8 @@ fi
 
 if [ ! -z ${GG_BUILD_SYCL} ]; then
     if [ -z ${ONEAPI_ROOT} ]; then
-        echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:\n source /opt/intel/oneapi/setvars.sh"
+        echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:"
+        echo "source /opt/intel/oneapi/setvars.sh"
         exit 1
     fi
 
diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 477f5cb02db52..ddd951dd684a7 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -3769,8 +3769,42 @@ void log_ggml_var_device(const char*name, float *src, size_t total_elements, bool src_on_device){
     std::ofstream logfile;
     logfile.open(filename);
     for(size_t i=0; i<total_elements; i++){
         if((i+1)%20 ==0) logfile <<std::endl;
         else logfile << local_buf[i]<<" ";
+    }
+    logfile <<std::endl;
+    logfile.close();
+
+    if(src_on_device) ggml_sycl_host_free(local_buf);
+}
+
+void log_ggml_var_device_fp16(const char*name, sycl::half *src, size_t total_elements, bool src_on_device){
+    if(!g_ggml_sycl_debug) return;
+    if(!src){
+        printf("GGML Tensor:%s skip to save for NULL pointer\n", name);
+        return;
+    }
+    char filename[1024];
+    sprintf(filename, "%s.txt", name);
+    printf("GGML Tensor:%s save to %s\n", name, filename);
+
+    size_t total_size = total_elements*sizeof(sycl::half);
+    sycl::half* local_buf = NULL;
+    if(src_on_device) {
+        local_buf = (sycl::half *) ggml_sycl_host_malloc(total_size);
+        ggml_sycl_set_device(g_main_device);
+        dpct::queue_ptr main_stream = g_syclStreams[g_main_device][0];
+        main_stream->memcpy(local_buf, src, total_size).wait();
+    }
+    else {
+        local_buf = (sycl::half *)src;
+    }
+
+    std::ofstream logfile;
+    logfile.open(filename);
+    for(size_t i=0; i<total_elements; i++){
+        if((i+1)%20 ==0) logfile <<std::endl;
+        else logfile << local_buf[i]<<" ";
     }
     logfile <<std::endl;
     logfile.close();
@@ -14121,6 +14155,7 @@ inline void ggml_sycl_op_mul_mat_sycl(
             src1_ptr, dpct::library_data_t::real_half, ne10, &beta_f16,
             dst_f16.get(), dpct::library_data_t::real_half, ldc,
             dpct::library_data_t::real_half)));
+        g_sycl_handles[id]->wait();
         const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16);
         to_fp32_sycl(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream);
     }
@@ -14159,6 +14193,7 @@ inline void ggml_sycl_op_mul_mat_sycl(
             dpct::get_value(&alpha, *g_sycl_handles[id]), src0_ddf_i, ne00,
             src1_ddf1_i, ne10, dpct::get_value(&beta, *g_sycl_handles[id]),
             dst_dd_i, ldc)));
+        g_sycl_handles[id]->wait();
     }
     (void) dst;
     (void) src1_ddq_i;
@@ -15295,8 +15330,8 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0,
     sycl_pool_alloc<sycl::half> dst_f16;
     char * dst_t;
 
-    dpct::library_data_t cu_compute_type = dpct::library_data_t::real_half;
-    dpct::library_data_t cu_data_type = dpct::library_data_t::real_half;
+    dpct::library_data_t cu_compute_type = dpct::library_data_t::real_float;
+    dpct::library_data_t cu_data_type = dpct::library_data_t::real_float;
 
     // dst strides
     size_t nbd2 = dst->nb[2];
@@ -15308,15 +15343,13 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0,
     const float alpha_f32 = 1.0f;
     const float beta_f32 = 0.0f;
 
-    const void * alpha = &alpha_f16;
-    const void * beta = &beta_f16;
+    const void * alpha = &alpha_f32;
+    const void * beta = &beta_f32;
 
     // TODO: Renable (dst->op_params[0] =! GGML_PREC_DEFAULT) pathway
-    // once oneMKL open source supports half, half, float, float: datatypes
-    dst_t = (char *) dst_f16.alloc(ne_dst);
+    // oneMKL open source supports half, half, float, float: datatypes
 
-    nbd2 /= sizeof(float) / sizeof(sycl::half);
-    nbd3 /= sizeof(float) / sizeof(sycl::half);
+    dst_t = (char *) dst_ddf;
 
     GGML_ASSERT(ne12 % ne02 == 0);
     GGML_ASSERT(ne13 % ne03 == 0);
@@ -15356,6 +15389,7 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0,
             nb11 / nb10, nb12 / nb10, beta,
             (char *)dst_t, cu_data_type, ne01, nb2 / nb0, ne12 * ne13,
             cu_compute_type)));
+        g_sycl_handles[g_main_device]->wait();
     } else {
         const int ne23 = ne12*ne13;
 
@@ -15386,7 +15420,7 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0,
                         nb02, nb03, nb12_scaled, nb13_scaled, nbd2, nbd3, r2,
                         r3, item_ct1);
                 });
-        });
+        }).wait();
     }
     SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch(
         *g_sycl_handles[g_main_device], oneapi::mkl::transpose::trans,
@@ -15397,11 +15431,10 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0,
             dpct::library_data_t::real_half, nb11 / nb10, beta,
             (void **)(ptrs_dst.get() + 0 * ne23), cu_data_type, ne01, ne23,
             cu_compute_type)));
+        g_sycl_handles[g_main_device]->wait();
     }
 #endif
 
-    const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16);
-    to_fp32_sycl(dst_f16.get(), dst_ddf, ne_dst, main_stream);
 }
 catch (sycl::exception const &exc) {
   std::cerr << exc.what() << "Exception caught at file:" << __FILE__