From 4eff21ffce9560fdb2114426cb91ab90e3324888 Mon Sep 17 00:00:00 2001
From: Steffen Larsen
Date: Tue, 29 Nov 2022 17:50:02 +0100
Subject: [PATCH] [SYCL][CUDA] Add SM version check to bfloat16 CUDA test
 (intel/llvm-test-suite#1423)

* [SYCL][CUDA] Add SM version check to bfloat16 CUDA test

bfloat16 requires SM80 on the CUDA backend. This commit changes CUDA
tests to do an early exit if that requirement is not met.

* Remove CUDA specific test and try run SM80

Signed-off-by: Larsen, Steffen
---
 SYCL/BFloat16/bfloat16_type.cpp      | 31 ++++++++++++++++++++++------
 SYCL/BFloat16/bfloat16_type_cuda.cpp | 15 --------------
 2 files changed, 25 insertions(+), 21 deletions(-)
 delete mode 100644 SYCL/BFloat16/bfloat16_type_cuda.cpp

diff --git a/SYCL/BFloat16/bfloat16_type.cpp b/SYCL/BFloat16/bfloat16_type.cpp
index 28f1bf621b670..9aae326db8baf 100644
--- a/SYCL/BFloat16/bfloat16_type.cpp
+++ b/SYCL/BFloat16/bfloat16_type.cpp
@@ -1,12 +1,13 @@
-// UNSUPPORTED: hip
-// RUN: %if cuda %{%clangxx -fsycl -fsycl-targets=%sycl_triple -Xsycl-target-backend --cuda-gpu-arch=sm_80 %s -o %t.out %}
-// TODO enable the below when CI supports >=sm_80
-// RUNx: %if cuda %{%GPU_RUN_PLACEHOLDER %t.out %}
-// RUN: %clangxx -fsycl %s -o %t.out
+// RUN: %if cuda %{%clangxx -fsycl -fsycl-targets=%sycl_triple -DUSE_CUDA_SM80=1 -Xsycl-target-backend --cuda-gpu-arch=sm_80 %s -o %t.out %}
+// RUN: %if cuda %{%GPU_RUN_PLACEHOLDER %t.out %}
+// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
 // TODO currently the feature isn't supported on FPGA.
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUNx: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Not currently supported on HIP.
+// UNSUPPORTED: hip
 
 //==----------- bfloat16_type.cpp - SYCL bfloat16 type test ----------------==//
 //
@@ -18,4 +19,22 @@
 
 #include "bfloat16_type.hpp"
 
-int main() { return run_tests(); }
+int main() {
+
+#ifdef USE_CUDA_SM80
+  // Special build for SM80 CUDA.
+  sycl::device Dev{default_selector_v};
+  if (Dev.get_platform().get_backend() != backend::ext_oneapi_cuda) {
+    std::cout << "Test skipped; CUDA run was not run with CUDA device."
+              << std::endl;
+    return 0;
+  }
+  if (std::stof(Dev.get_info<sycl::info::device::backend_version>()) < 8.0f) {
+    std::cout << "Test skipped; CUDA device does not support SM80 or newer."
+              << std::endl;
+    return 0;
+  }
+#endif
+
+  return run_tests();
+}
diff --git a/SYCL/BFloat16/bfloat16_type_cuda.cpp b/SYCL/BFloat16/bfloat16_type_cuda.cpp
deleted file mode 100644
index 81c4a08f128e4..0000000000000
--- a/SYCL/BFloat16/bfloat16_type_cuda.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// REQUIRES: gpu, cuda
-// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple -Xsycl-target-backend --cuda-gpu-arch=sm_80 %s -o %t.out
-// RUN: %t.out
-
-//==--------- bfloat16_type_cuda.cpp - SYCL bfloat16 type test -------------==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "bfloat16_type.hpp"
-
-int main() { return run_tests(); }
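For reference, the gate added by this patch can also be exercised as a standalone program. The sketch below is illustrative, not part of the patch: it assumes a DPC++ compiler with the CUDA backend and that the backend reports the device compute capability as a string such as "8.0" through sycl::info::device::backend_version; the file name check_sm80.cpp and the messages are made up for the example.

// check_sm80.cpp (illustrative): decide whether a bfloat16 test body should
// run by checking that the selected device is a CUDA device with SM80+.
// Assumption: the DPC++ CUDA backend returns the compute capability (e.g.
// "8.0") via info::device::backend_version.
#include <sycl/sycl.hpp>

#include <iostream>
#include <string>

int main() {
  sycl::device Dev{sycl::default_selector_v};

  // Only the CUDA backend is expected to encode the SM version this way.
  if (Dev.get_platform().get_backend() != sycl::backend::ext_oneapi_cuda) {
    std::cout << "Not a CUDA device; skipping SM80 check." << std::endl;
    return 0;
  }

  // backend_version is a string such as "8.0"; parse it and compare.
  const float SMVersion =
      std::stof(Dev.get_info<sycl::info::device::backend_version>());
  if (SMVersion < 8.0f) {
    std::cout << "Device is below SM80; bfloat16 is not supported."
              << std::endl;
    return 0;
  }

  std::cout << "Device supports SM80 or newer." << std::endl;
  return 0;
}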