// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
//
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out

// Tests that previously known problematic reduction cases do not cause a
// requirement for atomic64.
// TODO: Once aspect requirements are added to testing, this test could be set
//       to require that atomic64 is NOT supported, to limit how frequently the
//       test is run. However, it should also work on devices that do support
//       atomic64.
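//       (Illustration only, not an existing directive: with such support the
//       restriction might look like a lit line along the lines of
//       "UNSUPPORTED: aspect-atomic64", assuming the test configuration
//       defines a corresponding feature.)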

#include <sycl/sycl.hpp>

#include <iostream>

using namespace sycl;

int main() {
  queue Q;

  if (Q.get_device().has(aspect::atomic64)) {
    std::cout << "Device supports aspect::atomic64 so we do not need to run "
                 "the test."
              << std::endl;
    return 0;
  }

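  // Output location for the reductions, allocated as USM shared memory so it
  // is accessible from both host and device.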
  long long *Out = malloc_shared<long long>(1, Q);

  // Case 1: nd_range reduction with a 64-bit integer and one of sycl::plus,
  // sycl::minimum, or sycl::maximum. The group_reduce_and_atomic_cross_wg
  // strategy would normally be picked, but it is invalid if the device does
  // not support atomic64.
  Q.submit([&](handler &CGH) {
    auto Redu = reduction(Out, 0ll, sycl::plus<long long>{});
    CGH.parallel_for(nd_range<1>{range<1>{32}, range<1>{32}}, Redu,
                     [=](nd_item<1> It, auto &Sum) {
                       Sum.combine(It.get_global_linear_id());
                     });
  }).wait();

  // Case 2: nd_range reduction with a 64-bit integer and one of sycl::bit_or,
  // sycl::bit_xor, or sycl::bit_and. The local_mem_tree_and_atomic_cross_wg
  // strategy would normally be picked, but it is invalid if the device does
  // not support atomic64.
  Q.submit([&](handler &CGH) {
    auto Redu = reduction(Out, 0ll, sycl::bit_and<long long>{});
    CGH.parallel_for(nd_range<1>{range<1>{32}, range<1>{32}}, Redu,
                     [=](nd_item<1> It, auto &Sum) {
                       Sum.combine(It.get_global_linear_id());
                     });
  }).wait();

  // Case 3: range reduction with a 64-bit integer and one of sycl::bit_or,
  // sycl::bit_xor, or sycl::bit_and. The local_atomic_and_atomic_cross_wg
  // strategy would normally be picked, but it is invalid if the device does
  // not support atomic64.
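  // Note: Sum.combine(It) relies on the implicit conversion of a
  // 1-dimensional item to size_t, so each work-item combines its own index.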
  Q.submit([&](handler &CGH) {
    auto Redu = reduction(Out, 0ll, sycl::bit_and<long long>{});
    CGH.parallel_for(range<1>{32}, Redu,
                     [=](item<1> It, auto &Sum) { Sum.combine(It); });
  }).wait();

  sycl::free(Out, Q);
  return 0;
}