Skip to content

Commit

Permalink
Merge branch 'develop' of https://github.com/LLNL/RAJA into long58/ci…
Browse files Browse the repository at this point in the history
…_expansion
long58 committed May 10, 2024
2 parents 59a8ccd + 5295aee commit 751fc03
Showing 73 changed files with 5,283 additions and 2,032 deletions.
6 changes: 4 additions & 2 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
@@ -75,7 +75,7 @@ stages:
include:
- local: '.gitlab/custom-jobs-and-variables.yml'
- project: 'radiuss/radiuss-shared-ci'
ref: 'v2023.12.3'
ref: 'v2024.04.0'
file: 'pipelines/${CI_MACHINE}.yml'
- artifact: '${CI_MACHINE}-jobs.yml'
job: 'generate-job-lists'
@@ -100,9 +100,11 @@ trigger-rajaperf:
strategy: depend

include:
- project: 'lc-templates/id_tokens'
file: 'id_tokens.yml'
# [Optional] checks preliminary to running the actual CI test
- project: 'radiuss/radiuss-shared-ci'
ref: 'v2023.12.3'
ref: 'v2024.04.0'
file: 'utilities/preliminary-ignore-draft-pr.yml'
# pipelines subscribed by the project
- local: '.gitlab/subscribed-pipelines.yml'
16 changes: 8 additions & 8 deletions .gitlab/custom-jobs-and-variables.yml
Original file line number Diff line number Diff line change
@@ -19,17 +19,17 @@ variables:
# Note: We repeat the reservation, necessary when jobs are manually re-triggered.
RUBY_JOB_ALLOC: "--reservation=ci --nodes=1"
# Project specific variants for ruby
PROJECT_RUBY_VARIANTS: "~shared +openmp +tests"
PROJECT_RUBY_VARIANTS: "~shared +openmp +vectorization +tests"
# Project specific deps for ruby
PROJECT_RUBY_DEPS: ""

# Poodle
# Arguments for top level allocation
POODLE_SHARED_ALLOC: "--exclusive --time=60 --nodes=1"
POODLE_SHARED_ALLOC: "--exclusive --time=120 --nodes=1"
# Arguments for job level allocation
POODLE_JOB_ALLOC: "--nodes=1"
# Project specific variants for poodle
PROJECT_POODLE_VARIANTS: "~shared +openmp +tests"
PROJECT_POODLE_VARIANTS: "~shared +openmp +vectorization +tests"
# Project specific deps for poodle
PROJECT_POODLE_DEPS: ""

@@ -39,26 +39,26 @@ variables:
# Arguments for job level allocation
CORONA_JOB_ALLOC: "--nodes=1 --begin-time=+5s"
# Project specific variants for corona
PROJECT_CORONA_VARIANTS: "~shared ~openmp +tests"
PROJECT_CORONA_VARIANTS: "~shared ~openmp +vectorization +tests"
# Project specific deps for corona
PROJECT_CORONA_DEPS: "^blt@develop "

# Tioga
# Arguments for top level allocation
TIOGA_SHARED_ALLOC: "--exclusive --time-limit=60m --nodes=1 -o per-resource.count=2"
TIOGA_SHARED_ALLOC: "--exclusive --queue=pci --time-limit=60m --nodes=1 -o per-resource.count=2"
# Arguments for job level allocation
TIOGA_JOB_ALLOC: "--nodes=1 --begin-time=+5s"
# Project specific variants for tioga
PROJECT_TIOGA_VARIANTS: "~shared ~openmp +tests"
PROJECT_TIOGA_VARIANTS: "~shared +openmp +vectorization +tests"
# Project specific deps for tioga
PROJECT_TIOGA_DEPS: "^blt@develop "

# Lassen and Butte use a different job scheduler (spectrum lsf) that does not
# allow pre-allocation the same way slurm does.
# Arguments for job level allocation
LASSEN_JOB_ALLOC: "1 -W 30 -q pci"
LASSEN_JOB_ALLOC: "1 -W 40 -q pci"
# Project specific variants for lassen
PROJECT_LASSEN_VARIANTS: "~shared +openmp +tests cuda_arch=70"
PROJECT_LASSEN_VARIANTS: "~shared +openmp +vectorization +tests cuda_arch=70"
# Project specific deps for lassen
PROJECT_LASSEN_DEPS: "^blt@develop "

2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -16,7 +16,7 @@ include(CMakeDependentOption)
# Set version number
set(RAJA_VERSION_MAJOR 2024)
set(RAJA_VERSION_MINOR 02)
set(RAJA_VERSION_PATCHLEVEL 1)
set(RAJA_VERSION_PATCHLEVEL 2)

if (RAJA_LOADED AND (NOT RAJA_LOADED STREQUAL "${RAJA_VERSION_MAJOR}.${RAJA_VERSION_MINOR}.${RAJA_VERSION_PATCHLEVEL}"))
message(FATAL_ERROR "You are mixing RAJA versions. Loaded is ${RAJA_LOADED}, expected ${RAJA_VERSION_MAJOR}.${RAJA_VERSION_MINOR}.${RAJA_VERSION_PATCHLEVEL}")
33 changes: 33 additions & 0 deletions RELEASE_NOTES.md
Original file line number Diff line number Diff line change
@@ -20,6 +20,39 @@ Notable changes include:
* Bug fixes/improvements:


Version 2024.02.2 -- Release date 2024-05-08
============================================

This release contains a bugfix and new execution policies that improve
performance for GPU kernels with reductions.

Notable changes include:

* New features / API changes:
* New GPU execution policies for CUDA and HIP added which provide
improved performance for GPU kernels with reductions. Please see the
RAJA User Guide for more information. Short summary:
* Option added to change max grid size in policies that use the
occupancy calculator.
* Policies added to run with max occupancy, a fraction of the
max occupancy, and to run with a "concretizer" which allows a
user to determine how to run based on what the occupancy
calculator determines about a kernel.
* Additional options to tune kernels containing reductions, such as
* an option to initialize data on host for reductions that use
atomic operations
* an option to avoid device scope memory fences
* Change the SYCL thread index ordering in RAJA::launch to
follow the SYCL "row-major" convention. Please see RAJA User Guide
for more information.

* Build changes/improvements:
* NONE.

* Bug fixes/improvements:
* Fixed issue in bump-style allocator used internally in RAJA::launch.


Version 2024.02.1 -- Release date 2024-04-03
============================================

21 changes: 21 additions & 0 deletions docs/Licenses/rocprim-license.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2017-2024 Advanced Micro Devices, Inc. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
2 changes: 1 addition & 1 deletion docs/conf.py
Original file line number Diff line number Diff line change
@@ -88,7 +88,7 @@
# The short X.Y version.
version = u'2024.02'
# The full version, including alpha/beta/rc tags.
release = u'2024.02.1'
release = u'2024.02.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
23 changes: 23 additions & 0 deletions docs/sphinx/user_guide/cook_book.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
.. ##
.. ## Copyright (c) 2016-24, Lawrence Livermore National Security, LLC
.. ## and RAJA project contributors. See the RAJA/LICENSE file
.. ## for details.
.. ##
.. ## SPDX-License-Identifier: (BSD-3-Clause)
.. ##
.. _cook-book-label:

************************
RAJA Cook Book
************************

The following sections show common use case patterns and the recommended
RAJA features and policies to use with them. They are intended
to provide users with complete usage examples beyond what can be found in other parts of the RAJA User Guide. In particular, the examples and discussion provide guidance on RAJA execution policy selection to improve performance of user application codes.

.. toctree::
:maxdepth: 2

cook_book/reduction

110 changes: 110 additions & 0 deletions docs/sphinx/user_guide/cook_book/reduction.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
.. ##
.. ## Copyright (c) 2016-24, Lawrence Livermore National Security, LLC
.. ## and other RAJA project contributors. See the RAJA/LICENSE file
.. ## for details.
.. ##
.. ## SPDX-License-Identifier: (BSD-3-Clause)
.. ##
.. _cook-book-reductions-label:

=======================
Cooking with Reductions
=======================

Please see the following section for overview discussion about RAJA reductions:

* :ref:`feat-reductions-label`.


----------------------------
Reductions with RAJA::forall
----------------------------

Here is the setup for a simple reduction example::

const int N = 1000;

int vec[N];

for (int i = 0; i < N; ++i) {

vec[i] = 1;

}

Here a simple sum reduction is performed in a for loop::

int vsum = 0;

// Run a kernel using the reduction objects
for (int i = 0; i < N; ++i) {

vsum += vec[i];

}

The results of these operations will yield the following values:

* ``vsum == 1000``

RAJA uses policy types to specify how things are implemented.

The forall *execution policy* specifies how the loop is run by the ``RAJA::forall`` method. The following discussion includes examples of several other RAJA execution policies that could be applied.
For example ``RAJA::seq_exec`` runs a C-style for loop sequentially on a CPU. The
``RAJA::cuda_exec_with_reduce<256>`` runs the loop as a CUDA GPU kernel with
256 threads per block and other CUDA kernel launch parameters, like the
number of blocks, optimized for performance with reducers.::

using exec_policy = RAJA::seq_exec;
// using exec_policy = RAJA::omp_parallel_for_exec;
// using exec_policy = RAJA::omp_target_parallel_for_exec<256>;
// using exec_policy = RAJA::cuda_exec_with_reduce<256>;
// using exec_policy = RAJA::hip_exec_with_reduce<256>;
// using exec_policy = RAJA::sycl_exec<256>;

The reduction policy specifies how the reduction is done and must match the
execution policy. For example ``RAJA::seq_reduce`` does a sequential reduction
and can only be used with sequential execution policies. The
``RAJA::cuda_reduce_atomic`` policy uses atomics, if possible with the given
data type, and can only be used with cuda execution policies. Similarly for other RAJA execution back-ends, such as HIP and OpenMP. Here are example RAJA reduction policies whose names are indicative of which execution policies they work with::

using reduce_policy = RAJA::seq_reduce;
// using reduce_policy = RAJA::omp_reduce;
// using reduce_policy = RAJA::omp_target_reduce;
// using reduce_policy = RAJA::cuda_reduce_atomic;
// using reduce_policy = RAJA::hip_reduce_atomic;
// using reduce_policy = RAJA::sycl_reduce;


Here a simple sum reduction is performed using RAJA::

RAJA::ReduceSum<reduce_policy, int> vsum(0);

RAJA::forall<exec_policy>( RAJA::RangeSegment(0, N),
[=](RAJA::Index_type i) {

vsum += vec[i];

});

The results of these operations will yield the following values:

* ``vsum.get() == 1000``


Another option for the execution policy when using the cuda or hip backends is
to use the base policies, which have a boolean parameter to choose between the
general use ``cuda/hip_exec`` policy and the ``cuda/hip_exec_with_reduce`` policy.::

// static constexpr bool with_reduce = ...;
// using exec_policy = RAJA::cuda_exec_base<with_reduce, 256>;
// using exec_policy = RAJA::hip_exec_base<with_reduce, 256>;

Another option for the reduction policy when using the cuda or hip backends is
to use the base policies, which have a boolean parameter to choose between the
atomic ``cuda/hip_reduce_atomic`` policy and the non-atomic ``cuda/hip_reduce`` policy.::

// static constexpr bool with_atomic = ...;
// using reduce_policy = RAJA::cuda_reduce_base<with_atomic>;
// using reduce_policy = RAJA::hip_reduce_base<with_atomic>;
717 changes: 433 additions & 284 deletions docs/sphinx/user_guide/feature/policies.rst

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions docs/sphinx/user_guide/feature/reduction.rst
Original file line number Diff line number Diff line change
@@ -39,6 +39,10 @@ RAJA reductions:

* :ref:`tut-reduction-label`.

Please see the following cook book sections for guidance on policy usage:

* :ref:`cook-book-reductions-label`.


----------------
Reduction Types
1 change: 1 addition & 0 deletions docs/sphinx/user_guide/index.rst
Original file line number Diff line number Diff line change
@@ -32,5 +32,6 @@ to use RAJA in an application can be found in :ref:`app-considerations-label`.
using_raja
config_options
features
cook_book
app_considerations
tutorial
14 changes: 7 additions & 7 deletions include/RAJA/RAJA.hpp
Original file line number Diff line number Diff line change
@@ -33,8 +33,10 @@
#include "RAJA/util/camp_aliases.hpp"
#include "RAJA/util/macros.hpp"
#include "RAJA/util/types.hpp"
#include "RAJA/util/math.hpp"
#include "RAJA/util/plugins.hpp"
#include "RAJA/util/Registry.hpp"
#include "RAJA/util/for_each.hpp"


//
@@ -57,13 +59,6 @@
//
#include "RAJA/policy/sequential.hpp"

//
// NOTE: LOOP POLCIES WERE DEPRECATED IN 2023.03.0 RELEASE.
// THEY ARE RE-ADDED HERE AT REQUEST OF USERS.
// THEY WILL BE REMOVED AGAIN IN THE FUTURE.
//
#include "RAJA/policy/loop.hpp"

//
// All platforms should support simd and vector execution.
//
@@ -155,6 +150,11 @@
//
#include "RAJA/util/sort.hpp"

//
// reduce algorithms
//
#include "RAJA/util/reduce.hpp"

//
// WorkPool, WorkGroup, WorkSite objects
//
8 changes: 6 additions & 2 deletions include/RAJA/pattern/launch/launch_core.hpp
Original file line number Diff line number Diff line change
@@ -174,10 +174,14 @@ class LaunchContext
template<typename T>
RAJA_HOST_DEVICE T* getSharedMemory(size_t bytes)
{
T * mem_ptr = &((T*) shared_mem_ptr)[shared_mem_offset];

//Calculate offset in bytes with a char pointer
void* mem_ptr = static_cast<char *>(shared_mem_ptr) + shared_mem_offset;

shared_mem_offset += bytes*sizeof(T);
return mem_ptr;

//convert to desired type
return static_cast<T*>(mem_ptr);
}

/*
289 changes: 169 additions & 120 deletions include/RAJA/policy/cuda/MemUtils_CUDA.hpp

Large diffs are not rendered by default.

169 changes: 96 additions & 73 deletions include/RAJA/policy/cuda/forall.hpp

Large diffs are not rendered by default.

467 changes: 467 additions & 0 deletions include/RAJA/policy/cuda/intrinsics.hpp

Large diffs are not rendered by default.

27 changes: 16 additions & 11 deletions include/RAJA/policy/cuda/kernel/CudaKernel.hpp
Original file line number Diff line number Diff line change
@@ -87,7 +87,7 @@ namespace statement
*/
template <typename LaunchConfig, typename... EnclosedStmts>
struct CudaKernelExt
: public internal::Statement<::RAJA::policy::cuda::cuda_exec_explicit<LaunchConfig, void, 0, true>, EnclosedStmts...> {
: public internal::Statement<::RAJA::policy::cuda::cuda_exec_explicit<LaunchConfig, void, void, 0, true>, EnclosedStmts...> {
};


@@ -284,7 +284,7 @@ struct CudaLaunchHelper<cuda_explicit_launch<async0, num_blocks, num_threads, bl
inline static void recommended_blocks_threads(size_t shmem_size,
int &recommended_blocks, int &recommended_threads)
{
auto func = kernelGetter_t::get();
auto func = reinterpret_cast<const void*>(kernelGetter_t::get());

if (num_blocks <= 0) {

@@ -294,8 +294,10 @@ struct CudaLaunchHelper<cuda_explicit_launch<async0, num_blocks, num_threads, bl
// determine blocks at runtime
// determine threads at runtime
//
::RAJA::cuda::cuda_occupancy_max_blocks_threads<Self>(
func, shmem_size, recommended_blocks, recommended_threads);
auto data = ::RAJA::cuda::cuda_occupancy_max_blocks_threads<Self>(
func, shmem_size);
recommended_blocks = data.func_max_blocks_per_device;
recommended_threads = data.func_max_threads_per_block;

} else {

@@ -305,8 +307,9 @@ struct CudaLaunchHelper<cuda_explicit_launch<async0, num_blocks, num_threads, bl
//
recommended_threads = num_threads;

::RAJA::cuda::cuda_occupancy_max_blocks<Self, num_threads>(
func, shmem_size, recommended_blocks);
auto data = ::RAJA::cuda::cuda_occupancy_max_blocks<Self, num_threads>(
func, shmem_size);
recommended_blocks = data.func_max_blocks_per_sm * data.device_sm_per_device;

}

@@ -360,7 +363,7 @@ struct CudaLaunchHelper<cuda_explicit_launch<async0, num_blocks, num_threads, bl
inline static void max_blocks(size_t shmem_size,
int &max_blocks, int actual_threads)
{
auto func = kernelGetter_t::get();
auto func = reinterpret_cast<const void*>(kernelGetter_t::get());

if (num_blocks <= 0) {

@@ -373,16 +376,18 @@ struct CudaLaunchHelper<cuda_explicit_launch<async0, num_blocks, num_threads, bl
//
// determine blocks when actual_threads != num_threads
//
::RAJA::cuda::cuda_occupancy_max_blocks<Self>(
func, shmem_size, max_blocks, actual_threads);
auto data = ::RAJA::cuda::cuda_occupancy_max_blocks<Self>(
func, shmem_size, actual_threads);
max_blocks = data.func_max_blocks_per_sm * data.device_sm_per_device;

} else {

//
// determine blocks when actual_threads == num_threads
//
::RAJA::cuda::cuda_occupancy_max_blocks<Self, num_threads>(
func, shmem_size, max_blocks);
auto data = ::RAJA::cuda::cuda_occupancy_max_blocks<Self, num_threads>(
func, shmem_size);
max_blocks = data.func_max_blocks_per_sm * data.device_sm_per_device;

}

10 changes: 5 additions & 5 deletions include/RAJA/policy/cuda/kernel/For.hpp
Original file line number Diff line number Diff line change
@@ -108,7 +108,7 @@ template <typename Data,
struct CudaStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types> {

@@ -123,7 +123,7 @@ struct CudaStatementExecutor<
using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = RAJA::internal::KernelDimensionCalculator<
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>>;
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>>;


static inline RAJA_DEVICE
@@ -180,7 +180,7 @@ template <typename Data,
struct CudaStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types> {

@@ -195,7 +195,7 @@ struct CudaStatementExecutor<
using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = RAJA::internal::KernelDimensionCalculator<
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>>;
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>>;


static inline RAJA_DEVICE
@@ -246,7 +246,7 @@ struct CudaStatementExecutor<
statement::For<ArgumentId, seq_exec, EnclosedStmts...>,
Types>
: CudaStatementExecutor<Data, statement::For<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
cuda::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
14 changes: 7 additions & 7 deletions include/RAJA/policy/cuda/kernel/ForICount.hpp
Original file line number Diff line number Diff line change
@@ -103,20 +103,20 @@ template <typename Data,
struct CudaStatementExecutor<
Data,
statement::ForICount<ArgumentId, ParamId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>
: public CudaStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = CudaStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -166,20 +166,20 @@ template <typename Data,
struct CudaStatementExecutor<
Data,
statement::ForICount<ArgumentId, ParamId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>
: public CudaStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = CudaStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -226,7 +226,7 @@ struct CudaStatementExecutor<
statement::ForICount<ArgumentId, ParamId, seq_exec, EnclosedStmts...>,
Types>
: CudaStatementExecutor<Data, statement::ForICount<ArgumentId,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
cuda::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
10 changes: 5 additions & 5 deletions include/RAJA/policy/cuda/kernel/Tile.hpp
Original file line number Diff line number Diff line change
@@ -143,7 +143,7 @@ struct CudaStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>, Types>
{

@@ -153,7 +153,7 @@ struct CudaStatementExecutor<

using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>>;
using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>>;

static inline RAJA_DEVICE
void exec(Data &data, bool thread_active)
@@ -233,7 +233,7 @@ struct CudaStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>, Types>
{

@@ -243,7 +243,7 @@ struct CudaStatementExecutor<

using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>>;
using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>>;

static inline RAJA_DEVICE
void exec(Data &data, bool thread_active)
@@ -318,7 +318,7 @@ struct CudaStatementExecutor<
Data,
statement::Tile<ArgumentId, TPol, seq_exec, EnclosedStmts...>, Types>
: CudaStatementExecutor<Data, statement::Tile<ArgumentId, TPol,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
cuda::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
14 changes: 7 additions & 7 deletions include/RAJA/policy/cuda/kernel/TileTCount.hpp
Original file line number Diff line number Diff line change
@@ -131,22 +131,22 @@ struct CudaStatementExecutor<
Data,
statement::TileTCount<ArgumentId, ParamId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>
: public CudaStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = CudaStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -209,22 +209,22 @@ struct CudaStatementExecutor<
Data,
statement::TileTCount<ArgumentId, ParamId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>
: public CudaStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = CudaStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -281,7 +281,7 @@ struct CudaStatementExecutor<
Data,
statement::TileTCount<ArgumentId, ParamId, TPol, seq_exec, EnclosedStmts...>, Types>
: CudaStatementExecutor<Data, statement::TileTCount<ArgumentId, ParamId, TPol,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
cuda::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
18 changes: 9 additions & 9 deletions include/RAJA/policy/cuda/kernel/internal.hpp
Original file line number Diff line number Diff line change
@@ -388,7 +388,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp

// specialization for strided loop sequential policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, named_usage::ignored, named_usage::ignored>>>
{
@@ -402,7 +402,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp

// specialization for strided loop thread policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, named_usage::unspecified, named_usage::ignored>>>
{
@@ -418,7 +418,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp
};
///
template<named_dim dim, int BLOCK_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, BLOCK_SIZE, named_usage::ignored>>>
{
@@ -436,7 +436,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp

// specialization for strided loop block policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, named_usage::ignored, named_usage::unspecified>>>
{
@@ -451,7 +451,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp
};
///
template<named_dim dim, int GRID_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, named_usage::ignored, GRID_SIZE>>>
{
@@ -469,7 +469,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp

// specialization for strided loop global policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, named_usage::unspecified, named_usage::unspecified>>>
{
@@ -488,7 +488,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp
};
///
template<named_dim dim, int GRID_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, named_usage::unspecified, GRID_SIZE>>>
{
@@ -508,7 +508,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp
};
///
template<named_dim dim, int BLOCK_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, BLOCK_SIZE, named_usage::unspecified>>>
{
@@ -527,7 +527,7 @@ struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapp
};
///
template<named_dim dim, int BLOCK_SIZE, int GRID_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::cuda::cuda_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
cuda::IndexGlobal<dim, BLOCK_SIZE, GRID_SIZE>>>
{
24 changes: 12 additions & 12 deletions include/RAJA/policy/cuda/launch.hpp
Original file line number Diff line number Diff line change
@@ -433,7 +433,7 @@ struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::Dir
};

template <typename SEGMENT, typename IndexMapper>
struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
@@ -457,7 +457,7 @@ struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::Str
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1>
struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1>,
@@ -493,7 +493,7 @@ struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::Str
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1, typename IndexMapper2>
struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1,
@@ -625,7 +625,7 @@ struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mappin
};

template <typename SEGMENT, typename IndexMapper>
struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
@@ -649,7 +649,7 @@ struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mappin
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1>
struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1>,
@@ -686,7 +686,7 @@ struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mappin
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1, typename IndexMapper2>
struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopICountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1,
@@ -810,18 +810,18 @@ struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapp
};

template<typename SEGMENT, kernel_sync_requirement sync, typename IndexMapper0>
struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
IndexMapper0>,
SEGMENT>
: LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
: LoopExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
IndexMapper0>,
SEGMENT>
{};

template<typename SEGMENT, typename IndexMapper0, typename IndexMapper1>
struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1>,
@@ -852,7 +852,7 @@ struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapp
};

template<typename SEGMENT, typename IndexMapper0, typename IndexMapper1, typename IndexMapper2>
struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::cuda::cuda_flatten_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1,
@@ -914,7 +914,7 @@ struct TileExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::Dir
};

template <typename SEGMENT, typename IndexMapper>
struct TileExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct TileExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
@@ -964,7 +964,7 @@ struct TileTCountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mappin
};

template <typename SEGMENT, typename IndexMapper>
struct TileTCountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop,
struct TileTCountExecute<RAJA::policy::cuda::cuda_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
352 changes: 328 additions & 24 deletions include/RAJA/policy/cuda/policy.hpp

Large diffs are not rendered by default.

898 changes: 378 additions & 520 deletions include/RAJA/policy/cuda/reduce.hpp

Large diffs are not rendered by default.

12 changes: 8 additions & 4 deletions include/RAJA/policy/cuda/scan.hpp
Original file line number Diff line number Diff line change
@@ -44,6 +44,7 @@ namespace scan
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
size_t BLOCKS_PER_SM,
bool Async,
typename InputIter,
@@ -52,7 +53,7 @@ RAJA_INLINE
resources::EventProxy<resources::Cuda>
inclusive_inplace(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
InputIter begin,
InputIter end,
Function binary_op)
@@ -96,6 +97,7 @@ inclusive_inplace(
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
size_t BLOCKS_PER_SM,
bool Async,
typename InputIter,
@@ -105,7 +107,7 @@ RAJA_INLINE
resources::EventProxy<resources::Cuda>
exclusive_inplace(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
InputIter begin,
InputIter end,
Function binary_op,
@@ -152,6 +154,7 @@ exclusive_inplace(
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
size_t BLOCKS_PER_SM,
bool Async,
typename InputIter,
@@ -161,7 +164,7 @@ RAJA_INLINE
resources::EventProxy<resources::Cuda>
inclusive(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
InputIter begin,
InputIter end,
OutputIter out,
@@ -206,6 +209,7 @@ inclusive(
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
size_t BLOCKS_PER_SM,
bool Async,
typename InputIter,
@@ -216,7 +220,7 @@ RAJA_INLINE
resources::EventProxy<resources::Cuda>
exclusive(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
InputIter begin,
InputIter end,
OutputIter out,
66 changes: 42 additions & 24 deletions include/RAJA/policy/cuda/sort.hpp
Original file line number Diff line number Diff line change
@@ -44,7 +44,9 @@ namespace sort
/*!
\brief static assert unimplemented stable sort
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async, typename Iter, typename Compare>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename Iter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
concepts::negate<concepts::all_of<
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
@@ -54,7 +56,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<Iter>>>>>>>
stable(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
Iter,
Iter,
Compare)
@@ -75,13 +77,15 @@ stable(
/*!
\brief stable sort given range in ascending order
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
stable(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
Iter begin,
Iter end,
operators::less<RAJA::detail::IterVal<Iter>>)
@@ -143,13 +147,15 @@ stable(
/*!
\brief stable sort given range in descending order
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
stable(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
Iter begin,
Iter end,
operators::greater<RAJA::detail::IterVal<Iter>>)
@@ -212,7 +218,9 @@ stable(
/*!
\brief static assert unimplemented sort
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async, typename Iter, typename Compare>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename Iter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
concepts::negate<concepts::all_of<
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
@@ -222,7 +230,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<Iter>>>>>>>
unstable(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
Iter,
Iter,
Compare)
@@ -243,13 +251,15 @@ unstable(
/*!
\brief sort given range in ascending order
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
unstable(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async> p,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async> p,
Iter begin,
Iter end,
operators::less<RAJA::detail::IterVal<Iter>> comp)
@@ -260,13 +270,15 @@ unstable(
/*!
\brief sort given range in descending order
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
unstable(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async> p,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async> p,
Iter begin,
Iter end,
operators::greater<RAJA::detail::IterVal<Iter>> comp)
@@ -278,7 +290,8 @@ unstable(
/*!
\brief static assert unimplemented stable sort pairs
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename KeyIter, typename ValIter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
concepts::negate<concepts::all_of<
@@ -290,7 +303,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<KeyIter>>>>>>>
stable_pairs(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
KeyIter,
KeyIter,
ValIter,
@@ -314,15 +327,16 @@ stable_pairs(
/*!
\brief stable sort given range of pairs in ascending order of keys
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
stable_pairs(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
@@ -396,15 +410,16 @@ stable_pairs(
/*!
\brief stable sort given range of pairs in descending order of keys
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
stable_pairs(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
@@ -479,7 +494,8 @@ stable_pairs(
/*!
\brief static assert unimplemented sort pairs
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename KeyIter, typename ValIter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
concepts::negate<concepts::all_of<
@@ -491,7 +507,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<KeyIter>>>>>>>
unstable_pairs(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async>,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async>,
KeyIter,
KeyIter,
ValIter,
@@ -515,15 +531,16 @@ unstable_pairs(
/*!
    \brief sort given range of pairs in ascending order of keys
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
unstable_pairs(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async> p,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async> p,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
@@ -535,15 +552,16 @@ unstable_pairs(
/*!
    \brief sort given range of pairs in descending order of keys
*/
template <typename IterationMapping, typename IterationGetter, size_t BLOCKS_PER_SM, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, size_t BLOCKS_PER_SM, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Cuda>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
unstable_pairs(
resources::Cuda cuda_res,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, BLOCKS_PER_SM, Async> p,
::RAJA::policy::cuda::cuda_exec_explicit<IterationMapping, IterationGetter, Concretizer, BLOCKS_PER_SM, Async> p,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
296 changes: 170 additions & 126 deletions include/RAJA/policy/hip/MemUtils_HIP.hpp

Large diffs are not rendered by default.

169 changes: 96 additions & 73 deletions include/RAJA/policy/hip/forall.hpp

Large diffs are not rendered by default.

362 changes: 362 additions & 0 deletions include/RAJA/policy/hip/intrinsics.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,362 @@
/*!
******************************************************************************
*
* \file
*
* \brief Header file containing RAJA intrinsics templates for HIP execution.
*
* These methods should work on any platform that supports
* HIP devices.
*
******************************************************************************
*/

//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
// Copyright (c) 2016-24, Lawrence Livermore National Security, LLC
// and RAJA project contributors. See the RAJA/LICENSE file for details.
//
// SPDX-License-Identifier: (BSD-3-Clause)
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//

#ifndef RAJA_hip_intrinsics_HPP
#define RAJA_hip_intrinsics_HPP

#include "RAJA/config.hpp"

#if defined(RAJA_ENABLE_HIP)

#include <type_traits>

#include <hip/hip_runtime.h>

#include "RAJA/util/macros.hpp"
#include "RAJA/util/SoAArray.hpp"
#include "RAJA/util/types.hpp"

#include "RAJA/policy/hip/policy.hpp"


namespace RAJA
{

namespace hip
{

namespace impl
{

/*!
* \brief Abstracts access to memory when coordinating between threads at
* device scope. The fences provided here are to be used with relaxed
* atomics in order to guarantee memory ordering and visibility of the
* accesses done through this class.
*
* \Note This uses device scope fences to ensure ordering and to flush local
* caches so that memory accesses become visible to the whole device.
* \Note This class uses normal memory accesses that are cached in local caches
* so device scope fences are required to make memory accesses visible
* to the whole device.
*/
struct AccessorDeviceScopeUseDeviceFence : RAJA::detail::DefaultAccessor
{
  // Device-scope fence: orders this thread's memory accesses and flushes
  // local caches so that values written by other blocks become visible
  // before subsequent (relaxed) loads through this accessor.
  static RAJA_DEVICE RAJA_INLINE void fence_acquire()
  {
    __threadfence();
  }

  // Device-scope fence: makes this thread's prior writes visible to the
  // whole device before a subsequent (relaxed) store through this accessor.
  static RAJA_DEVICE RAJA_INLINE void fence_release()
  {
    __threadfence();
  }
};

/*!
******************************************************************************
*
* \brief Abstracts access to memory when coordinating between threads at
* device scope. The fences provided here are to be used with relaxed
* atomics in order to guarantee memory ordering and visibility of the
* accesses done through this class.
*
* \Note This may use block scope fences to ensure ordering and avoid flushing
* local caches so special memory accesses are used to ensure visibility
* to the whole device.
* \Note This class uses device scope atomic memory accesses to bypass local
* caches so memory accesses are visible to the whole device without
* device scope fences.
* \Note A memory access may be split into multiple memory accesses, so
* even though atomic instructions are used concurrent accesses between
* different threads are not thread safe.
*
******************************************************************************
*/
struct AccessorDeviceScopeUseBlockFence
{
  // hip has 32 and 64 bit atomics
  static constexpr size_t min_atomic_int_type_size = sizeof(unsigned int);
  static constexpr size_t max_atomic_int_type_size = sizeof(unsigned long long);

  // Load in_ptr[idx] as one or more device-scope relaxed atomic word loads.
  // The value is read word-by-word (via AsIntegerArray), so a single logical
  // load may be split into multiple atomic accesses; concurrent writes to the
  // same location by other threads are NOT safe against this (see class note).
  template < typename T >
  static RAJA_DEVICE RAJA_INLINE T get(T* in_ptr, size_t idx)
  {
    using ArrayType = RAJA::detail::AsIntegerArray<T, min_atomic_int_type_size, max_atomic_int_type_size>;
    using integer_type = typename ArrayType::integer_type;

    ArrayType u;
    // const_cast needed because the atomic fallback (atomicAdd) requires a
    // non-const pointer even though the add of 0 does not change the value.
    auto ptr = const_cast<integer_type*>(reinterpret_cast<const integer_type*>(in_ptr + idx));

    for (size_t i = 0; i < u.array_size(); ++i) {
#if defined(RAJA_USE_HIP_INTRINSICS) && RAJA_INTERNAL_CLANG_HAS_BUILTIN(__hip_atomic_load)
      u.array[i] = __hip_atomic_load(&ptr[i], __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
#else
      // atomicAdd of 0 acts as an atomic load that bypasses local caches.
      u.array[i] = atomicAdd(&ptr[i], integer_type(0));
#endif
    }

    return u.get_value();
  }

  // Store val into in_ptr[idx] as one or more device-scope relaxed atomic
  // word stores. As with get(), a single logical store may be split into
  // multiple atomic accesses.
  template < typename T >
  static RAJA_DEVICE RAJA_INLINE void set(T* in_ptr, size_t idx, T val)
  {
    using ArrayType = RAJA::detail::AsIntegerArray<T, min_atomic_int_type_size, max_atomic_int_type_size>;
    using integer_type = typename ArrayType::integer_type;

    ArrayType u;
    u.set_value(val);
    auto ptr = reinterpret_cast<integer_type*>(in_ptr + idx);

    for (size_t i = 0; i < u.array_size(); ++i) {
#if defined(RAJA_USE_HIP_INTRINSICS) && RAJA_INTERNAL_CLANG_HAS_BUILTIN(__hip_atomic_store)
      __hip_atomic_store(&ptr[i], u.array[i], __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
#else
      // atomicExch acts as an atomic store that bypasses local caches.
      atomicExch(&ptr[i], u.array[i]);
#endif
    }
  }

  // Acquire fence. With HIP intrinsics available, a workgroup-scope fence
  // suffices because get()/set() already bypass local caches; otherwise fall
  // back to a full device-scope __threadfence().
  static RAJA_DEVICE RAJA_INLINE void fence_acquire()
  {
#if defined(RAJA_USE_HIP_INTRINSICS) && RAJA_INTERNAL_CLANG_HAS_BUILTIN(__builtin_amdgcn_fence)
    __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "workgroup");
#else
    __threadfence();
#endif
  }

  // Release fence. With HIP intrinsics available, use a workgroup-scope
  // release fence plus an explicit wait for outstanding vector-memory
  // operations; otherwise fall back to a device-scope __threadfence().
  static RAJA_DEVICE RAJA_INLINE void fence_release()
  {
#if defined(RAJA_USE_HIP_INTRINSICS) && RAJA_INTERNAL_CLANG_HAS_BUILTIN(__builtin_amdgcn_fence) && \
    RAJA_INTERNAL_CLANG_HAS_BUILTIN(__builtin_amdgcn_s_waitcnt)
    __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
    // Wait until all vmem operations complete (s_waitcnt vmcnt(0))
    __builtin_amdgcn_s_waitcnt(/*vmcnt*/ 0 | (/*exp_cnt*/ 0x7 << 4) | (/*lgkmcnt*/ 0xf << 8));
#else
    __threadfence();
#endif
  }
};


// hip only has shfl primitives for 32 bits, so arbitrary types are
// decomposed into 32-bit words (see AsIntegerArray usage below): min and
// max word size are both sizeof(unsigned int).
constexpr size_t min_shfl_int_type_size = sizeof(unsigned int);
constexpr size_t max_shfl_int_type_size = sizeof(unsigned int);

/*!
 ******************************************************************************
 *
 * \brief  Exchange a value of arbitrary type with lane (lane ^ laneMask)
 *         using 32-bit warp shuffles.
 *
 * The value is reinterpreted as an array of 32-bit words, each word is
 * exchanged via ::__shfl_xor, and the words are reassembled into a T.
 *
 * \Note Returns an undefined value if src lane is inactive (divergence).
 *       Returns this lane's value if src lane is out of bounds or has exited.
 *
 ******************************************************************************
 */
template <typename T>
RAJA_DEVICE RAJA_INLINE T shfl_xor_sync(T var, int laneMask)
{
  RAJA::detail::AsIntegerArray<T, min_shfl_int_type_size, max_shfl_int_type_size> packed;
  packed.set_value(var);

  const size_t num_words = packed.array_size();
  for (size_t word = 0; word < num_words; ++word) {
    packed.array[word] = ::__shfl_xor(packed.array[word], laneMask);
  }

  return packed.get_value();
}

// Broadcast a value of arbitrary type from lane srcLane using 32-bit warp
// shuffles: the value is split into 32-bit words, each word is fetched via
// ::__shfl, and the words are reassembled into a T.
template <typename T>
RAJA_DEVICE RAJA_INLINE T shfl_sync(T var, int srcLane)
{
  RAJA::detail::AsIntegerArray<T, min_shfl_int_type_size, max_shfl_int_type_size> packed;
  packed.set_value(var);

  const size_t num_words = packed.array_size();
  for (size_t word = 0; word < num_words; ++word) {
    packed.array[word] = ::__shfl(packed.array[word], srcLane);
  }

  return packed.get_value();
}


// Full specializations for types that are natively 32 bits: skip the
// pack/unpack through AsIntegerArray and call the HIP shuffle directly.
template <>
RAJA_DEVICE RAJA_INLINE int shfl_xor_sync<int>(int var, int laneMask)
{
  return ::__shfl_xor(var, laneMask);
}

template <>
RAJA_DEVICE RAJA_INLINE float shfl_xor_sync<float>(float var, int laneMask)
{
  return ::__shfl_xor(var, laneMask);
}

template <>
RAJA_DEVICE RAJA_INLINE int shfl_sync<int>(int var, int srcLane)
{
  return ::__shfl(var, srcLane);
}

template <>
RAJA_DEVICE RAJA_INLINE float shfl_sync<float>(float var, int srcLane)
{
  return ::__shfl(var, srcLane);
}


//! reduce values within each warp of the block using a butterfly
//! (xor-shuffle) pattern; `identity` is unused here (it matters only in
//! block_reduce, which shares this per-warp reduction logic).
//! NOTE(review): despite the original "into thread 0" wording, this routine
//! only combines values across the lanes of each warp — confirm callers
//! rely on per-warp results only.
template <typename Combiner, typename T>
RAJA_DEVICE RAJA_INLINE T warp_reduce(T val, T RAJA_UNUSED_ARG(identity))
{
  int numThreads = blockDim.x * blockDim.y * blockDim.z;

  int threadId = threadIdx.x + blockDim.x * threadIdx.y +
                 (blockDim.x * blockDim.y) * threadIdx.z;

  T temp = val;

  if (numThreads % policy::hip::WARP_SIZE == 0) {

    // block size is a multiple of the warp size: every warp is full, so the
    // plain butterfly is safe — reduce each warp
    for (int i = 1; i < policy::hip::WARP_SIZE; i *= 2) {
      T rhs = shfl_xor_sync(temp, i);
      Combiner{}(temp, rhs);
    }

  } else {

    // partial last warp: compute the partner lane explicitly and skip
    // partners beyond the block size — reduce each warp
    for (int i = 1; i < policy::hip::WARP_SIZE; i *= 2) {
      int srcLane = threadId ^ i;
      T rhs = shfl_sync(temp, srcLane);
      // only add from threads that exist (don't double count own value)
      if (srcLane < numThreads) {
        Combiner{}(temp, rhs);
      }
    }
  }

  return temp;
}

/*!
 * \brief All-reduce values across a warp.
 *
 * Uses a butterfly (xor-shuffle) pattern so that every lane ends up holding
 * the full reduction of all lanes' inputs.
 */
template <typename Combiner, typename T>
RAJA_DEVICE RAJA_INLINE T warp_allreduce(T val)
{
  T result = val;

  for (int mask = 1; mask < policy::hip::WARP_SIZE; mask *= 2) {
    T partner = shfl_xor_sync(result, mask);
    Combiner{}(result, partner);
  }

  return result;
}


//! reduce values in block into thread 0: first each warp reduces its lanes
//! with shuffles, then warp 0 combines the per-warp partial results through
//! shared memory. `identity` seeds lanes of warp 0 that have no
//! corresponding warp to read.
template <typename Combiner, typename T>
RAJA_DEVICE RAJA_INLINE T block_reduce(T val, T identity)
{
  int numThreads = blockDim.x * blockDim.y * blockDim.z;

  int threadId = threadIdx.x + blockDim.x * threadIdx.y +
                 (blockDim.x * blockDim.y) * threadIdx.z;

  // lane index within this thread's warp, and this thread's warp number
  int warpId = threadId % policy::hip::WARP_SIZE;
  int warpNum = threadId / policy::hip::WARP_SIZE;

  T temp = val;

  if (numThreads % policy::hip::WARP_SIZE == 0) {

    // block size is a multiple of the warp size: every warp is full, so the
    // plain butterfly is safe — reduce each warp
    for (int i = 1; i < policy::hip::WARP_SIZE; i *= 2) {
      T rhs = shfl_xor_sync(temp, i);
      Combiner{}(temp, rhs);
    }

  } else {

    // partial last warp: compute the partner lane explicitly and skip
    // partners beyond the block size — reduce each warp
    for (int i = 1; i < policy::hip::WARP_SIZE; i *= 2) {
      int srcLane = threadId ^ i;
      T rhs = shfl_sync(temp, srcLane);
      // only add from threads that exist (don't double count own value)
      if (srcLane < numThreads) {
        Combiner{}(temp, rhs);
      }
    }
  }

  // combine the per-warp partial results when the block spans multiple warps
  if (numThreads > policy::hip::WARP_SIZE) {

    // warp 0 gathers one value per warp via its lanes, so the number of
    // warps must not exceed the warp size
    static_assert(policy::hip::MAX_WARPS <= policy::hip::WARP_SIZE,
        "Max Warps must be less than or equal to Warp Size for this algorithm to work");

    // raw-byte shared storage so T's constructor is not run in shared memory
    __shared__ unsigned char tmpsd[sizeof(RAJA::detail::SoAArray<T, policy::hip::MAX_WARPS>)];
    RAJA::detail::SoAArray<T, policy::hip::MAX_WARPS>* sd =
        reinterpret_cast<RAJA::detail::SoAArray<T, policy::hip::MAX_WARPS> *>(tmpsd);

    // write per warp values to shared memory (lane 0 of each warp holds the
    // warp's reduced value)
    if (warpId == 0) {
      sd->set(warpNum, temp);
    }

    __syncthreads();

    if (warpNum == 0) {

      // read per warp values: lane `warpId` of warp 0 loads warp `warpId`'s
      // partial, or `identity` if that warp does not exist in this block
      if (warpId * policy::hip::WARP_SIZE < numThreads) {
        temp = sd->get(warpId);
      } else {
        temp = identity;
      }

      // butterfly-reduce the per-warp partials within warp 0
      for (int i = 1; i < policy::hip::MAX_WARPS; i *= 2) {
        T rhs = shfl_xor_sync(temp, i);
        Combiner{}(temp, rhs);
      }
    }

    // keep the shared buffer alive until all threads are done with it
    __syncthreads();
  }

  return temp;
}

} // end namespace impl

} // end namespace hip

} // end namespace RAJA

#endif // closing endif for RAJA_ENABLE_HIP guard

#endif // closing endif for header file include guard
2 changes: 1 addition & 1 deletion include/RAJA/policy/hip/kernel.hpp
Original file line number Diff line number Diff line change
@@ -4,7 +4,7 @@
* \file
*
* \brief RAJA header file containing constructs used to run kernel::forall
* traversals on GPU with CUDA.
* traversals on GPU with HIP.
*
******************************************************************************
*/
10 changes: 5 additions & 5 deletions include/RAJA/policy/hip/kernel/For.hpp
Original file line number Diff line number Diff line change
@@ -108,7 +108,7 @@ template <typename Data,
struct HipStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types> {

@@ -123,7 +123,7 @@ struct HipStatementExecutor<
using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = RAJA::internal::KernelDimensionCalculator<
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>>;
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>>;


static inline RAJA_DEVICE
@@ -180,7 +180,7 @@ template <typename Data,
struct HipStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types> {

@@ -195,7 +195,7 @@ struct HipStatementExecutor<
using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = RAJA::internal::KernelDimensionCalculator<
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>>;
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>>;


static inline RAJA_DEVICE
@@ -246,7 +246,7 @@ struct HipStatementExecutor<
statement::For<ArgumentId, seq_exec, EnclosedStmts...>,
Types>
: HipStatementExecutor<Data, statement::For<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
hip::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
14 changes: 7 additions & 7 deletions include/RAJA/policy/hip/kernel/ForICount.hpp
Original file line number Diff line number Diff line change
@@ -103,20 +103,20 @@ template <typename Data,
struct HipStatementExecutor<
Data,
statement::ForICount<ArgumentId, ParamId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>
: public HipStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = HipStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -166,20 +166,20 @@ template <typename Data,
struct HipStatementExecutor<
Data,
statement::ForICount<ArgumentId, ParamId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>
: public HipStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = HipStatementExecutor<
Data,
statement::For<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -226,7 +226,7 @@ struct HipStatementExecutor<
statement::ForICount<ArgumentId, ParamId, seq_exec, EnclosedStmts...>,
Types>
: HipStatementExecutor<Data, statement::ForICount<ArgumentId,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
hip::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
27 changes: 16 additions & 11 deletions include/RAJA/policy/hip/kernel/HipKernel.hpp
Original file line number Diff line number Diff line change
@@ -87,7 +87,7 @@ namespace statement
*/
template <typename LaunchConfig, typename... EnclosedStmts>
struct HipKernelExt
: public internal::Statement<::RAJA::policy::hip::hip_exec<LaunchConfig, void, true>, EnclosedStmts...> {
: public internal::Statement<::RAJA::policy::hip::hip_exec<LaunchConfig, void, void, true>, EnclosedStmts...> {
};


@@ -263,7 +263,7 @@ struct HipLaunchHelper<hip_explicit_launch<async0, num_blocks, num_threads>,Stmt
inline static void recommended_blocks_threads(size_t shmem_size,
int &recommended_blocks, int &recommended_threads)
{
auto func = kernelGetter_t::get();
auto func = reinterpret_cast<const void*>(kernelGetter_t::get());

if (num_blocks <= 0) {

@@ -273,8 +273,10 @@ struct HipLaunchHelper<hip_explicit_launch<async0, num_blocks, num_threads>,Stmt
// determine blocks at runtime
// determine threads at runtime
//
::RAJA::hip::hip_occupancy_max_blocks_threads<Self>(
func, shmem_size, recommended_blocks, recommended_threads);
auto data = ::RAJA::hip::hip_occupancy_max_blocks_threads<Self>(
func, shmem_size);
recommended_blocks = data.func_max_blocks_per_device;
recommended_threads = data.func_max_threads_per_block;

} else {

@@ -284,8 +286,9 @@ struct HipLaunchHelper<hip_explicit_launch<async0, num_blocks, num_threads>,Stmt
//
recommended_threads = num_threads;

::RAJA::hip::hip_occupancy_max_blocks<Self, num_threads>(
func, shmem_size, recommended_blocks);
auto data = ::RAJA::hip::hip_occupancy_max_blocks<Self, num_threads>(
func, shmem_size);
recommended_blocks = data.func_max_blocks_per_sm * data.device_sm_per_device;

}

@@ -339,7 +342,7 @@ struct HipLaunchHelper<hip_explicit_launch<async0, num_blocks, num_threads>,Stmt
inline static void max_blocks(size_t shmem_size,
int &max_blocks, int actual_threads)
{
auto func = kernelGetter_t::get();
auto func = reinterpret_cast<const void*>(kernelGetter_t::get());

if (num_blocks <= 0) {

@@ -352,16 +355,18 @@ struct HipLaunchHelper<hip_explicit_launch<async0, num_blocks, num_threads>,Stmt
//
// determine blocks when actual_threads != num_threads
//
::RAJA::hip::hip_occupancy_max_blocks<Self>(
func, shmem_size, max_blocks, actual_threads);
auto data = ::RAJA::hip::hip_occupancy_max_blocks<Self>(
func, shmem_size, actual_threads);
max_blocks = data.func_max_blocks_per_sm * data.device_sm_per_device;

} else {

//
// determine blocks when actual_threads == num_threads
//
::RAJA::hip::hip_occupancy_max_blocks<Self, num_threads>(
func, shmem_size, max_blocks);
auto data = ::RAJA::hip::hip_occupancy_max_blocks<Self, num_threads>(
func, shmem_size);
max_blocks = data.func_max_blocks_per_sm * data.device_sm_per_device;

}

10 changes: 5 additions & 5 deletions include/RAJA/policy/hip/kernel/Tile.hpp
Original file line number Diff line number Diff line change
@@ -143,7 +143,7 @@ struct HipStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>, Types>
{

@@ -153,7 +153,7 @@ struct HipStatementExecutor<

using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>>;
using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>>;

static inline RAJA_DEVICE
void exec(Data &data, bool thread_active)
@@ -233,7 +233,7 @@ struct HipStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>, Types>
{

@@ -243,7 +243,7 @@ struct HipStatementExecutor<

using diff_t = segment_diff_type<ArgumentId, Data>;

using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>>;
using DimensionCalculator = KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>>;

static inline RAJA_DEVICE
void exec(Data &data, bool thread_active)
@@ -318,7 +318,7 @@ struct HipStatementExecutor<
Data,
statement::Tile<ArgumentId, TPol, seq_exec, EnclosedStmts...>, Types>
: HipStatementExecutor<Data, statement::Tile<ArgumentId, TPol,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
hip::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
14 changes: 7 additions & 7 deletions include/RAJA/policy/hip/kernel/TileTCount.hpp
Original file line number Diff line number Diff line change
@@ -131,22 +131,22 @@ struct HipStatementExecutor<
Data,
statement::TileTCount<ArgumentId, ParamId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>
: public HipStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = HipStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::sync, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::sync, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -209,22 +209,22 @@ struct HipStatementExecutor<
Data,
statement::TileTCount<ArgumentId, ParamId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>
: public HipStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types> {

using Base = HipStatementExecutor<
Data,
statement::Tile<ArgumentId,
RAJA::tile_fixed<chunk_size>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop, kernel_sync_requirement::none, IndexMapper>,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>, kernel_sync_requirement::none, IndexMapper>,
EnclosedStmts...>,
Types>;

@@ -281,7 +281,7 @@ struct HipStatementExecutor<
Data,
statement::TileTCount<ArgumentId, ParamId, TPol, seq_exec, EnclosedStmts...>, Types>
: HipStatementExecutor<Data, statement::TileTCount<ArgumentId, ParamId, TPol,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
hip::IndexGlobal<named_dim::x, named_usage::ignored, named_usage::ignored>>,
EnclosedStmts...>, Types>
18 changes: 9 additions & 9 deletions include/RAJA/policy/hip/kernel/internal.hpp
Original file line number Diff line number Diff line change
@@ -388,7 +388,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin

// specialization for strided loop sequential policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, named_usage::ignored, named_usage::ignored>>>
{
@@ -402,7 +402,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin

// specialization for strided loop thread policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, named_usage::unspecified, named_usage::ignored>>>
{
@@ -418,7 +418,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin
};
///
template<named_dim dim, int BLOCK_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, BLOCK_SIZE, named_usage::ignored>>>
{
@@ -436,7 +436,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin

// specialization for strided loop block policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, named_usage::ignored, named_usage::unspecified>>>
{
@@ -451,7 +451,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin
};
///
template<named_dim dim, int GRID_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, named_usage::ignored, GRID_SIZE>>>
{
@@ -469,7 +469,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin

// specialization for strided loop global policies
template<named_dim dim, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, named_usage::unspecified, named_usage::unspecified>>>
{
@@ -488,7 +488,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin
};
///
template<named_dim dim, int GRID_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, named_usage::unspecified, GRID_SIZE>>>
{
@@ -508,7 +508,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin
};
///
template<named_dim dim, int BLOCK_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, BLOCK_SIZE, named_usage::unspecified>>>
{
@@ -527,7 +527,7 @@ struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mappin
};
///
template<named_dim dim, int BLOCK_SIZE, int GRID_SIZE, kernel_sync_requirement sync>
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop,
struct KernelDimensionCalculator<RAJA::policy::hip::hip_indexer<iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
hip::IndexGlobal<dim, BLOCK_SIZE, GRID_SIZE>>>
{
24 changes: 12 additions & 12 deletions include/RAJA/policy/hip/launch.hpp
Original file line number Diff line number Diff line change
@@ -433,7 +433,7 @@ struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::Direc
};

template <typename SEGMENT, typename IndexMapper>
struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
@@ -457,7 +457,7 @@ struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::Strid
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1>
struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1>,
@@ -493,7 +493,7 @@ struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::Strid
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1, typename IndexMapper2>
struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1,
@@ -625,7 +625,7 @@ struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping:
};

template <typename SEGMENT, typename IndexMapper>
struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
@@ -649,7 +649,7 @@ struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping:
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1>
struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1>,
@@ -686,7 +686,7 @@ struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping:
};

template <typename SEGMENT, typename IndexMapper0, typename IndexMapper1, typename IndexMapper2>
struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopICountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1,
@@ -810,18 +810,18 @@ struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mappin
};

template<typename SEGMENT, kernel_sync_requirement sync, typename IndexMapper0>
struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
IndexMapper0>,
SEGMENT>
: LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
: LoopExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
sync,
IndexMapper0>,
SEGMENT>
{};

template<typename SEGMENT, typename IndexMapper0, typename IndexMapper1>
struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1>,
@@ -852,7 +852,7 @@ struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mappin
};

template<typename SEGMENT, typename IndexMapper0, typename IndexMapper1, typename IndexMapper2>
struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mapping::StridedLoop,
struct LoopExecute<RAJA::policy::hip::hip_flatten_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper0,
IndexMapper1,
@@ -914,7 +914,7 @@ struct TileExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::Direc
};

template <typename SEGMENT, typename IndexMapper>
struct TileExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct TileExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
@@ -964,7 +964,7 @@ struct TileTCountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping:
};

template <typename SEGMENT, typename IndexMapper>
struct TileTCountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop,
struct TileTCountExecute<RAJA::policy::hip::hip_indexer<RAJA::iteration_mapping::StridedLoop<named_usage::unspecified>,
kernel_sync_requirement::none,
IndexMapper>,
SEGMENT> {
279 changes: 264 additions & 15 deletions include/RAJA/policy/hip/policy.hpp

Large diffs are not rendered by default.

750 changes: 365 additions & 385 deletions include/RAJA/policy/hip/reduce.hpp

Large diffs are not rendered by default.

12 changes: 8 additions & 4 deletions include/RAJA/policy/hip/scan.hpp
Original file line number Diff line number Diff line change
@@ -49,14 +49,15 @@ namespace scan
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
bool Async,
typename InputIter,
typename Function>
RAJA_INLINE
resources::EventProxy<resources::Hip>
inclusive_inplace(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
InputIter begin,
InputIter end,
Function binary_op)
@@ -121,6 +122,7 @@ inclusive_inplace(
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
bool Async,
typename InputIter,
typename Function,
@@ -129,7 +131,7 @@ RAJA_INLINE
resources::EventProxy<resources::Hip>
exclusive_inplace(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
InputIter begin,
InputIter end,
Function binary_op,
@@ -198,6 +200,7 @@ exclusive_inplace(
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
bool Async,
typename InputIter,
typename OutputIter,
@@ -206,7 +209,7 @@ RAJA_INLINE
resources::EventProxy<resources::Hip>
inclusive(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
InputIter begin,
InputIter end,
OutputIter out,
@@ -271,6 +274,7 @@ inclusive(
*/
template <typename IterationMapping,
typename IterationGetter,
typename Concretizer,
bool Async,
typename InputIter,
typename OutputIter,
@@ -280,7 +284,7 @@ RAJA_INLINE
resources::EventProxy<resources::Hip>
exclusive(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
InputIter begin,
InputIter end,
OutputIter out,
66 changes: 42 additions & 24 deletions include/RAJA/policy/hip/sort.hpp
Original file line number Diff line number Diff line change
@@ -73,7 +73,9 @@ namespace detail
/*!
\brief static assert unimplemented stable sort
*/
template <typename IterationMapping, typename IterationGetter, bool Async, typename Iter, typename Compare>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename Iter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
concepts::negate<concepts::all_of<
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
@@ -83,7 +85,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Hip>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<Iter>>>>>>>
stable(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
Iter,
Iter,
Compare)
@@ -102,13 +104,15 @@ stable(
/*!
\brief stable sort given range in ascending order
*/
template <typename IterationMapping, typename IterationGetter, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
stable(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
Iter begin,
Iter end,
operators::less<RAJA::detail::IterVal<Iter>>)
@@ -190,13 +194,15 @@ stable(
/*!
\brief stable sort given range in descending order
*/
template <typename IterationMapping, typename IterationGetter, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
stable(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
Iter begin,
Iter end,
operators::greater<RAJA::detail::IterVal<Iter>>)
@@ -279,7 +285,9 @@ stable(
/*!
\brief static assert unimplemented sort
*/
template <typename IterationMapping, typename IterationGetter, bool Async, typename Iter, typename Compare>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename Iter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
concepts::negate<concepts::all_of<
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
@@ -289,7 +297,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Hip>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<Iter>>>>>>>
unstable(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
Iter,
Iter,
Compare)
@@ -308,13 +316,15 @@ unstable(
/*!
\brief sort given range in ascending order
*/
template <typename IterationMapping, typename IterationGetter, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
unstable(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async> p,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async> p,
Iter begin,
Iter end,
operators::less<RAJA::detail::IterVal<Iter>> comp)
@@ -325,13 +335,15 @@ unstable(
/*!
\brief sort given range in descending order
*/
template <typename IterationMapping, typename IterationGetter, bool Async, typename Iter>
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename Iter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<Iter>>,
std::is_pointer<Iter>>
unstable(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async> p,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async> p,
Iter begin,
Iter end,
operators::greater<RAJA::detail::IterVal<Iter>> comp)
@@ -343,7 +355,8 @@ unstable(
/*!
\brief static assert unimplemented stable sort pairs
*/
template <typename IterationMapping, typename IterationGetter, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename KeyIter, typename ValIter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
concepts::negate<concepts::all_of<
@@ -355,7 +368,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Hip>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<KeyIter>>>>>>>
stable_pairs(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
KeyIter,
KeyIter,
ValIter,
@@ -379,15 +392,16 @@ stable_pairs(
/*!
\brief stable sort given range of pairs in ascending order of keys
*/
template <typename IterationMapping, typename IterationGetter, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
stable_pairs(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
@@ -483,15 +497,16 @@ stable_pairs(
/*!
\brief stable sort given range of pairs in descending order of keys
*/
template <typename IterationMapping, typename IterationGetter, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
stable_pairs(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
@@ -588,7 +603,8 @@ stable_pairs(
/*!
\brief static assert unimplemented sort pairs
*/
template <typename IterationMapping, typename IterationGetter, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename KeyIter, typename ValIter, typename Compare>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
concepts::negate<concepts::all_of<
@@ -600,7 +616,7 @@ concepts::enable_if_t<resources::EventProxy<resources::Hip>,
camp::is_same<Compare, operators::greater<RAJA::detail::IterVal<KeyIter>>>>>>>
unstable_pairs(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async>,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async>,
KeyIter,
KeyIter,
ValIter,
@@ -624,15 +640,16 @@ unstable_pairs(
/*!
\brief stable sort given range of pairs in ascending order of keys
*/
template <typename IterationMapping, typename IterationGetter, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
unstable_pairs(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async> p,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async> p,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
@@ -644,15 +661,16 @@ unstable_pairs(
/*!
\brief stable sort given range of pairs in descending order of keys
*/
template <typename IterationMapping, typename IterationGetter, bool Async,
template <typename IterationMapping, typename IterationGetter,
typename Concretizer, bool Async,
typename KeyIter, typename ValIter>
concepts::enable_if_t<resources::EventProxy<resources::Hip>,
type_traits::is_arithmetic<RAJA::detail::IterVal<KeyIter>>,
std::is_pointer<KeyIter>,
std::is_pointer<ValIter>>
unstable_pairs(
resources::Hip hip_res,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Async> p,
::RAJA::policy::hip::hip_exec<IterationMapping, IterationGetter, Concretizer, Async> p,
KeyIter keys_begin,
KeyIter keys_end,
ValIter vals_begin,
35 changes: 0 additions & 35 deletions include/RAJA/policy/loop.hpp

This file was deleted.

87 changes: 0 additions & 87 deletions include/RAJA/policy/loop/policy.hpp

This file was deleted.

16 changes: 8 additions & 8 deletions include/RAJA/policy/sycl/launch.hpp
Original file line number Diff line number Diff line change
@@ -56,13 +56,13 @@ struct LaunchExecute<RAJA::sycl_launch_t<async, 0>> {
// Compute the number of blocks and threads
//

const ::sycl::range<3> blockSize(params.threads.value[0],
const ::sycl::range<3> blockSize(params.threads.value[2],
params.threads.value[1],
params.threads.value[2]);
params.threads.value[0]);

const ::sycl::range<3> gridSize(params.threads.value[0] * params.teams.value[0],
const ::sycl::range<3> gridSize(params.threads.value[2] * params.teams.value[2],
params.threads.value[1] * params.teams.value[1],
params.threads.value[2] * params.teams.value[2]);
params.threads.value[0] * params.teams.value[0]);

// Only launch kernel if we have something to iterate over
constexpr size_t zero = 0;
@@ -138,13 +138,13 @@ struct LaunchExecute<RAJA::sycl_launch_t<async, 0>> {
// Compute the number of blocks and threads
//

const ::sycl::range<3> blockSize(params.threads.value[0],
const ::sycl::range<3> blockSize(params.threads.value[2],
params.threads.value[1],
params.threads.value[2]);
params.threads.value[0]);

const ::sycl::range<3> gridSize(params.threads.value[0] * params.teams.value[0],
const ::sycl::range<3> gridSize(params.threads.value[2] * params.teams.value[2],
params.threads.value[1] * params.teams.value[1],
params.threads.value[2] * params.teams.value[2]);
params.threads.value[0] * params.teams.value[0]);

// Only launch kernel if we have something to iterate over
constexpr size_t zero = 0;
21 changes: 19 additions & 2 deletions include/RAJA/util/Operators.hpp
Original file line number Diff line number Diff line change
@@ -42,9 +42,20 @@ namespace operators
namespace detail
{

// truly associative (does not include fp add/multiply)
struct associative_tag {
};

// associative up to floating point rounding differences
struct fp_associative_tag : associative_tag {
};

// get associativity tag appropriate for the type
template < typename T >
using associative_or_fp_associative_tag =
std::conditional_t<std::is_floating_point<std::decay_t<T>>::value,
fp_associative_tag, associative_tag>;

template <typename Arg1, typename Arg2, typename Result>
struct binary_function {
using first_argument_type = Arg1;
@@ -327,7 +338,7 @@ static_assert(check<unsigned long long>(),

template <typename Ret, typename Arg1 = Ret, typename Arg2 = Arg1>
struct plus : public detail::binary_function<Arg1, Arg2, Ret>,
detail::associative_tag {
detail::associative_or_fp_associative_tag<Ret> {
RAJA_HOST_DEVICE constexpr Ret operator()(const Arg1& lhs,
const Arg2& rhs) const
{
@@ -347,7 +358,7 @@ struct minus : public detail::binary_function<Arg1, Arg2, Ret> {

template <typename Ret, typename Arg1 = Ret, typename Arg2 = Arg1>
struct multiplies : public detail::binary_function<Arg1, Arg2, Ret>,
detail::associative_tag {
detail::associative_or_fp_associative_tag<Ret> {

RAJA_HOST_DEVICE constexpr Ret operator()(const Arg1& lhs,
const Arg2& rhs) const
@@ -569,6 +580,12 @@ struct is_associative {
std::is_base_of<detail::associative_tag, T>::value;
};

template <typename T>
struct is_fp_associative {
static constexpr const bool value =
std::is_base_of<detail::fp_associative_tag, T>::value;
};

template <typename Arg1, typename Arg2 = Arg1>
struct safe_plus
: public plus<Arg1,
Loading

0 comments on commit 751fc03

Please sign in to comment.