[Offload][AMDGPU] Correctly handle variable implicit argument sizes #142199
Conversation
@llvm/pr-subscribers-backend-amdgpu

Author: Joseph Huber (jhuber6)

Changes

Summary: The size of the implicit argument struct can vary depending on optimizations; it is not always the size of the full struct as declared. Additionally, the implicit arguments are always aligned on a pointer boundary. This patch updates the handling to use the correctly aligned offset and only initializes the members that are contained in the reported size.

Additionally, we modify the `alloc` and `free` routines to allow `alloc(0)` and `free(nullptr)`, as these are mandated by the C standard and allow us to easily handle cases where the user calls a kernel with no arguments.

Full diff: https://github.com/llvm/llvm-project/pull/142199.diff

2 Files Affected:
- offload/plugins-nextgen/amdgpu/src/rtl.cpp (modified)
- offload/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h (modified)
diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
index 2733796611d9b..88f3b1ac5416c 100644
--- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -420,7 +420,7 @@ struct AMDGPUMemoryManagerTy : public DeviceAllocatorTy {
assert(PtrStorage && "Invalid pointer storage");
*PtrStorage = MemoryManager->allocate(Size, nullptr);
- if (*PtrStorage == nullptr)
+ if (Size && *PtrStorage == nullptr)
return Plugin::error(ErrorCode::OUT_OF_RESOURCES,
"failure to allocate from AMDGPU memory manager");
@@ -429,7 +429,8 @@ struct AMDGPUMemoryManagerTy : public DeviceAllocatorTy {
/// Release an allocation to be reused.
Error deallocate(void *Ptr) {
- assert(Ptr && "Invalid pointer");
+ if (!Ptr)
+ return Plugin::success();
if (MemoryManager->free(Ptr))
return Plugin::error(ErrorCode::UNKNOWN,
@@ -3365,7 +3366,7 @@ Error AMDGPUKernelTy::launchImpl(GenericDeviceTy &GenericDevice,
KernelLaunchParamsTy LaunchParams,
AsyncInfoWrapperTy &AsyncInfoWrapper) const {
if (ArgsSize != LaunchParams.Size &&
- ArgsSize != LaunchParams.Size + getImplicitArgsSize())
+ ArgsSize > LaunchParams.Size + getImplicitArgsSize())
return Plugin::error(ErrorCode::INVALID_ARGUMENT,
"mismatch of kernel arguments size");
@@ -3401,23 +3402,39 @@ Error AMDGPUKernelTy::launchImpl(GenericDeviceTy &GenericDevice,
if (auto Err = AMDGPUDevice.getStream(AsyncInfoWrapper, Stream))
return Err;
- hsa_utils::AMDGPUImplicitArgsTy *ImplArgs = nullptr;
- if (ArgsSize == LaunchParams.Size + getImplicitArgsSize()) {
- ImplArgs = reinterpret_cast<hsa_utils::AMDGPUImplicitArgsTy *>(
- utils::advancePtr(AllArgs, LaunchParams.Size));
-
- // Set the COV5+ implicit arguments to the appropriate values.
- std::memset(ImplArgs, 0, getImplicitArgsSize());
- ImplArgs->BlockCountX = NumBlocks[0];
- ImplArgs->BlockCountY = NumBlocks[1];
- ImplArgs->BlockCountZ = NumBlocks[2];
- ImplArgs->GroupSizeX = NumThreads[0];
- ImplArgs->GroupSizeY = NumThreads[1];
- ImplArgs->GroupSizeZ = NumThreads[2];
- ImplArgs->GridDims = NumBlocks[2] * NumThreads[2] > 1
- ? 3
- : 1 + (NumBlocks[1] * NumThreads[1] != 1);
- ImplArgs->DynamicLdsSize = KernelArgs.DynCGroupMem;
+ uint64_t ImplArgsOffset = utils::roundUp(
+ LaunchParams.Size, alignof(hsa_utils::AMDGPUImplicitArgsTy));
+ if (ArgsSize > ImplArgsOffset) {
+ hsa_utils::AMDGPUImplicitArgsTy *ImplArgs =
+ reinterpret_cast<hsa_utils::AMDGPUImplicitArgsTy *>(
+ utils::advancePtr(AllArgs, ImplArgsOffset));
+
+ // Set the COV5+ implicit arguments to the appropriate values if present.
+ uint64_t ImplArgsSize = ArgsSize - ImplArgsOffset;
+ std::memset(ImplArgs, 0, ImplArgsSize);
+
+ using ImplArgsTy = hsa_utils::AMDGPUImplicitArgsTy;
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::BlockCountX, ImplArgsSize,
+ NumBlocks[0]);
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::BlockCountY, ImplArgsSize,
+ NumBlocks[1]);
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::BlockCountZ, ImplArgsSize,
+ NumBlocks[2]);
+
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::GroupSizeX, ImplArgsSize,
+ NumThreads[0]);
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::GroupSizeY, ImplArgsSize,
+ NumThreads[1]);
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::GroupSizeZ, ImplArgsSize,
+ NumThreads[2]);
+
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::GridDims, ImplArgsSize,
+ NumBlocks[2] * NumThreads[2] > 1
+ ? 3
+ : 1 + (NumBlocks[1] * NumThreads[1] != 1));
+
+ hsa_utils::initImplArg(ImplArgs, &ImplArgsTy::DynamicLdsSize, ImplArgsSize,
+ KernelArgs.DynCGroupMem);
}
// Push the kernel launch into the stream.
diff --git a/offload/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h b/offload/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h
index 609ead942dbb3..77c756e006029 100644
--- a/offload/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h
+++ b/offload/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h
@@ -13,6 +13,7 @@
#include <cstdint>
#include "Shared/Debug.h"
+#include "Shared/Utils.h"
#include "Utils/ELF.h"
#include "omptarget.h"
@@ -26,7 +27,7 @@ namespace plugin {
namespace hsa_utils {
// The implicit arguments of COV5 AMDGPU kernels.
-struct AMDGPUImplicitArgsTy {
+struct alignas(alignof(void *)) AMDGPUImplicitArgsTy {
uint32_t BlockCountX;
uint32_t BlockCountY;
uint32_t BlockCountZ;
@@ -60,6 +61,18 @@ inline Error readAMDGPUMetaDataFromImage(
return Err;
}
+/// Initializes the HSA implicit argument if the struct size permits it. This is
+/// necessary because optimizations can modify the size of the struct if
+/// portions of it are unused.
+template <typename MemberTy, typename T>
+void initImplArg(AMDGPUImplicitArgsTy *Base,
+ MemberTy AMDGPUImplicitArgsTy::*Member, size_t AvailableSize,
+ T Value) {
+ uint64_t Offset = utils::getPtrDiff(&(Base->*Member), Base);
+ if (Offset + sizeof(MemberTy) <= AvailableSize)
+ Base->*Member = static_cast<MemberTy>(Value);
+}
+
} // namespace hsa_utils
} // namespace plugin
} // namespace target
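Aside on the `allocate`/`deallocate` hunks above: a minimal standalone sketch of the C-standard semantics the patch mirrors, using plain `malloc`/`free` rather than the plugin's memory manager. A kernel launched with no explicit arguments and fully optimized-out implicit arguments ends up requesting a zero-byte buffer, so both paths must be tolerated.

```cpp
#include <cstdlib>

int main() {
  // malloc(0) may legitimately return nullptr or a unique pointer; either way
  // the allocation "succeeded" and must not be treated as an error.
  void *Args = std::malloc(0);

  // free(nullptr) is defined to be a no-op, so the zero-argument path can
  // release its (possibly null) buffer unconditionally.
  std::free(Args);
  return 0;
}
```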
@llvm/pr-subscribers-offload

Author: Joseph Huber (jhuber6)

(Identical summary and full diff to the @llvm/pr-subscribers-backend-amdgpu comment above.)
Summary: The size of the implicit argument struct can vary depending on optimizations; it is not always the size of the full struct as declared. Additionally, the implicit arguments are always aligned on a pointer boundary. This patch updates the handling to use the correctly aligned offset and only initializes the members that are contained in the reported size. Additionally, we modify the `alloc` and `free` routines to allow `alloc(0)` and `free(nullptr)`, as these are mandated by the C standard and allow us to easily handle cases where the user calls a kernel with no arguments.
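A small illustration of the offset arithmetic described above, using made-up sizes (12 bytes of explicit arguments, a 256-byte kernarg segment) rather than values from a real kernel: the implicit-argument block starts at the explicit-argument size rounded up to pointer alignment, and only the bytes the kernel reports beyond that offset are initialized.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical sizes, chosen only to show the computation.
  uint64_t LaunchParamsSize = 12;  // bytes of explicit kernel arguments
  uint64_t ArgsSize = 256;         // total kernarg size reported for the kernel
  uint64_t Align = alignof(void *);

  // Round the explicit-argument size up to pointer alignment; the implicit
  // arguments begin at this offset.
  uint64_t ImplArgsOffset = (LaunchParamsSize + Align - 1) & ~(Align - 1);

  // Only this many bytes of the implicit-argument struct actually exist.
  uint64_t ImplArgsSize =
      ArgsSize > ImplArgsOffset ? ArgsSize - ImplArgsOffset : 0;

  std::printf("implicit args at offset %llu, %llu bytes available\n",
              (unsigned long long)ImplArgsOffset,
              (unsigned long long)ImplArgsSize);
  return 0;
}
```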
If the implicit argument struct is not fully set, how can we guarantee the order and existence of the individual arguments?
> Additionally, we modify the `alloc` and `free` routines to allow `alloc(0)` and `free(nullptr)` as these are mandated by the C standard and allow us to easily handle cases where the user calls a kernel with no arguments.
A separate PR, please.
Hold on, what "optimisation" is changing this struct? It's exposed in the ABI, in the literal sense, so LLVM really should not be hacking around with it.
You can observe it with optimizations enabled by checking the notes; @arsenm would probably know explicitly where it happens. It's probably fine as long as the runtime is aware of this, since it reports the actual size of the struct, which lets you know how much of the implicit args it's using.
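A sketch of where that "actual size" comes from, assuming a kernel symbol already resolved from a loaded HSA executable (error handling omitted). This is the standard HSA query rather than code from this patch.

```cpp
#include <hsa/hsa.h>
#include <cstdint>

// Returns the kernarg segment size the code object reports for this kernel.
// Explicit arguments come first; whatever space remains (if any) is the
// possibly-truncated implicit-argument block the patch guards against.
uint32_t getKernargSegmentSize(hsa_executable_symbol_t KernelSymbol) {
  uint32_t KernargSize = 0;
  hsa_executable_symbol_get_info(
      KernelSymbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE,
      &KernargSize);
  return KernargSize;
}
```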