From e3e6290310bf97fd4d9ebb3bf022e8e5d58a060a Mon Sep 17 00:00:00 2001 From: pytorchbot Date: Fri, 3 Jan 2025 19:17:05 -0600 Subject: [PATCH] [ET-VK] Minor fix to conv 2d op using wg_size from create_conv2d_global_wg_size to determine local wg size. (#7500) * [ET-VK] Reduced int precision for all int storage in conv pw op to improve performance. Pull Request resolved: https://github.com/pytorch/executorch/pull/7447 This diff reduces the precision of all int storage in the conv pw op to improve performance. The code changes include adding the extension GL_EXT_shader_explicit_arithmetic_types_int16 and changing the data type of ints to uint16. ghstack-source-id: 260166244 @exported-using-ghexport Differential Revision: [D67674212](https://our.internmc.facebook.com/intern/diff/D67674212/) * [ET-VK] Minor fix to conv 2d op using wg_size from create_conv2d_global_wg_size to determine local wg size. Pull Request resolved: https://github.com/pytorch/executorch/pull/7450 This diff contains changes to the Convolution.cpp file in the Vulkan backend of Executorch. The changes involve updating the code to use the create_conv2d_global_wg_size function to determine the local workgroup size for the convolution operation. This is done to ensure that the correct workgroup size is used for the operation, which can improve performance. ghstack-source-id: 260166246 @exported-using-ghexport Differential Revision: [D67676422](https://our.internmc.facebook.com/intern/diff/D67676422/) * [ET-VK] Modify conv 2d pw op shader and dispatch settings to linearly dispatch work accounting for texture linearity to improve performance. (#7501) Pull Request resolved: https://github.com/pytorch/executorch/pull/7452 This diff modifies the convolution 2D pointwise op shader and dispatch settings to linearly dispatch work accounting for texture linearity to improve performance. 
ghstack-source-id: 260166247 @exported-using-ghexport Differential Revision: [D67683411](https://our.internmc.facebook.com/intern/diff/D67683411/) Co-authored-by: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> --------- Co-authored-by: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> --- backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl | 7 ++++++- backends/vulkan/runtime/graph/ops/impl/Convolution.cpp | 10 ++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl index b1950f970e..9d1f6c3bd9 100644 --- a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl +++ b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl @@ -40,7 +40,12 @@ layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in; * size is only 1x1, making it easier to re-use loaded texels from t_kernel. */ void main() { - const u16vec3 gpos = u16vec3(gl_GlobalInvocationID); + const uint16_t out_limits_y_scaled = uint16_t((out_limits.y + TILE_SIZE - 1) / TILE_SIZE); + + const u16vec3 gpos = u16vec3( + gl_GlobalInvocationID.x / (out_limits_y_scaled * out_limits.z), + (gl_GlobalInvocationID.x / out_limits.z) % out_limits_y_scaled, + gl_GlobalInvocationID.x % out_limits.z); // Output position for TILE_SIZE = 2 // +--------+--------+ diff --git a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp index 1cdd7315f1..4f123cb833 100644 --- a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp +++ b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp @@ -370,11 +370,17 @@ void add_conv2d_node( weight_data, clamp_out); + utils::uvec3 wg_size = create_conv2d_global_wg_size(graph, method, out); + + if (method == Conv2dMethod::Pointwise) { + wg_size = {wg_size[0] * wg_size[1] * wg_size[2], 1, 1}; + } + graph.execute_nodes().emplace_back(new DispatchNode( graph, shader, - 
create_conv2d_global_wg_size(graph, method, out), - graph.create_local_wg_size(out), + wg_size, + graph.create_local_wg_size(wg_size), // Inputs and Outputs {{out, vkapi::MemoryAccessType::WRITE}, {{in, arg_weight, arg_bias}, vkapi::MemoryAccessType::READ}},