Skip to content

Commit 4fec86b

Browse files
authored
【CUDA Kernel No.12】fused_stack_transpose_quant_kernel算子Kernel修复 (#75658)
1 parent e7a1898 commit 4fec86b

File tree

2 files changed

+37
-0
lines changed

2 files changed

+37
-0
lines changed

paddle/phi/kernels/fusion/gpu/fused_stack_transpose_quant_kernel.cu

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
// See the License for the specific language governing permissions and
1313
// limitations under the License.
1414

15+
#include "paddle/phi/kernels/fusion/gpu/fused_stack_transpose_quant_kernel.h"
1516
#include "paddle/phi/backends/gpu/gpu_context.h"
1617
#include "paddle/phi/core/dense_tensor.h"
1718
#include "paddle/phi/core/kernel_registry.h"
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#pragma once
16+
17+
#include <vector>
18+
#include "paddle/phi/core/dense_tensor.h"
19+
20+
namespace phi {
namespace fusion {

// Declarations for the fused stack(+transpose)+quantize GPU kernels.
// Both kernels take a list of same-shaped input tensors `x`, fuse a stack
// along a new leading axis with a quantization pass, and produce:
//   out   - the quantized stacked tensor
//   scale - the per-block (de)quantization scale factors
// NOTE(review): the exact quantization scheme (block size, target dtype,
// rounding mode) is defined in the .cu implementation, not visible from this
// header — confirm against fused_stack_transpose_quant_kernel.cu.

// Stacks the tensors in `x` and quantizes the result into `out`, writing
// scales into `scale`. Layout of the stacked data is preserved (no transpose).
template <typename T, typename Context>
void FusedStackQuantKernel(const Context& dev_ctx,
                           const std::vector<const DenseTensor*>& x,
                           DenseTensor* out,
                           DenseTensor* scale);

// Same as FusedStackQuantKernel, but additionally transposes the stacked
// result before/while quantizing (presumably the trailing two dims — TODO
// confirm the transposed axes in the .cu implementation).
template <typename T, typename Context>
void FusedStackTransposeQuantKernel(const Context& dev_ctx,
                                    const std::vector<const DenseTensor*>& x,
                                    DenseTensor* out,
                                    DenseTensor* scale);

}  // namespace fusion
}  // namespace phi

0 commit comments

Comments
 (0)