-
Notifications
You must be signed in to change notification settings - Fork 2.9k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'PaddlePaddle:develop' into dev_unified_checkpoint
- Loading branch information
Showing
75 changed files
with
4,865 additions
and
173 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,69 @@ | ||
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. | ||
// | ||
// Licensed under the Apache License, Version 2.0 (the "License"); | ||
// you may not use this file except in compliance with the License. | ||
// You may obtain a copy of the License at | ||
// | ||
// http://www.apache.org/licenses/LICENSE-2.0 | ||
// | ||
// Unless required by applicable law or agreed to in writing, software | ||
// distributed under the License is distributed on an "AS IS" BASIS, | ||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
// See the License for the specific language governing permissions and | ||
// limitations under the License. | ||
|
||
#include <stdio.h> | ||
#include <string.h> | ||
#include <sys/ipc.h> | ||
#include <sys/msg.h> | ||
#include <sys/types.h> | ||
#include "paddle/extension.h" | ||
|
||
// Maximum number of tokens a single queue message can carry.
#define MAX_BSZ 512

// Payload layout for the SysV message queue shared with the producer
// process (received below via msgrcv with msgtyp 0, i.e. first message).
struct msgdata {
  long mtype;              // SysV message type (must be > 0 on the sender side)
  int mtext[MAX_BSZ + 2];  // [0]=stop_flag, [1]=bsz, [2..bsz+1]=tokens
};
|
||
// Pops one generation result from the SysV message queue and writes it into
// `x` in place (the op maps x -> x_out).
//
// Args:
//   x:         int64 tensor receiving [stop_flag, bsz, tokens...]; must hold
//              at least MAX_BSZ + 2 elements — presumably guaranteed by the
//              Python caller, TODO confirm.
//   rank_id:   only rank 0 consumes the queue; other ranks return untouched.
//   wait_flag: true -> block until a message arrives; false -> IPC_NOWAIT.
//
// On failure / empty queue, out_data[0] is set to the sentinel -2.
void GetOutput(const paddle::Tensor& x,
               int64_t rank_id,
               bool wait_flag) {
  if (rank_id > 0) return;

  static struct msgdata msg_rcv;

  // Key and queue are created once per process and reused across calls.
  static key_t key = ftok("./", 1);
  static int msgid = msgget(key, IPC_CREAT | 0666);

  int64_t* out_data = const_cast<int64_t*>(x.data<int64_t>());

  // msgsz is the size of the mtext payload, not the whole struct.
  // (The original hardcoded `* 4`, assuming sizeof(int) == 4.)
  const size_t payload_size = sizeof(msg_rcv.mtext);
  const int recv_flags = wait_flag ? 0 : IPC_NOWAIT;
  const ssize_t ret = msgrcv(msgid, &msg_rcv, payload_size, 0, recv_flags);
  if (ret == -1) {
    // No message available (or queue error): signal "read none".
    out_data[0] = -2;
    out_data[1] = 0;
    return;
  }

  // NOTE(review): bsz is trusted from the producer; values > MAX_BSZ would
  // overrun mtext — confirm the sender clamps it.
  const int bsz = msg_rcv.mtext[1];
  for (int i = 0; i < bsz + 2; i++) {
    out_data[i] = static_cast<int64_t>(msg_rcv.mtext[i]);
  }
}
|
||
// Registers `get_output`: reads one message from the SysV queue into `x`.
// `x_out` aliases `x` (in-place op), so callers see the result in the tensor
// they passed in. rank_id/wait_flag are compile-time-declared attributes
// forwarded to GetOutput.
PD_BUILD_OP(get_output)
    .Inputs({"x"})
    .Attrs({"rank_id: int64_t",
            "wait_flag: bool"})
    .Outputs({"x_out"})
    .SetInplaceMap({{"x", "x_out"}})
    .SetKernelFn(PD_KERNEL(GetOutput));
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,96 @@ | ||
#include "paddle/extension.h" | ||
|
||
// Packs the valid tokens of every sequence into a contiguous output buffer,
// dropping the right padding. One CUDA block handles one batch entry; its
// threads stride over that entry's tokens.
__global__ void RemovePaddingV2(int64_t *output_data,
                                const int64_t *input_data,
                                const int *seq_lens,
                                const int *cum_offsets,
                                const int sequence_length) {
  const int batch_idx = blockIdx.x;
  const int valid_len = seq_lens[batch_idx];
  // Source rows are padded to sequence_length; destination is shifted left
  // by the padding accumulated over all preceding batch entries.
  const int src_base = batch_idx * sequence_length;
  const int dst_base = src_base - cum_offsets[batch_idx];

  for (int token = threadIdx.x; token < valid_len; token += blockDim.x) {
    output_data[dst_base + token] = input_data[src_base + token];
  }
}
|
||
// For each batch entry, writes the per-token padding offset and the
// cumulative sequence-length prefix arrays (cu_seqlens_q/k) used by
// attention kernels. One block per batch entry.
__global__ void GetPaddingOffsetKernelV2(int *padding_offset,
                                         int *cum_offsets_out,
                                         int *cu_seqlens_q,
                                         int *cu_seqlens_k,
                                         const int *cum_offsets,
                                         const int *seq_lens,
                                         const int max_seq_len) {
  const int batch_idx = blockIdx.x;
  const int lane = threadIdx.x;
  // Padding accumulated by all *previous* entries (exclusive prefix).
  const int prev_offset = (batch_idx == 0) ? 0 : cum_offsets[batch_idx - 1];

  for (int pos = lane; pos < seq_lens[batch_idx]; pos += blockDim.x) {
    padding_offset[batch_idx * max_seq_len - prev_offset + pos] = prev_offset;
  }

  if (lane == 0) {
    cum_offsets_out[batch_idx] = prev_offset;
    const int seq_end = (batch_idx + 1) * max_seq_len - cum_offsets[batch_idx];
    cu_seqlens_q[batch_idx + 1] = seq_end;
    cu_seqlens_k[batch_idx + 1] = seq_end;
  }
}
|
||
|
||
// Host launcher: given padded input_ids and per-batch cumulative padding
// offsets, produces the de-padded token buffer plus the offset/prefix arrays
// consumed by downstream attention kernels.
//
// Returns: {x_remove_padding, cum_offsets_out, padding_offset,
//           cu_seqlens_q, cu_seqlens_k}.
std::vector<paddle::Tensor> GetPaddingOffsetV2(const paddle::Tensor& input_ids,
                                               const paddle::Tensor& cum_offsets,
                                               const paddle::Tensor& token_num,
                                               const paddle::Tensor& seq_len) {
  auto cu_stream = input_ids.stream();
  std::vector<int64_t> input_ids_shape = input_ids.shape();
  const int bsz = seq_len.shape()[0];
  const int seq_length = input_ids_shape[1];
  auto cum_offsets_out = cum_offsets.copy_to(cum_offsets.place(), false);
  // token_num is read on the host, so it must be copied to CPU first.
  auto cpu_token_num = token_num.copy_to(paddle::CPUPlace(), false);

  const int token_num_data = cpu_token_num.data<int64_t>()[0];
  auto x_remove_padding = paddle::full({token_num_data}, 0, paddle::DataType::INT64, input_ids.place());
  auto padding_offset = paddle::full({token_num_data}, 0, paddle::DataType::INT32, input_ids.place());
  auto cu_seqlens_q = paddle::full({bsz + 1}, 0, paddle::DataType::INT32, input_ids.place());
  auto cu_seqlens_k = paddle::full({bsz + 1}, 0, paddle::DataType::INT32, input_ids.place());

  // Round token count up to a warp multiple, capped at 128 threads.
  int block_dim = (token_num_data + 32 - 1) / 32 * 32;
  if (block_dim > 128) block_dim = 128;
  // Guard: a zero token count would otherwise yield blockDim 0, which is an
  // invalid launch configuration.
  if (block_dim < 32) block_dim = 32;

  GetPaddingOffsetKernelV2<<<bsz, 128, 0, cu_stream>>>(
      padding_offset.data<int>(),
      cum_offsets_out.data<int>(),
      cu_seqlens_q.data<int>(),
      cu_seqlens_k.data<int>(),
      cum_offsets.data<int>(),
      seq_len.data<int>(),
      seq_length);
  RemovePaddingV2<<<bsz, block_dim, 0, cu_stream>>>(
      x_remove_padding.data<int64_t>(),
      input_ids.data<int64_t>(),
      seq_len.data<int>(),
      cum_offsets_out.data<int>(),
      seq_length);
  return {x_remove_padding, cum_offsets_out, padding_offset, cu_seqlens_q, cu_seqlens_k};
}
|
||
// Static shape inference for get_padding_offset_v2. Token-count-dependent
// dims (x_remove_padding, padding_offset) are unknown at compile time (-1);
// the prefix arrays have bsz + 1 entries.
std::vector<std::vector<int64_t>> GetPaddingOffsetV2InferShape(const std::vector<int64_t>& input_ids_shape,
                                                               const std::vector<int64_t>& cum_offsets_shape,
                                                               const std::vector<int64_t>& token_num_shape,
                                                               const std::vector<int64_t>& seq_len_shape) {
  const int64_t bsz = seq_len_shape[0];
  return {{-1}, {bsz}, {-1}, {bsz + 1}, {bsz + 1}};
}
|
||
// Static dtype inference: the de-padded token buffer keeps input_ids' dtype;
// the four index/offset outputs all share seq_len's dtype.
std::vector<paddle::DataType> GetPaddingOffsetV2InferDtype(const paddle::DataType& input_ids_dtype,
                                                           const paddle::DataType& cum_offsets_dtype,
                                                           const paddle::DataType& token_num_dtype,
                                                           const paddle::DataType& seq_len_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.reserve(5);
  out_dtypes.push_back(input_ids_dtype);
  for (int i = 0; i < 4; ++i) {
    out_dtypes.push_back(seq_len_dtype);
  }
  return out_dtypes;
}
|
||
// Registers `get_padding_offset_v2` with its kernel and shape/dtype
// inference functions.
// NOTE(review): the declared input order ("input_ids", "token_num",
// "cum_offsets", "seq_len") differs from the kernel's parameter order
// (input_ids, cum_offsets, token_num, seq_len). If binding is positional,
// the 2nd tensor passed by callers feeds the kernel's `cum_offsets`
// parameter despite being named "token_num" here — confirm against the
// Python call site.
PD_BUILD_OP(get_padding_offset_v2)
    .Inputs({"input_ids", "token_num", "cum_offsets", "seq_len"})
    .Outputs({"x_remove_padding", "cum_offsets_out", "padding_offset", "cu_seqlens_q", "cu_seqlens_k"})
    .SetKernelFn(PD_KERNEL(GetPaddingOffsetV2))
    .SetInferShapeFn(PD_INFER_SHAPE(GetPaddingOffsetV2InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(GetPaddingOffsetV2InferDtype));
Oops, something went wrong.