[AutoParallel] Set value phi yaml (#58893)
* set value to phi
wanghuancoder authored Nov 14, 2023
1 parent 3ab6f55 commit 8bc09e7
Showing 13 changed files with 145 additions and 111 deletions.
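At the Python surface nothing changes: `Tensor.__setitem__` behaves as before, but it now lowers to the phi-generated eager APIs defined by the yaml entries below — `set_value` when the assigned value is a Python scalar, `set_value_with_tensor` when it is a tensor. A minimal, hedged usage sketch (illustrative only; the op names come from this diff):

import paddle

x = paddle.zeros([4, 4])

# Scalar right-hand side: handled by the new `set_value` op
# (the scalar is packed into the `values`/`shape` arguments).
x[0, :] = 1.0

# Tensor right-hand side: handled by the new `set_value_with_tensor` op.
x[1, :] = paddle.ones([4])

print(x.numpy())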
@@ -826,7 +826,7 @@ def BackwardValidationCheck(self):
         max_grad_tensor_position = -1
         for _, (_, _, pos) in backward_grad_inputs_map.items():
             assert pos > max_fwd_input_position, AssertMessage(
-                pos, max_grad_tensor_position
+                pos, max_fwd_input_position
             )
             max_grad_tensor_position = max(max_grad_tensor_position, pos)
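The hunk above only corrects the assertion's failure message: the comparison already used max_fwd_input_position, but on failure the message printed the unrelated max_grad_tensor_position (still -1 if the first iteration fails). A self-contained Python sketch of the fixed behavior, with AssertMessage as a simplified stand-in for the generator's real helper:

# Simplified stand-in for the code generator's AssertMessage helper
# (assumed to format the two compared values into an error string).
def AssertMessage(lhs, rhs):
    return f"lhs: {lhs}, rhs: {rhs}"

max_fwd_input_position = 5
max_grad_tensor_position = -1  # not yet updated on the first iteration

pos = 3  # violates pos > max_fwd_input_position
# Old message would have reported "rhs: -1"; the fixed message reports the
# bound actually being compared:
assert pos > max_fwd_input_position, AssertMessage(pos, max_fwd_input_position)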
4 changes: 2 additions & 2 deletions paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -2216,12 +2216,12 @@ struct SetValueWithTensorOpTranscriber : public SetValueOpTranscriber {
 struct SetValueGradOpTranscriber : public SetValueWithTensorOpTranscriber {
   pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx,
                             const OpDesc& op_desc) override {
-    std::string target_op_name = dialect::SetValueGradOp::name();
+    std::string target_op_name = dialect::SetValueWithTensorGradOp::name();
     const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name);
     if (!op_info) {
       IR_THROW(
           "Op set_value_grad should have corresponding OpInfo "
-          "pd_op.set_value_grad");
+          "pd_op.set_value_with_tensor_grad");
     }
 
     return op_info;
24 changes: 0 additions & 24 deletions paddle/fluid/pir/dialect/operator/ir/ops.yaml
@@ -140,30 +140,6 @@
     func : send_v2
     param : [x, ring_id, dynamic_shape, peer, use_calc_stream]
 
-- op : set_value
-  args : (Tensor x, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes, int64_t[] shape, Scalar[] values)
-  output : Tensor(out)
-  infer_meta:
-    func: SetValueInferMeta
-    param: [x]
-  kernel:
-    func: set_value
-    param: [x, starts, ends, steps, axes, decrease_axes, none_axes, shape, values]
-  inplace: (x -> out)
-  backward: set_value_grad
-
-- op : set_value_with_tensor
-  args : (Tensor x, Tensor values, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
-  output : Tensor(out)
-  infer_meta:
-    func: SetValueInferMeta
-    param: [x]
-  kernel:
-    func: set_value_with_tensor
-    param: [x, values, starts, ends, steps, axes, decrease_axes, none_axes]
-  inplace: (x -> out)
-  backward: set_value_grad
-
 - op : shadow_feed
   args : (Tensor x)
   output : Tensor(out)
10 changes: 0 additions & 10 deletions paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml
@@ -17,13 +17,3 @@
   kernel:
     func: fused_feedforward_grad
   optional: linear1_bias, linear2_bias, ln1_scale, ln1_bias, ln1_out, ln1_mean, ln1_variance, ln2_scale, ln2_bias, ln2_mean, ln2_variance, dropout2_out, ln1_scale_grad, ln1_bias_grad, ln2_scale_grad, ln2_bias_grad, linear2_bias_grad
 
-- backward_op : set_value_grad
-  args : (Tensor out_grad, Tensor values, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
-  output : Tensor(x_grad), Tensor(values_grad)
-  infer_meta:
-    func: SetValueGradInferMeta
-    param: [out_grad, values]
-  kernel:
-    func: set_value_grad
-    param: [out_grad, starts, ends, steps, axes, decrease_axes, none_axes]
110 changes: 59 additions & 51 deletions paddle/fluid/pybind/eager_method.cc
@@ -1306,7 +1306,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
   EAGER_TRY
   PyObject* _index = PyTuple_GET_ITEM(args, 0);
   VLOG(4) << "Call _getitem_index_not_tensor";
-  std::vector<int> slice_axes, slice_starts, slice_ends, slice_strides,
+  std::vector<int64_t> slice_axes, slice_starts, slice_ends, slice_strides,
       decrease_axis, none_axes, infer_flags;
   std::vector<int64_t> list_select_idxs;
   // if index is a list, list_select_flag will be true
@@ -1353,26 +1353,25 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
       break;
     }
   }
-  std::vector<int64_t> slice_axes_tmp(slice_axes.begin(), slice_axes.end());
-  std::vector<int64_t> infer_flags_tmp(infer_flags.begin(),
-                                       infer_flags.end());
-  std::vector<int64_t> decrease_axis_tmp(decrease_axis.begin(),
-                                         decrease_axis.end());
-
   if (op_type == "slice") {
     eager_gil_scoped_release guard;
     out = slice_ad_func(self->tensor,
-                        slice_axes_tmp,
+                        slice_axes,
                         slice_starts,
                         slice_ends,
-                        infer_flags_tmp,
-                        decrease_axis_tmp);
+                        infer_flags,
+                        decrease_axis);
   } else if (op_type == "strided_slice") {
     eager_gil_scoped_release guard;
-    out = strided_slice_ad_func(
-        self->tensor, slice_axes, slice_starts, slice_ends, slice_strides);
-    if (!decrease_axis_tmp.empty()) {
-      out = squeeze_ad_func(out, decrease_axis_tmp);
+    std::vector<int> slice_axes_tmp(slice_axes.begin(), slice_axes.end());
+    out = strided_slice_ad_func(self->tensor,
+                                slice_axes_tmp,
+                                slice_starts,
+                                slice_ends,
+                                slice_strides);
+    if (!decrease_axis.empty()) {
+      out = squeeze_ad_func(out, decrease_axis);
     }
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
@@ -1607,7 +1606,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
     // TODO(liym27): Try not to call TensorToPyArray because it always
     // copys data to cpu place, which reduces performance.
     if (parse_index) {
-      std::vector<int> axes, starts, ends, steps, decrease_axes, none_axes,
+      std::vector<int64_t> axes, starts, ends, steps, decrease_axes, none_axes,
           infer_flags;
       std::vector<int64_t> list_select_idxs;
       // if index is a list, list_select_flag will be true
@@ -1624,13 +1623,6 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
                          &list_select_idxs,
                          &list_select_flag);
 
-      framework::AttributeMap attrs = {{"axes", axes},
-                                       {"starts", starts},
-                                       {"ends", ends},
-                                       {"steps", steps},
-                                       {"decrease_axes", decrease_axes},
-                                       {"none_axes", none_axes}};
-
       if (egr::Controller::Instance().HasGrad()) {
         PADDLE_ENFORCE_EQ(
             egr::EagerUtils::IsLeafTensor(self->tensor) &&
@@ -1643,6 +1635,8 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
       }
 
       paddle::Tensor value_tensor;
+      std::vector<phi::Scalar> values;
+      std::vector<int64_t> shape = std::vector<int64_t>{1};
 
       if (PyCheckTensor(value_obj)) {
         value_tensor = reinterpret_cast<TensorObject*>(value_obj)->tensor;
@@ -1706,25 +1700,20 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
                  PyComplex_Check(value_obj)) {
         if (self->tensor.dtype() == phi::DataType::FLOAT32 ||
             self->tensor.dtype() == phi::DataType::FLOAT16) {
-          attrs["values"] = std::vector<paddle::experimental::Scalar>{
-              value_obj_tmp.cast<float>()};
+          values = std::vector<phi::Scalar>{value_obj_tmp.cast<float>()};
         } else if (self->tensor.dtype() == phi::DataType::FLOAT64) {
-          attrs["values"] = std::vector<paddle::experimental::Scalar>{
-              value_obj_tmp.cast<double>()};
+          values = std::vector<phi::Scalar>{value_obj_tmp.cast<double>()};
         } else if (self->tensor.dtype() == phi::DataType::INT32) {
-          attrs["values"] = std::vector<paddle::experimental::Scalar>{
-              value_obj_tmp.cast<int32_t>()};
+          values = std::vector<phi::Scalar>{value_obj_tmp.cast<int32_t>()};
         } else if (self->tensor.dtype() == phi::DataType::INT64) {
-          attrs["values"] = std::vector<paddle::experimental::Scalar>{
-              value_obj_tmp.cast<int64_t>()};
+          values = std::vector<phi::Scalar>{value_obj_tmp.cast<int64_t>()};
         } else if (self->tensor.dtype() == phi::DataType::BOOL) {
-          attrs["values"] = std::vector<paddle::experimental::Scalar>{
-              value_obj_tmp.cast<bool>()};
+          values = std::vector<phi::Scalar>{value_obj_tmp.cast<bool>()};
         } else if (self->tensor.dtype() == phi::DataType::COMPLEX64) {
-          attrs["values"] = std::vector<paddle::experimental::Scalar>{
+          values = std::vector<phi::Scalar>{
               value_obj_tmp.cast<std::complex<float>>()};
         } else if (self->tensor.dtype() == phi::DataType::COMPLEX128) {
-          attrs["values"] = std::vector<paddle::experimental::Scalar>{
+          values = std::vector<phi::Scalar>{
               value_obj_tmp.cast<std::complex<double>>()};
         } else {
           PADDLE_THROW(platform::errors::InvalidArgument(
@@ -1734,8 +1723,6 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
               "float16, "
               "please check the type of tensor."));
         }
-        attrs["shape"] = std::vector<int64_t>{1};
-
       } else {
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Value type error. The assign value allows "
Expand All @@ -1748,25 +1735,46 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
// Release gil and do tracing
py::gil_scoped_release release;
// use inplace set_value_ operator
if (value_tensor.initialized() &&
(self->tensor.dtype() != value_tensor.dtype())) {
if (egr::Controller::Instance().GetAMPLevel() !=
paddle::imperative::AmpLevel::O0) {
paddle::small_vector<std::vector<paddle::Tensor>,
egr::kSlotSmallVectorSize>
tmps = {{self->tensor}, {value_tensor}};
auto amp_dtype = egr::GetAmpDestDtype("set_value", tmps);
self->tensor = egr::EagerAmpAutoCast(
self->tensor.name(), self->tensor, amp_dtype, "set_value");
value_tensor = egr::EagerAmpAutoCast(
value_tensor.name(), value_tensor, amp_dtype, "set_value");
}
if (value_tensor.initialized()) {
if (self->tensor.dtype() != value_tensor.dtype()) {
value_tensor = cast_ad_func(value_tensor, self->tensor.dtype());
if (egr::Controller::Instance().GetAMPLevel() !=
paddle::imperative::AmpLevel::O0) {
paddle::small_vector<std::vector<paddle::Tensor>,
egr::kSlotSmallVectorSize>
tmps = {{self->tensor}, {value_tensor}};
auto amp_dtype = egr::GetAmpDestDtype("set_value", tmps);
self->tensor = egr::EagerAmpAutoCast(
self->tensor.name(), self->tensor, amp_dtype, "set_value");
value_tensor = egr::EagerAmpAutoCast(
value_tensor.name(), value_tensor, amp_dtype, "set_value");
}
if (self->tensor.dtype() != value_tensor.dtype()) {
value_tensor = cast_ad_func(value_tensor, self->tensor.dtype());
}
}
const phi::distributed::ProcessMesh* mesh = nullptr;
if (InputsContainDistTensor(&mesh, self->tensor, value_tensor)) {
ConvertAllInputsToDistTensor(mesh, self->tensor, value_tensor);
}
self->tensor = set_value_with_tensor__ad_func(self->tensor,
value_tensor,
starts,
ends,
steps,
axes,
decrease_axes,
none_axes);
} else {
self->tensor = set_value__ad_func(self->tensor,
starts,
ends,
steps,
axes,
decrease_axes,
none_axes,
shape,
values);
}
self->tensor = set_value__dygraph_function(
self->tensor, value_tensor, {}, {}, {}, attrs);
}
if (PyCheckTensor(value_obj)) {
// pass the stop_gradient from value to tensor.
Expand Down
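Read as control flow, the rewritten binding dispatches on whether the right-hand side is a live tensor: tensor values go through set_value_with_tensor_ (after AMP/dtype reconciliation and dist-tensor conversion), while scalar values travel as a one-element values list with shape=[1] through set_value_. A hedged Python rendering of the two paths (simplified; exact casting behavior is as implemented above):

import paddle

x = paddle.zeros([4], dtype='float32')

# Tensor RHS -> set_value_with_tensor_; a dtype mismatch is reconciled first
# (the C++ branch above casts the value tensor to x's dtype).
x[1:3] = paddle.to_tensor([7, 8], dtype='int32')

# Scalar RHS -> set_value_; the scalar travels as values=[3.5], shape=[1].
x[0] = 3.5

print(x.numpy())  # expected roughly [3.5, 7.0, 8.0, 0.0]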
14 changes: 7 additions & 7 deletions paddle/fluid/pybind/slice_utils.h
@@ -143,13 +143,13 @@ static int _PySlice_GetIndices(PySliceObject* r,
 
 static void ParseIndexingSlice(phi::DDim shape,
                                PyObject* _index,
-                               std::vector<int>* slice_axes,
-                               std::vector<int>* slice_starts,
-                               std::vector<int>* slice_ends,
-                               std::vector<int>* slice_strides,
-                               std::vector<int>* decrease_axis,
-                               std::vector<int>* none_axes,
-                               std::vector<int>* infer_flags,
+                               std::vector<int64_t>* slice_axes,
+                               std::vector<int64_t>* slice_starts,
+                               std::vector<int64_t>* slice_ends,
+                               std::vector<int64_t>* slice_strides,
+                               std::vector<int64_t>* decrease_axis,
+                               std::vector<int64_t>* none_axes,
+                               std::vector<int64_t>* infer_flags,
                                std::vector<int64_t>* list_select_idxs,
                                bool* list_select_flag) {
   // We allow indexing by Integers, Slices, Ellipsis, None, tuples of those
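Widening the parsed slice metadata from int to int64_t matters once a dimension size or slice bound exceeds the 32-bit range; a hedged illustration of the overflow being avoided (generic Python, not Paddle-specific):

import ctypes

big = 3 * 10**9  # a plausible start/end bound on a very large dimension

print(ctypes.c_int32(big).value)  # wraps to a negative value in 32 bits
print(ctypes.c_int64(big).value)  # 3000000000, preserved in 64 bits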
22 changes: 22 additions & 0 deletions paddle/phi/api/yaml/legacy_backward.yaml
@@ -610,6 +610,28 @@
   func : rrelu_grad
   data_type : x
 
+- backward_op : set_value_grad
+  forward : set_value (Tensor x, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes, int64_t[] shape, Scalar[] values) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta:
+    func: UnchangedInferMeta
+    param: [out_grad]
+  kernel:
+    func: assign
+    param: [out_grad]
+
+- backward_op : set_value_with_tensor_grad
+  forward: set_value_with_tensor (Tensor x, Tensor values, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes) -> Tensor(out)
+  args : (Tensor values, Tensor out_grad, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
+  output : Tensor(x_grad), Tensor(values_grad)
+  infer_meta:
+    func: SetValueGradInferMeta
+    param: [out_grad, values]
+  kernel:
+    func: set_value_grad
+    param: [out_grad, starts, ends, steps, axes, decrease_axes, none_axes]
+
 - backward_op : slice_double_grad
   forward : slice_grad (Tensor input, Tensor grad_out, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis) -> Tensor(grad_input)
   args : (Tensor grad_input_grad, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
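These entries split the old single set_value_grad in two: the scalar-values form produces x_grad via a plain assign of out_grad, while the tensor-values form keeps the set_value_grad kernel and additionally yields values_grad. A hedged smoke test that both inputs receive gradients on the tensor path (shapes assumed from broadcasting; not part of the original diff):

import paddle

x = paddle.ones([4])
x.stop_gradient = False
v = paddle.full([2], 5.0)
v.stop_gradient = False

y = x * 1.0   # non-leaf copy, so the in-place write is allowed under autograd
y[1:3] = v    # tensor RHS -> set_value_with_tensor / set_value_with_tensor_grad
y.sum().backward()

print(x.grad.shape)  # [4]: gradient w.r.t. the original tensor
print(v.grad.shape)  # [2]: gradient w.r.t. the assigned values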
22 changes: 22 additions & 0 deletions paddle/phi/api/yaml/legacy_ops.yaml
@@ -961,6 +961,28 @@
   intermediate : noise
   backward : rrelu_grad
 
+- op : set_value
+  args : (Tensor x, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes, int64_t[] shape, Scalar[] values)
+  output : Tensor(out)
+  inplace: (x -> out)
+  infer_meta :
+    func : SetValueInferMeta
+    param : [x]
+  kernel :
+    func : set_value
+  backward: set_value_grad
+
+- op : set_value_with_tensor
+  args : (Tensor x, Tensor values, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
+  output : Tensor(out)
+  inplace: (x -> out)
+  infer_meta:
+    func: SetValueInferMeta
+    param: [x]
+  kernel:
+    func: set_value_with_tensor
+  backward: set_value_with_tensor_grad
+
 - op : slice
   args : (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
   output : Tensor
33 changes: 24 additions & 9 deletions python/paddle/base/variable_index.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import itertools
 import warnings
 from functools import reduce
 
@@ -875,6 +874,7 @@ def _setitem_static(x, indices, values):
     StartsTensorList = None
     EndsTensorList = None
     StepsTensorList = None
+    shape = None
 
     if paddle.utils._contain_var(starts):
         StartsTensorList = paddle.utils._convert_to_tensor_list(starts)
@@ -919,14 +919,29 @@ def _setitem_static(x, indices, values):
 
     # step3.1: Only basic indexing, use OP set_value to set value.
     if paddle.in_dynamic_mode():
-        return paddle._legacy_C_ops.set_value_(
-            x,
-            value_tensor,
-            StartsTensorList,
-            EndsTensorList,
-            StepsTensorList,
-            *itertools.chain.from_iterable(attrs.items()),
-        )
+        if value_tensor is None:
+            return paddle._C_ops.set_value_(
+                x,
+                starts,
+                ends,
+                steps,
+                axes,
+                decrease_axes,
+                none_axes,
+                shape,
+                values,
+            )
+        else:
+            return paddle._C_ops.set_value_with_tensor_(
+                x,
+                value_tensor,
+                starts,
+                ends,
+                steps,
+                axes,
+                decrease_axes,
+                none_axes,
+            )
     else:
         helper = paddle.base.layer_helper.LayerHelper(
             'set_value', **locals()
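For reference, a hedged sketch of invoking the regenerated binding directly, with the argument order taken from the yaml definitions above — paddle._C_ops is an internal, unstable API, and this call is a hypothetical illustration rather than recommended usage:

import paddle

x = paddle.zeros([4])

# Hypothetical direct call mirroring the dynamic-mode branch above
# (argument order per the set_value yaml entry: starts, ends, steps,
# axes, decrease_axes, none_axes, shape, values).
paddle._C_ops.set_value_(x, [1], [3], [1], [0], [], [], [1], [9.0])

print(x.numpy())  # expected roughly [0., 9., 9., 0.]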
4 changes: 4 additions & 0 deletions test/indexing/test_setitem.py
@@ -503,3 +503,7 @@ def test_indexing_is_multi_dim_list(self):
         res = self.exe.run(fetch_list=[y.name])
 
         np.testing.assert_allclose(res[0], np_data)
+
+
+if __name__ == '__main__':
+    unittest.main()