79 changes: 79 additions & 0 deletions paddle/operators/concat_op.cc
@@ -0,0 +1,79 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/concat_op.h"
#include <vector>

namespace paddle {
namespace operators {
using framework::Tensor;

class ConcatOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    auto ins = ctx.MultiInput<framework::Tensor>("X");
    auto *out = ctx.Output<framework::Tensor>("Out");
    size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
    size_t n = ins.size();

    PADDLE_ENFORCE_GT(n, 1, "The number of input tensors should be greater than 1.");

    auto out_dims = ins[0]->dims();
    size_t in_zero_dims_size = out_dims.size();
    for (size_t i = 1; i < n; i++) {
      for (size_t j = 0; j < in_zero_dims_size; j++) {
        if (j == axis) {
          out_dims[axis] += ins[i]->dims()[j];
          continue;
        }
        PADDLE_ENFORCE_EQ(out_dims[j], ins[i]->dims()[j],
                          "Input tensors should have the same "
                          "dimensions except along the concat axis.");
      }
    }
    out->Resize(out_dims);
  }
};

class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  ConcatOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "The input tensors of the concat operator.").AsDuplicable();
    AddOutput("Out", "The output tensor of the concat operator.");
    AddComment(R"DOC(
Concatenate the input tensors along the given axis.
Examples:
  Input[0] = [[1,2],[3,4]]
  Input[1] = [[5,6]]
  axis = 0
  Output = [[1,2],
            [3,4],
            [5,6]]
)DOC");
    AddAttr<int>("axis", "The axis along which the inputs will be concatenated.")
        .SetDefault(0);
  }
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(concat, ops::ConcatOp, ops::ConcatOpMaker)
REGISTER_OP_CPU_KERNEL(concat,
ops::ConcatKernel<paddle::platform::CPUPlace, float>)
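The shape-inference rule above is simple: every input must match the first input on all dimensions except the concat axis, and the output's size along that axis is the sum of the inputs' sizes. A small numpy-style sketch of the same rule (illustrative only, not code from this PR):

import numpy as np

def infer_concat_shape(shapes, axis):
    # Mirrors ConcatOp::InferShape: dims must agree except along `axis`.
    out = list(shapes[0])
    for s in shapes[1:]:
        for j in range(len(out)):
            if j == axis:
                out[axis] += s[j]
            else:
                assert out[j] == s[j], "inputs must match on non-concat dims"
    return tuple(out)

# e.g. the shapes used in test_concat_op.py below:
# infer_concat_shape([(2, 3, 2, 5), (2, 3, 3, 5), (2, 3, 4, 5)], axis=2) == (2, 3, 9, 5)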
19 changes: 19 additions & 0 deletions paddle/operators/concat_op.cu
@@ -0,0 +1,19 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/concat_op.h"

namespace ops = paddle::operators;
// TODO(Yancey1989) Add GPU kernel
64 changes: 64 additions & 0 deletions paddle/operators/concat_op.h
@@ -0,0 +1,64 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstring>
#include <vector>
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename Place, typename T>
class ConcatKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto ins = ctx.MultiInput<framework::Tensor>("X");
    auto* out = ctx.Output<framework::Tensor>("Out");
    int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
    size_t n = ins.size();
    size_t output_axis_dim = 0;
    size_t before = 1, after = 1;
    for (size_t i = 0; i < n; i++) {
      output_axis_dim += ins[i]->dims()[axis];
    }
    auto& input_zero = ins[0];
    for (int64_t i = 0; i < input_zero->dims().size(); i++) {
      if (i == axis) {
        continue;
      }
      if (i < axis) {
        before *= input_zero->dims()[i];
      } else {
        after *= input_zero->dims()[i];
      }
    }
    size_t output_offset = 0;
    for (size_t i = 0; i < n; i++) {
      auto& in = ins[i];
      auto axis_dim = in->dims()[axis];
      for (size_t j = 0; j < before; j++) {
        size_t len = axis_dim * after * sizeof(T);
        const T* src = in->data<T>() + axis_dim * after * j;
        T* out_data = out->mutable_data<T>(platform::CPUPlace());
        T* dest = out_data + output_offset + output_axis_dim * after * j;
        memcpy(dest, src, len);
      }
      output_offset += axis_dim * after;
    }
  }
};

} // namespace operators
} // namespace paddle
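The CPU kernel above flattens each input into `before` outer slices of `axis_dim * after` contiguous elements and copies each slice with memcpy to its running offset in the output. A rough numpy sketch of that copy pattern (an illustration, not part of the PR):

import numpy as np

def concat_like_kernel(xs, axis):
    # Same before/after split as ConcatKernel::Compute.
    shape = xs[0].shape
    before = int(np.prod(shape[:axis], dtype=np.int64))
    after = int(np.prod(shape[axis + 1:], dtype=np.int64))
    output_axis_dim = sum(x.shape[axis] for x in xs)
    out = np.empty(before * output_axis_dim * after, dtype=xs[0].dtype)
    output_offset = 0
    for x in xs:
        axis_dim = x.shape[axis]
        src = x.reshape(before, axis_dim * after)
        for j in range(before):
            dest = output_offset + output_axis_dim * after * j
            out[dest:dest + axis_dim * after] = src[j]
        output_offset += axis_dim * after
    return out.reshape(shape[:axis] + (output_axis_dim,) + shape[axis + 1:])

Note that the C++ kernel calls out->mutable_data<T>() inside the inner loop; since it appears to return the already-allocated buffer on repeated calls, hoisting it out of the loop would be a minor cleanup, alongside the GPU kernel flagged in the TODO above.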
1 change: 1 addition & 0 deletions paddle/pybind/pybind.cc
@@ -49,6 +49,7 @@ USE_OP(minus);
USE_OP(cos_sim);
USE_CPU_ONLY_OP(gather);
USE_CPU_ONLY_OP(scatter);
USE_CPU_ONLY_OP(concat);
USE_OP(top_k);
USE_OP(squared_l2_distance);

5 changes: 2 additions & 3 deletions python/paddle/v2/framework/op.py
@@ -43,7 +43,6 @@ def __call__(self, *args, **kwargs):
        if len(args) != 0:
            raise ValueError("Only keyword arguments are supported.")
        op_desc = framework_pb2.OpDesc()
-
        for input_parameter in self.__op_proto__.inputs:
            input_arguments = kwargs.get(input_parameter.name, [])
            if is_str(input_arguments):
@@ -142,8 +141,8 @@ def __impl__(*args, **kwargs):
    return OpInfo(
        method=__impl__,
        name=op_proto.type,
-       inputs=[var.name for var in op_proto.inputs],
-       outputs=[var.name for var in op_proto.outputs],
+       inputs=[(var.name, var.duplicable) for var in op_proto.inputs],
+       outputs=[(var.name, var.duplicable) for var in op_proto.outputs],
        attrs=[attr.name for attr in op_proto.attrs])
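
After this change, get_op_input_names and get_op_output_names yield (name, duplicable) pairs rather than bare names, so callers can tell which slots accept a list of variables. A hypothetical caller for concat (variable names here are made up for illustration, not taken from the PR) might look like:

from paddle.v2.framework.op import Operator

kwargs = {'axis': 2}
for in_name, duplicable in Operator.get_op_input_names('concat'):
    # duplicable slots such as concat's "X" take a list of variable names
    kwargs[in_name] = ['x0', 'x1', 'x2'] if duplicable else in_name
for out_name, _ in Operator.get_op_output_names('concat'):
    kwargs[out_name] = out_name
concat_op = Operator('concat', **kwargs)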


1 change: 1 addition & 0 deletions python/paddle/v2/framework/tests/CMakeLists.txt
@@ -34,4 +34,5 @@ py_test(test_gradient_checker SRCS test_gradient_checker.py)
py_test(test_lookup_table SRCS test_lookup_table.py)
py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py)
py_test(mnist SRCS mnist.py)
py_test(test_concat_op SRCS test_concat_op.py)
py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py)
5 changes: 2 additions & 3 deletions python/paddle/v2/framework/tests/gradient_checker.py
@@ -11,11 +11,10 @@
def create_op(op_type):
    # TODO need to set attrs
    kwargs = dict()
-   for in_name in Operator.get_op_input_names(op_type):
+   for in_name, _ in Operator.get_op_input_names(op_type):
        kwargs[in_name] = in_name
-   for out_name in Operator.get_op_output_names(op_type):
+   for out_name, _ in Operator.get_op_output_names(op_type):
        kwargs[out_name] = out_name
-
    return Operator(op_type, **kwargs)


32 changes: 23 additions & 9 deletions python/paddle/v2/framework/tests/op_test_util.py
@@ -27,17 +27,30 @@ def test_all(self):
                places.append(core.GPUPlace(0))

            for place in places:
-               for in_name in Operator.get_op_input_names(self.type):
-                   if hasattr(self, "inputs") and in_name in self.inputs:
-                       kwargs[in_name] = in_name
-                       var = scope.new_var(in_name).get_tensor()
-                       arr = self.inputs[in_name]
-                       var.set_dims(arr.shape)
-                       var.set(arr, place)
+               for ins in Operator.get_op_input_names(self.type):
+                   in_name = ins[0]
+                   in_dup = ins[1]
+                   if hasattr(self, 'inputs') and in_name in self.inputs:
+                       kwargs[in_name] = []
+                       if in_dup:
+                           arrays = self.inputs[in_name]
+                           for index, arr in enumerate(arrays):
+                               var = scope.new_var(in_name + str(index))
+                               tensor = var.get_tensor()
+                               tensor.set_dims(arr.shape)
+                               tensor.set(arr, place)
+                               kwargs[in_name].append(in_name + str(index))
+                       else:
+                           kwargs[in_name] = in_name
+                           var = scope.new_var(in_name).get_tensor()
+                           arr = self.inputs[in_name]
+                           var.set_dims(arr.shape)
+                           var.set(arr, place)
                    else:
                        kwargs[in_name] = "@EMPTY@"

-               for out_name in Operator.get_op_output_names(self.type):
+               for out_name, out_dup in Operator.get_op_output_names(
+                       self.type):
                    if not hasattr(self, "outputs"):
                        raise ValueError(
                            "The test op must set self.outputs dict.")
@@ -60,7 +73,8 @@ def test_all(self):
                ctx = core.DeviceContext.create(place)
                op.run(scope, ctx)

-               for out_name in Operator.get_op_output_names(self.type):
+               for out_name, out_dup in Operator.get_op_output_names(
+                       self.type):
                    actual = numpy.array(scope.find_var(out_name).get_tensor())
                    expect = self.outputs[out_name]
                    self.assertTrue(
22 changes: 22 additions & 0 deletions python/paddle/v2/framework/tests/test_concat_op.py
@@ -0,0 +1,22 @@
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta


class TestConcatOp(unittest.TestCase):
    __metaclass__ = OpTestMeta

    def setUp(self):
        self.type = "concat"
        x0 = np.random.random((2, 3, 2, 5)).astype('float32')
        x1 = np.random.random((2, 3, 3, 5)).astype('float32')
        x2 = np.random.random((2, 3, 4, 5)).astype('float32')
        axis = 2
        self.inputs = {'X': [x0, x1, x2]}
        self.attrs = {'axis': axis}
        self.outputs = {'Out': np.concatenate((x0, x1, x2), axis=axis)}


if __name__ == '__main__':
    unittest.main()
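
As a sanity check on the shapes above: the three inputs agree on every axis except axis 2 (sizes 2, 3, and 4), so the concatenated output has shape (2, 3, 9, 5). The same can be verified directly with numpy (illustrative snippet, not part of the test file):

import numpy as np

x0 = np.random.random((2, 3, 2, 5)).astype('float32')
x1 = np.random.random((2, 3, 3, 5)).astype('float32')
x2 = np.random.random((2, 3, 4, 5)).astype('float32')
assert np.concatenate((x0, x1, x2), axis=2).shape == (2, 3, 9, 5)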