Fix cpplint errors in transform_test.cu (PaddlePaddle#9915)
* Fix cpplint errors in transform_test.cu

* Update
wangkuiyi authored Apr 14, 2018
1 parent b668938 commit b48cf17
Showing 1 changed file with 19 additions and 12 deletions.
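
For context, the cpplint failures here come from the build/namespaces check, which rejects namespace using-directives ("Do not use namespace using-directives. Use using-declarations instead."). The commit therefore replaces the per-test using-directives with file-level using-declarations and, following Google C++ style, gives the test-local functors internal linkage by wrapping them in an anonymous namespace. A minimal sketch of the pattern, using names from the diff below:

// Flagged by cpplint [build/namespaces]: pulls in every name from the namespace.
using namespace paddle::platform;

// Accepted: each needed symbol is declared explicitly.
using paddle::platform::CPUDeviceContext;
using paddle::platform::Transform;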
31 changes: 19 additions & 12 deletions paddle/fluid/platform/transform_test.cu
@@ -18,11 +18,12 @@ limitations under the License. */
 #include "paddle/fluid/platform/hostdevice.h"
 #include "paddle/fluid/platform/transform.h"
 
+namespace {
+
 template <typename T>
 class Scale {
  public:
   explicit Scale(const T& scale) : scale_(scale) {}
-
   HOSTDEVICE T operator()(const T& a) const { return a * scale_; }
 
  private:
@@ -35,26 +36,36 @@ class Multiply {
   HOSTDEVICE T operator()(const T& a, const T& b) const { return a * b; }
 };
 
+}  // namespace
+
+using paddle::memory::Alloc;
+using paddle::memory::Free;
+using paddle::memory::Copy;
+
+using paddle::platform::CPUPlace;
+using paddle::platform::CUDAPlace;
+using paddle::platform::CPUDeviceContext;
+using paddle::platform::CUDADeviceContext;
+
+using paddle::platform::Transform;
+
 TEST(Transform, CPUUnary) {
-  using namespace paddle::platform;
   CPUDeviceContext ctx;
   float buf[4] = {0.1, 0.2, 0.3, 0.4};
-  Transform<paddle::platform::CPUDeviceContext> trans;
+  Transform<CPUDeviceContext> trans;
   trans(ctx, buf, buf + 4, buf, Scale<float>(10));
   for (int i = 0; i < 4; ++i) {
     ASSERT_NEAR(buf[i], static_cast<float>(i + 1), 1e-5);
   }
 }
 
 TEST(Transform, GPUUnary) {
-  using namespace paddle::platform;
-  using namespace paddle::memory;
   CUDAPlace gpu0(0);
   CUDADeviceContext ctx(gpu0);
   float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4};
   float* gpu_buf = static_cast<float*>(Alloc(gpu0, sizeof(float) * 4));
   Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf), ctx.stream());
-  Transform<paddle::platform::CUDADeviceContext> trans;
+  Transform<CUDADeviceContext> trans;
   trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, Scale<float>(10));
   ctx.Wait();
   Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf), ctx.stream());
@@ -65,10 +76,8 @@ TEST(Transform, GPUUnary) {
 }
 
 TEST(Transform, CPUBinary) {
-  using namespace paddle::platform;
-  using namespace paddle::memory;
   int buf[4] = {1, 2, 3, 4};
-  Transform<paddle::platform::CPUDeviceContext> trans;
+  Transform<CPUDeviceContext> trans;
   CPUDeviceContext ctx;
   trans(ctx, buf, buf + 4, buf, buf, Multiply<int>());
   for (int i = 0; i < 4; ++i) {
@@ -77,14 +86,12 @@ TEST(Transform, CPUBinary) {
 }
 
 TEST(Transform, GPUBinary) {
-  using namespace paddle::platform;
-  using namespace paddle::memory;
   int buf[4] = {1, 2, 3, 4};
   CUDAPlace gpu0(0);
   CUDADeviceContext ctx(gpu0);
   int* gpu_buf = static_cast<int*>(Alloc(gpu0, sizeof(buf)));
   Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf), ctx.stream());
-  Transform<paddle::platform::CUDADeviceContext> trans;
+  Transform<CUDADeviceContext> trans;
   trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply<int>());
   ctx.Wait();
   Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf), ctx.stream());
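
Each GPU test follows the same allocate/copy/transform/wait/copy round trip. Below is the GPUBinary body again with editorial annotations; the comments are not from the original source, and the trailing Free call is an assumption based on the using-declaration for paddle::memory::Free, since the tail of the diff is collapsed:

int buf[4] = {1, 2, 3, 4};
CUDAPlace gpu0(0);
CUDADeviceContext ctx(gpu0);  // owns the CUDA stream used by Copy and Transform
int* gpu_buf = static_cast<int*>(Alloc(gpu0, sizeof(buf)));       // device allocation
Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf), ctx.stream());  // host-to-device, async on ctx's stream
Transform<CUDADeviceContext> trans;
trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply<int>());  // out[i] = buf[i] * buf[i], runs on device
ctx.Wait();                                                       // block until the enqueued work finishes
Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf), ctx.stream());  // device-to-host for the assertions
Free(gpu0, gpu_buf);                                              // assumed cleanup; Free is imported above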
