Test nn.AdaptiveAvgPoolXd #5615

Merged · 4 commits · Jul 27, 2021 · Changes from all commits
51 changes: 23 additions & 28 deletions oneflow/python/nn/modules/adaptive_pool.py
@@ -16,27 +16,23 @@
 import oneflow as flow
 from oneflow.python.nn.module import Module
 from oneflow.python.oneflow_export import oneflow_export, experimental_api
+from oneflow.python.nn.common_types import _size_1_t
+from oneflow.python.nn.modules.utils import _single, _pair, _triple


 def _generate_output_size(input_size, output_size):
     new_output_size = []
-    if isinstance(output_size, int):
-        for _ in range(len(input_size) - 2):
-            new_output_size.append(output_size)
-    elif isinstance(output_size, tuple):
-        assert len(input_size) - 2 == len(
-            output_size
-        ), f"The length of 'output_size' does not match the input size, {len(input_size) - 2} expected"
-        for i in range(len(output_size)):
-            if output_size[i] is None:
-                new_output_size.append(input_size[i + 2])
-            else:
-                assert isinstance(
-                    output_size[i], int
-                ), "numbers in 'output_size' should be integer"
-                new_output_size.append(output_size[i])
-    else:
-        raise ValueError("invalid 'output_size', 'int' or 'tuple' expected")
+    assert len(input_size) - 2 == len(
+        output_size
+    ), f"the length of 'output_size' does not match the input size, {len(input_size) - 2} expected"
+    for i in range(len(output_size)):
+        if output_size[i] is None:
+            new_output_size.append(input_size[i + 2])
+        else:
+            assert isinstance(
+                output_size[i], int
+            ), "numbers in 'output_size' should be integer"
+            new_output_size.append(output_size[i])
     return tuple(new_output_size)
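For reference, a minimal sketch (with made-up shapes) of what the rewritten helper computes — a `None` entry in `output_size` keeps the corresponding input dimension:

```python
# Illustrative shapes only: an NCHW input and a 2-D output_size with a None entry.
input_size = (1, 3, 8, 8)
output_size = (None, 4)
resolved = tuple(
    input_size[i + 2] if output_size[i] is None else output_size[i]
    for i in range(len(output_size))
)
assert resolved == (8, 4)  # None keeps the matching input dimension
```

The `int` branch is gone because the constructors below now normalize `output_size` with `_single`/`_pair`/`_triple`, so the helper always receives a tuple.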


@@ -68,19 +64,16 @@ class AdaptiveAvgPool1d(Module):

     """

-    def __init__(self, output_size) -> None:
+    def __init__(self, output_size: _size_1_t) -> None:
         super().__init__()
-        self.output_size = output_size
+        assert output_size is not None
+        self.output_size = _single(output_size)

     def forward(self, x):
         assert len(x.shape) == 3
-        if isinstance(self.output_size, tuple):
-            new_output_size = self.output_size[0]
-        elif isinstance(self.output_size, int):
-            new_output_size = self.output_size
-        else:
-            raise ValueError("'output_size' should be integer or tuple")
-        return flow.F.adaptive_avg_pool1d(x, output_size=(new_output_size,))
+        assert len(self.output_size) == 1, f"the length of 'output_size' does not match the input size, 1 expected"
+        assert isinstance(self.output_size[0], int), "numbers in 'output_size' should be integer"
+        return flow.F.adaptive_avg_pool1d(x, output_size=self.output_size)


 @oneflow_export("adaptive_avg_pool1d")
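As a quick sanity check of the new normalization, a minimal usage sketch — this assumes the module is exposed as `flow.nn.AdaptiveAvgPool1d` (mirroring `torch.nn.AdaptiveAvgPool1d`) and uses the experimental eager mode from the tests in this PR:

```python
import numpy as np
import oneflow.experimental as flow

flow.enable_eager_execution()

x = flow.Tensor(np.random.randn(1, 64, 8))  # (N, C, L)
m = flow.nn.AdaptiveAvgPool1d(5)            # int input; _single normalizes it to (5,)
y = m(x)
print(y.shape)                              # expected: (1, 64, 5)
```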
@@ -142,7 +135,8 @@ class AdaptiveAvgPool2d(Module):

     def __init__(self, output_size) -> None:
         super().__init__()
-        self.output_size = output_size
+        assert output_size is not None
+        self.output_size = _pair(output_size)

     def forward(self, x):
         assert len(x.shape) == 4
@@ -209,7 +203,8 @@ class AdaptiveAvgPool3d(Module):

     def __init__(self, output_size) -> None:
         super().__init__()
-        self.output_size = output_size
+        assert output_size is not None
+        self.output_size = _triple(output_size)

     def forward(self, x):
         assert len(x.shape) == 5
13 changes: 10 additions & 3 deletions oneflow/python/test/modules/test_adaptive_pool.py
@@ -19,10 +19,17 @@
 import numpy as np

 import oneflow.experimental as flow
+from oneflow.python.nn.common_types import _size_1_t
+from typing import Union, Tuple

 from test_util import GenArgList
 from automated_test_util import *

+NoneType = type(None)
+# Not the same as those in PyTorch because 'output_size' cannot be NoneType (even in 'torch.nn.AdaptiveAvgPoolXd')
+_size_2_opt_t_not_none = Union[int, Tuple[Union[int, NoneType], Union[int, NoneType]]]
+_size_3_opt_t_not_none = Union[int, Tuple[Union[int, NoneType], Union[int, NoneType], Union[int, NoneType]]]
+
 # TODO: auto test
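To make the new aliases concrete, here are the kinds of values they admit and reject (illustrative only):

```python
# Valid draws for _size_2_opt_t_not_none: an int, or a 2-tuple whose entries
# may each be None (a None entry means "keep that input dimension").
ok = [3, (2, 4), (None, 4), (None, None)]
# Not valid: a bare None for the whole output_size -- the OneFlow modules
# assert output_size is not None in __init__.
```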


@@ -864,7 +871,7 @@ def _test_adaptive_avgpool3d_dhw_backward(test_case, device):
 class TestAdaptiveAvgPool(flow.unittest.TestCase):
     @autotest()
     def test_adaptive_avgpool1d(test_case):
-        m = torch.nn.AdaptiveAvgPool1d(output_size=random(1, 5).to(int))
+        m = torch.nn.AdaptiveAvgPool1d(output_size=random().to(_size_1_t))
         m.train(random())
         device = random_device()
         m.to(device)
@@ -874,7 +881,7 @@ def test_adaptive_avgpool1d(test_case):

     @autotest()
     def test_adaptive_avgpool2d(test_case):
-        m = torch.nn.AdaptiveAvgPool2d(output_size=random(1, 5).to(int))
+        m = torch.nn.AdaptiveAvgPool2d(output_size=random().to(_size_2_opt_t_not_none))
         m.train(random())
         device = random_device()
         m.to(device)
@@ -884,7 +891,7 @@ def test_adaptive_avgpool2d(test_case):

     @autotest()
     def test_adaptive_avgpool3d(test_case):
-        m = torch.nn.AdaptiveAvgPool3d(output_size=random(1, 5).to(int))
+        m = torch.nn.AdaptiveAvgPool3d(output_size=random().to(_size_3_opt_t_not_none))
         m.train(random())
         device = random_device()
         m.to(device)
3 changes: 3 additions & 0 deletions oneflow/python/test_utils/automated_test_util/generators.py
@@ -25,6 +25,7 @@
 import torch

 py_tuple = tuple
+NoneType = type(None)

 TEST_MODULE = 0
 TEST_FLOW = 1
@@ -247,6 +248,8 @@ def _generate(self, annotation):
             val = float(rng.random() * (high - low) + low)
         elif annotation == bool:
             val = random_util.choice([True, False])
+        elif annotation == NoneType:
+            val = None
         else:
             raise NotImplementedError(
                 f"Not implemented annotation {annotation} in random"
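The `NoneType` branch matters because the new test annotations nest `NoneType` inside `Union`/`Tuple`. A minimal, self-contained sketch of how annotation-driven sampling can bottom out in such a branch — an illustration, not the actual generator code:

```python
import random
import typing
from typing import Tuple, Union

NoneType = type(None)

def sample(annotation):
    """Recursively draw a random value conforming to a typing annotation."""
    origin = typing.get_origin(annotation)
    if origin is Union:
        # Pick one alternative of the Union and recurse into it.
        return sample(random.choice(typing.get_args(annotation)))
    if origin is tuple:
        # Sample each tuple element according to its own annotation.
        return tuple(sample(a) for a in typing.get_args(annotation))
    if annotation is int:
        return random.randint(1, 6)
    if annotation is NoneType:
        return None  # mirrors the new elif branch above
    raise NotImplementedError(f"Not implemented annotation {annotation}")

# e.g. sample(Union[int, Tuple[Union[int, NoneType], Union[int, NoneType]]])
# can yield 3, (2, None), (None, 5), (None, None), ...
```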
8 changes: 2 additions & 6 deletions oneflow/user/kernels/adaptive_pool_cpu_kernel.cpp
@@ -234,9 +234,7 @@ class AdaptivePool3DCpuGradKernel final : public user_op::OpKernel {
 #define REGISTER_ADAPTIVE_POOL_KERNEL_WITH_DEVICE(device) \
   REGISTER_ADAPTIVE_POOL_KERNEL(device, float)            \
   REGISTER_ADAPTIVE_POOL_KERNEL(device, double)           \
-  REGISTER_ADAPTIVE_POOL_KERNEL(device, int8_t)           \
-  REGISTER_ADAPTIVE_POOL_KERNEL(device, int32_t)          \
-  REGISTER_ADAPTIVE_POOL_KERNEL(device, int64_t)
+  REGISTER_ADAPTIVE_POOL_KERNEL(device, int)

 REGISTER_ADAPTIVE_POOL_KERNEL_WITH_DEVICE(DeviceType::kCPU)

@@ -257,9 +255,7 @@ REGISTER_ADAPTIVE_POOL_KERNEL_WITH_DEVICE(DeviceType::kCPU)
 #define REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL_WITH_DEVICE(device) \
   REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL(device, float)            \
   REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL(device, double)           \
-  REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL(device, int8_t)           \
-  REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL(device, int32_t)          \
-  REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL(device, int64_t)
+  REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL(device, int)

 REGISTER_ADAPTIVE_POOL_BACKWARD_KERNEL_WITH_DEVICE(DeviceType::kCPU)
 }  // namespace oneflow
13 changes: 6 additions & 7 deletions oneflow/user/kernels/adaptive_pool_gpu_kernel.cu
@@ -18,6 +18,7 @@ limitations under the License.
 #include "oneflow/core/kernel/kernel_util.cuh"
 #include "oneflow/core/common/data_type.h"
 #include "oneflow/core/kernel/util/cuda_half_util.h"
+#include "oneflow/core/cuda/atomic.cuh"
 #include "oneflow/core/operator/operator_util.h"
 #include "oneflow/user/utils/pool_util.h"

@@ -122,7 +123,9 @@ __global__ void AdaptiveAvgPoolGradCudaKernel(T* input, const T* output, int num
         input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w;
     for (int id = 0; id < k_d; ++id) {
       for (int ih = 0; ih < k_h; ++ih) {
-        for (int iw = 0; iw < k_w; ++iw) { *(input_ptr + ih * in_w + iw) += grad_delta; }
+        for (int iw = 0; iw < k_w; ++iw) {
+          cuda::atomic::Add(input_ptr + ih * in_w + iw, grad_delta);
+        }
       }
       input_ptr += in_h * in_w;  // next input depth
     }
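Why the switch to an atomic add: in adaptive average pooling the per-output windows can overlap, so in the backward pass two output cells may scatter gradient into the same input element from different CUDA threads, and a plain `+=` would lose updates. A quick illustration of the overlap, assuming the standard window formula (`start = floor(i*In/Out)`, `end = ceil((i+1)*In/Out)`), shown in Python for brevity:

```python
import math

In, Out = 5, 3  # input length 5, output length 3
windows = [(i * In // Out, math.ceil((i + 1) * In / Out)) for i in range(Out)]
print(windows)  # [(0, 2), (1, 4), (3, 5)]
# Input index 1 belongs to windows 0 and 1, and index 3 to windows 1 and 2:
# two different output cells write gradient to the same input element,
# which races without cuda::atomic::Add.
```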
@@ -258,9 +261,7 @@ class GpuAdaptiveAvgPool3dGradKernel final : public OpKernel {

 REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, float);
 REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, double);
-REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int8_t);
-REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int32_t);
-REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int64_t);
+REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int);

 #define REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(device, dtype) \
   REGISTER_USER_KERNEL("adaptive_avg_pool1d_grad")                   \
@@ -278,9 +279,7 @@ REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int64_t);

 REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, float);
 REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, double);
-REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int8_t);
-REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int32_t);
-REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int64_t);
+REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int);

 }  // namespace user_op
