diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d105ea892ccf2..06e49b8a25f11 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -73,8 +73,6 @@
     'softmax',
     'pool2d',
     'pool3d',
-    'adaptive_pool2d',
-    'adaptive_pool3d',
     'batch_norm',
     'inplace_abn',
     'instance_norm',
@@ -2518,320 +2516,6 @@ def is_list_or_tuple(ele):
     return pool_out
 
 
-@deprecated(since="2.0.0")
-@templatedoc(op_type="pool2d")
-def adaptive_pool2d(
-    input, pool_size, pool_type="max", require_index=False, name=None
-):
-    r"""
-
-    This operation calculates the output based on the input, pool_size,
-    pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
-    size, C is the number of channels, H is the height of the feature, and W is
-    the width of the feature. Parameters(pool_size) should contain two elements which
-    represent height and width, respectively. Also the H and W dimensions of output(Out)
-    is same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]]
-
-    For average adaptive pool2d:
-
-    .. math::
-
-        hstart &= floor(i * H_{in} / H_{out})
-
-        hend &= ceil((i + 1) * H_{in} / H_{out})
-
-        wstart &= floor(j * W_{in} / W_{out})
-
-        wend &= ceil((j + 1) * W_{in} / W_{out})
-
-        Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
-
-    Args:
-        input (Tensor): The input tensor of pooling operator, which is a 4-D tensor
-            with shape [N, C, H, W]. The format of input tensor is NCHW,
-            where N is batch size, C is the number of channels, H is the
-            height of the feature, and W is the width of the feature.
-            The data type is float32 or float64.
-        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
-            it must contain two integers, (pool_size_Height, pool_size_Width).
-        pool_type: ${pooling_type_comment}
-        require_index (bool): If true, the index of max pooling point will be returned along
-            with outputs. It cannot be set in average pooling type. Default False.
-        name(str, optional): For detailed information, please refer
-            to :ref:`api_guide_Name`. Usually name is no need to set and
-            None by default.
-
-    Returns:
-        Tensor: The output tensor of adaptive pooling result. The data type is same
-            as input tensor.
-
-    Raises:
-        ValueError: 'pool_type' is not 'max' nor 'avg'.
-        ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
-        ValueError: 'pool_size' should be a list or tuple with length as 2.
-
-    Examples:
-        .. code-block:: python
-
-          # average adaptive pool2d
-          # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
-          # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
-          # of input data into m * n grids averagely and performs poolings in each
-          # grid to get output.
-          # adaptive average pool performs calculations as follow:
-          #
-          #     for i in range(m):
-          #         for j in range(n):
-          #             hstart = floor(i * H / m)
-          #             hend = ceil((i + 1) * H / m)
-          #             wstart = floor(i * W / n)
-          #             wend = ceil((i + 1) * W / n)
-          #             output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
-          #
-          import paddle
-          paddle.enable_static()
-          data = paddle.rand(shape=[1,3,32,32])
-          pool_out = paddle.fluid.layers.adaptive_pool2d(
-                            input=data,
-                            pool_size=[3, 3],
-                            pool_type='avg')
-
-          # max adaptive pool2d
-          # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
-          # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
-          # of input data into m * n grids averagely and performs poolings in each
-          # grid to get output.
-          # adaptive average pool performs calculations as follow:
-          #
-          #     for i in range(m):
-          #         for j in range(n):
-          #             hstart = floor(i * H / m)
-          #             hend = ceil((i + 1) * H / m)
-          #             wstart = floor(i * W / n)
-          #             wend = ceil((i + 1) * W / n)
-          #             output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
-          #
-          import paddle
-          data = paddle.rand(shape=[1,3,32,32])
-          pool_out = paddle.fluid.layers.adaptive_pool2d(
-                            input=data,
-                            pool_size=[3, 3],
-                            pool_type='max')
-    """
-    check_variable_and_dtype(
-        input,
-        'input',
-        ['float16', 'float32', 'float64', 'int32', 'int64'],
-        'adaptive_pool2d',
-    )
-    check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
-    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
-    check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
-    if pool_type not in ["max", "avg"]:
-        raise ValueError(
-            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
-            str(pool_type),
-        )
-
-    if pool_type == "avg" and require_index:
-        raise ValueError(
-            "invalid setting 'require_index' true when 'pool_type' is 'avg'."
-        )
-
-    pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
-
-    if pool_type == "max":
-        l_type = 'max_pool2d_with_index'
-    else:
-        l_type = "pool2d"
-
-    helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
-    pool_out = helper.create_variable_for_type_inference(dtype)
-
-    outputs = {"Out": pool_out}
-    if pool_type == "max":
-        mask = helper.create_variable_for_type_inference(dtype)
-        outputs["Mask"] = mask
-
-    helper.append_op(
-        type=l_type,
-        inputs={"X": input},
-        outputs=outputs,
-        attrs={
-            "pooling_type": pool_type,
-            "ksize": pool_size,
-            "adaptive": True,
-        },
-    )
-
-    return (pool_out, mask) if require_index else pool_out
-
-
-@deprecated(since="2.0.0")
-@templatedoc(op_type="pool3d")
-def adaptive_pool3d(
-    input, pool_size, pool_type="max", require_index=False, name=None
-):
-    r"""
-
-    This operation calculates the output based on the input, pool_size,
-    pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
-    size, C is the number of channels, D is the depth of the feature, H is the height of
-    the feature, and W is the width of the feature. Parameters(pool_size) should contain
-    three elements which represent height and width, respectively. Also the D, H and W
-    dimensions of output(Out) is same as Parameter(pool_size). The output tensor shape
-    will be [N, C, pool_size[0], pool_size[1], pool_size[2]]
-
-    For average adaptive pool3d:
-
-    .. math::
-
-        dstart &= floor(i * D_{in} / D_{out})
-
-        dend &= ceil((i + 1) * D_{in} / D_{out})
-
-        hstart &= floor(j * H_{in} / H_{out})
-
-        hend &= ceil((j + 1) * H_{in} / H_{out})
-
-        wstart &= floor(k * W_{in} / W_{out})
-
-        wend &= ceil((k + 1) * W_{in} / W_{out})
-
-        Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
-
-    Args:
-        input (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
-            shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
-            N is batch size, C is the number of channels, D is the depth of the feature,
-            H is the height of the feature, and W is the width of the feature.
-            The data type is float32 or float64.
-        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
-            it must contain three integers, (Depth, Height, Width).
-        pool_type: ${pooling_type_comment}
-        require_index (bool): If true, the index of max pooling point will be returned along
-            with outputs. It cannot be set in average pooling type. Default False.
-        name(str, optional): For detailed information, please refer
-            to :ref:`api_guide_Name`. Usually name is no need to set and
-            None by default.
-
-    Returns:
-        Tensor: The output tensor of adaptive pooling result. The data type is same as input tensor.
-
-    Raises:
-        ValueError: 'pool_type' is not 'max' nor 'avg'.
-        ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
-        ValueError: 'pool_size' should be a list or tuple with length as 2.
-
-    Examples:
-        .. code-block:: python
-
-          # average adaptive pool3d
-          # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
-          # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
-          # of input data into l * m * n grids averagely and performs poolings in each
-          # grid to get output.
-          # adaptive average pool performs calculations as follow:
-          #
-          #     for i in range(l):
-          #         for j in range(m):
-          #             for k in range(n):
-          #                 dstart = floor(i * D / l)
-          #                 dend = ceil((i + 1) * D / l)
-          #                 hstart = floor(j * H / m)
-          #                 hend = ceil((j + 1) * H / m)
-          #                 wstart = floor(k * W / n)
-          #                 wend = ceil((k + 1) * W / n)
-          #                 output[:, :, i, j, k] =
-          #                     avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
-          #
-
-          import paddle
-          paddle.enable_static()
-          data = paddle.rand(shape=[1,3,32,32,32])
-          pool_out = paddle.fluid.layers.adaptive_pool3d(
-                            input=data,
-                            pool_size=[3, 3, 3],
-                            pool_type='avg')
-
-          # max adaptive pool3d
-          # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
-          # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
-          # of input data into l * m * n grids averagely and performs poolings in each
-          # grid to get output.
-          # adaptive average pool performs calculations as follow:
-          #
-          #     for i in range(l):
-          #         for j in range(m):
-          #             for k in range(n):
-          #                 dstart = floor(i * D / l)
-          #                 dend = ceil((i + 1) * D / l)
-          #                 hstart = floor(j * H / m)
-          #                 hend = ceil((j + 1) * H / m)
-          #                 wstart = floor(k * W / n)
-          #                 wend = ceil((k + 1) * W / n)
-          #                 output[:, :, i, j, k] =
-          #                     avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
-          #
-
-          import paddle
-          data = paddle.rand(shape=[1,3,32,32,32])
-          pool_out = paddle.fluid.layers.adaptive_pool3d(
-                            input=data,
-                            pool_size=[3, 3, 3],
-                            pool_type='max')
-    """
-    check_variable_and_dtype(
-        input,
-        'input',
-        ['float16', 'float32', 'float64', 'int32', 'int64'],
-        'adaptive_pool3d',
-    )
-    check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
-    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
-    check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
-    if pool_type not in ["max", "avg"]:
-        raise ValueError(
-            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
-            str(pool_type),
-        )
-
-    if pool_type == "avg" and require_index:
-        raise ValueError(
-            "invalid setting 'require_index' true when 'pool_type' is 'avg'."
-        )
-
-    pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
-
-    if pool_type == "max":
-        l_type = 'max_pool3d_with_index'
-    else:
-        l_type = "pool3d"
-
-    helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
-    pool_out = helper.create_variable_for_type_inference(dtype)
-
-    outputs = {"Out": pool_out}
-    if pool_type == "max":
-        mask = helper.create_variable_for_type_inference(dtype)
-        outputs["Mask"] = mask
-
-    helper.append_op(
-        type=l_type,
-        inputs={"X": input},
-        outputs=outputs,
-        attrs={
-            "pooling_type": pool_type,
-            "ksize": pool_size,
-            "adaptive": True,
-        },
-    )
-
-    return (pool_out, mask) if require_index else pool_out
-
-
 def batch_norm(
     input,
     act=None,
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py
index 058e3b30a5315..88104fa253af1 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py
@@ -138,22 +138,5 @@ def set_attrs(self):
         self.attrs['exclusive'] = False
 
 
-class TestAdaptive(TestBase):
-    def set_op_attrs(self):
-        self.attrs = {
-            "pool_size": 1,
-            "pool_type": 'avg',
-            "require_index": False,
-        }
-
-    @IPUOpTest.static_graph
-    def build_model(self):
-        x = paddle.static.data(
-            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
-        )
-        out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs)
-        self.fetch_list = [out.name]
-
-
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py
index aff790a775a9f..3fa93cc89dfab 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py
@@ -137,22 +137,5 @@ def set_op_attrs(self):
         self.attrs['exclusive'] = False
 
 
-class TestAdaptive(TestBase):
-    def set_op_attrs(self):
-        self.attrs = {
-            "pool_size": 1,
-            "pool_type": 'max',
-            "require_index": False,
-        }
-
-    @IPUOpTest.static_graph
-    def build_model(self):
-        x = paddle.static.data(
-            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
-        )
-        out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs)
-        self.fetch_list = [out.name]
-
-
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 328f719d40537..78d01722b8121 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -3299,38 +3299,6 @@ def make_pool3d(self):
             pool_padding=(2, 1, 1),
         )
 
-    def make_adaptive_pool2d(self):
-        with program_guard(
-            fluid.default_main_program(), fluid.default_startup_program()
-        ):
-            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
-            return layers.adaptive_pool2d(x, [3, 3], pool_type='avg')
-            pool, mask = layers.adaptive_pool2d(x, [3, 3], require_index=True)
-            return pool
-            return mask
-            return layers.adaptive_pool2d(x, 3, pool_type='avg')
-            pool, mask = layers.adaptive_pool2d(x, 3, require_index=True)
-            return pool
-            return mask
-
-    def make_adaptive_pool3d(self):
-        with program_guard(
-            fluid.default_main_program(), fluid.default_startup_program()
-        ):
-            x = self._get_data(
-                name='x', shape=[3, 244, 224, 224], dtype='float32'
-            )
-            return layers.adaptive_pool3d(x, [3, 3, 3], pool_type='avg')
-            pool, mask = layers.adaptive_pool3d(
-                x, [3, 3, 3], require_index=True
-            )
-            return pool
-            return mask
-            return layers.adaptive_pool3d(x, 3, pool_type='avg')
-            pool, mask = layers.adaptive_pool3d(x, 3, require_index=True)
-            return pool
-            return mask
-
     def make_lstm_unit(self):
         with program_guard(
             fluid.default_main_program(), fluid.default_startup_program()
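
Migration note: callers of the removed fluid.layers.adaptive_pool2d can move to the 2.x functional API. The lines below are an illustrative sketch only, assuming paddle.nn.functional.adaptive_avg_pool2d / adaptive_max_pool2d (available since Paddle 2.0); they are not part of this patch.

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 32, 32])

    # old: fluid.layers.adaptive_pool2d(input=x, pool_size=[3, 3], pool_type='avg')
    avg_out = F.adaptive_avg_pool2d(x, output_size=[3, 3])  # shape [1, 3, 3, 3]

    # old: fluid.layers.adaptive_pool2d(input=x, pool_size=[3, 3], pool_type='max', require_index=True)
    max_out, mask = F.adaptive_max_pool2d(x, output_size=[3, 3], return_mask=True)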
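
Likewise for the removed adaptive_pool3d, assuming the corresponding 3-D functions follow the same pattern (paddle.nn.functional.adaptive_avg_pool3d / adaptive_max_pool3d):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 32, 32, 32])

    # old: fluid.layers.adaptive_pool3d(input=x, pool_size=[3, 3, 3], pool_type='avg')
    avg_out = F.adaptive_avg_pool3d(x, output_size=[3, 3, 3])  # shape [1, 3, 3, 3, 3]

    # old: fluid.layers.adaptive_pool3d(input=x, pool_size=[3, 3, 3], pool_type='max', require_index=True)
    max_out, mask = F.adaptive_max_pool3d(x, output_size=[3, 3, 3], return_mask=True)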