@@ -2964,6 +2964,94 @@ def space_to_depth(data, block_size, layout="NCHW"):
    return _make.space_to_depth(data, block_size, layout)
+def adaptive_max_pool1d(data, output_size=None, layout="NCW"):
+    r"""1D adaptive max pooling operator. This operator is experimental.
+
+    This operator takes data as input and does 1D max value calculation
+    across each window represented by W.
+
+    In the default case, where the data_layout is `NCW`,
+    a data Tensor with shape `(batch_size, in_channels, width)`
+    will produce an output Tensor with shape
+    `(batch_size, in_channels, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for
+    the desired output size.
+
+    For output_size:
+        If this argument is not provided, the input width will be used
+        as the output width.
+
+        If a single integer is provided for output_size, the output size
+        is (N x C x output_size) for any input (NCW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : int or tuple of int, optional
+        Output width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
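+
+    Examples
+    --------
+    A minimal usage sketch; the variable names and shapes below are
+    illustrative, not part of the operator definition:
+
+    .. code-block:: python
+
+        x = relay.var("x", shape=(1, 16, 32))
+        # pool the width axis from 32 down to 8
+        y = relay.nn.adaptive_max_pool1d(x, output_size=8)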
+    """
+    output_size = [] if output_size is None else output_size
+    if isinstance(output_size, int):
+        output_size = [output_size]
+    return _make.adaptive_max_pool1d(data, output_size, layout)
+
+
+def adaptive_avg_pool1d(data, output_size=None, layout="NCW"):
+    r"""1D adaptive average pooling operator. This operator is experimental.
+
+    This operator takes data as input and does 1D average value calculation
+    across each window represented by W.
+
+    In the default case, where the data_layout is `NCW`,
+    a data Tensor with shape `(batch_size, in_channels, width)`
+    will produce an output Tensor with shape
+    `(batch_size, in_channels, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for
+    the desired output size.
+
+    For output_size:
+        If this argument is not provided, the input width will be used
+        as the output width.
+
+        If a single integer is provided for output_size, the output size
+        is (N x C x output_size) for any input (NCW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : int or tuple of int, optional
+        Output width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
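+
+    Examples
+    --------
+    A minimal usage sketch; the variable names and shapes below are
+    illustrative, not part of the operator definition:
+
+    .. code-block:: python
+
+        x = relay.var("x", shape=(1, 16, 32))
+        # average over adaptive windows down to width 8
+        y = relay.nn.adaptive_avg_pool1d(x, output_size=8)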
+    """
+    output_size = [] if output_size is None else output_size
+    if isinstance(output_size, int):
+        output_size = [output_size]
+    return _make.adaptive_avg_pool1d(data, output_size, layout)
+
+
def adaptive_max_pool2d(data, output_size=None, layout="NCHW"):
    r"""2D adaptive max pooling operator. This operator is experimental.
@@ -3142,6 +3230,71 @@ def adaptive_avg_pool3d(data, output_size=None, layout="NCDHW"):
    return _make.adaptive_avg_pool3d(data, output_size, layout)


+def global_max_pool1d(data, layout="NCW"):
+    r"""1D global maximum pooling operator.
+
+    This operator takes data as input and does 1D max value calculation
+    across each window represented by W.
+
+    In the default case, where the data_layout is `NCW`,
+    a data Tensor with shape `(batch_size, in_channels, width)`
+    will produce an output Tensor according to the following rule,
+    with data of shape (b, c, w):
+
+    .. math::
+
+        \mbox{out}(b, c, 1) = \max_{n=0, \ldots, w-1} \mbox{data}(b, c, n)
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
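+
+    Examples
+    --------
+    A minimal usage sketch; shapes are illustrative. Since this operator
+    lowers to adaptive_max_pool1d with output_size=1, the result always
+    has width 1:
+
+    .. code-block:: python
+
+        x = relay.var("x", shape=(1, 16, 32))
+        # output has shape (1, 16, 1)
+        y = relay.nn.global_max_pool1d(x)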
+    """
+    output_size = [1]
+    return _make.adaptive_max_pool1d(data, output_size, layout)
+
+
+def global_avg_pool1d(data, layout="NCW"):
+    r"""1D global average pooling operator.
+
+    This operator takes data as input and does 1D average value calculation
+    across each window represented by W.
+
+    In the default case, where the data_layout is `NCW`,
+    a data Tensor with shape `(batch_size, in_channels, width)`
+    will produce an output Tensor according to the following rule,
+    with data of shape (b, c, w):
+
+    .. math::
+
+        \mbox{out}(b, c, 1) = \frac{1}{w} \sum_{n=0}^{w-1} \mbox{data}(b, c, n)
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
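+
+    Examples
+    --------
+    A minimal usage sketch; shapes are illustrative. Since this operator
+    lowers to adaptive_avg_pool1d with output_size=1, the result always
+    has width 1:
+
+    .. code-block:: python
+
+        x = relay.var("x", shape=(1, 16, 32))
+        # output has shape (1, 16, 1)
+        y = relay.nn.global_avg_pool1d(x)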
+    """
+    output_size = [1]
+    return _make.adaptive_avg_pool1d(data, output_size, layout)
+
+
def global_max_pool3d(data, layout="NCDHW"):
    r"""3D global maximum pooling operator.