
Commit 5f67794

[TOP] Rename conv pool parameter back to 2d
1 parent 55592ec commit 5f67794

File tree

3 files changed, +42 -42 lines changed

  nnvm/include/nnvm/top/nn.h
  nnvm/src/top/nn/convolution.cc
  nnvm/src/top/nn/pooling.cc


nnvm/include/nnvm/top/nn.h

Lines changed: 8 additions & 8 deletions
@@ -101,7 +101,7 @@ struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
   }
 };
 
-struct ConvParam : public dmlc::Parameter<ConvParam> {
+struct Conv2DParam : public dmlc::Parameter<Conv2DParam> {
   int channels;
   TShape kernel_size;
   TShape strides;
@@ -111,7 +111,7 @@ struct ConvParam : public dmlc::Parameter<ConvParam> {
   int layout;
   bool use_bias;
 
-  DMLC_DECLARE_PARAMETER(ConvParam) {
+  DMLC_DECLARE_PARAMETER(Conv2DParam) {
     DMLC_DECLARE_FIELD(channels)
       .describe("The dimensionality of the output space"
                 "i.e. the number of output channels in the convolution.");
@@ -148,7 +148,7 @@ struct ConvParam : public dmlc::Parameter<ConvParam> {
 };
 
 
-struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
+struct Conv2DTransposeParam : public dmlc::Parameter<Conv2DTransposeParam> {
   int channels;
   TShape kernel_size;
   TShape strides;
@@ -159,7 +159,7 @@ struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
   int layout;
   bool use_bias;
 
-  DMLC_DECLARE_PARAMETER(ConvTransposeParam) {
+  DMLC_DECLARE_PARAMETER(Conv2DTransposeParam) {
     DMLC_DECLARE_FIELD(channels)
       .describe("The dimensionality of the output space"
                 "i.e. the number of output channels in the convolution.");
@@ -198,15 +198,15 @@ struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
 };
 
 
-struct PoolParam : public dmlc::Parameter<PoolParam> {
+struct Pool2DParam : public dmlc::Parameter<Pool2DParam> {
   TShape pool_size;
   TShape strides;
   TShape padding;
   int groups;
   int layout;
   bool ceil_mode;
 
-  DMLC_DECLARE_PARAMETER(PoolParam) {
+  DMLC_DECLARE_PARAMETER(Pool2DParam) {
     DMLC_DECLARE_FIELD(pool_size)
       .describe("Size of the pooling windows..");
     DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
@@ -234,10 +234,10 @@ struct PoolParam : public dmlc::Parameter<PoolParam> {
 };
 
 
-struct GlobalPoolParam : public dmlc::Parameter<GlobalPoolParam> {
+struct GlobalPool2DParam : public dmlc::Parameter<GlobalPool2DParam> {
   int layout;
 
-  DMLC_DECLARE_PARAMETER(GlobalPoolParam) {
+  DMLC_DECLARE_PARAMETER(GlobalPool2DParam) {
     DMLC_DECLARE_FIELD(layout)
       .add_enum("NCHW", kNCHW)
       .add_enum("NHWC", kNHWC)

nnvm/src/top/nn/convolution.cc

Lines changed: 22 additions & 22 deletions
@@ -15,12 +15,12 @@ namespace nnvm {
 namespace top {
 
 // conv2d
-DMLC_REGISTER_PARAMETER(ConvParam);
+DMLC_REGISTER_PARAMETER(Conv2DParam);
 
 inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
                              std::vector<TShape>* in_shape,
                              std::vector<TShape>* out_shape) {
-  const ConvParam& param = nnvm::get<ConvParam>(attrs.parsed);
+  const Conv2DParam& param = nnvm::get<Conv2DParam>(attrs.parsed);
   if (param.use_bias) {
     CHECK_EQ(in_shape->size(), 3U) << "Input:[data, weight, bias]";
   } else {
@@ -51,10 +51,10 @@ inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
   wshape = ConvertLayout(wshape, kNCHW, param.layout);
   wshape[0] *= param.groups;
 
-  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvParam::kWeight, wshape);
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DParam::kWeight, wshape);
   if (param.use_bias) {
     NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape,
-                            ConvParam::kBias, TShape({param.channels}));
+                            Conv2DParam::kBias, TShape({param.channels}));
   }
   // dilation
   dim_t dilated_ksize_y = 1 + (param.kernel_size[0] - 1) * param.dilation[0];
@@ -79,7 +79,7 @@ inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
   if (oshape[3] && param.strides[1] == 1) {
     dshape[3] = oshape[3] + dilated_ksize_x - 1 - 2 * param.padding[1];
   }
-  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvParam::kData,
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DParam::kData,
                           ConvertLayout(dshape, kNCHW, param.layout));
   // Check whether the kernel sizes are valid
   if (dshape[2] != 0) {
@@ -112,29 +112,29 @@ a bias vector is created and added to the outputs.
 .add_argument("data", "4D Tensor", "Input data.")
 .add_argument("weight", "4D Tensor", "Weight matrix.")
 .add_argument("bias", "1D Tensor", "Bias parameter.")
-.add_arguments(ConvParam::__FIELDS__())
-.set_attr_parser(ParamParser<ConvParam>)
+.add_arguments(Conv2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<Conv2DParam>)
 .set_num_outputs(1)
-.set_num_inputs(UseBiasNumInputs<ConvParam>)
-.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<ConvParam>)
+.set_num_inputs(UseBiasNumInputs<Conv2DParam>)
+.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<Conv2DParam>)
 .set_attr<FInferShape>("FInferShape", Conv2DInferShape)
 .set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>)
 .set_support_level(2);
 
 
-DMLC_REGISTER_PARAMETER(ConvTransposeParam);
+DMLC_REGISTER_PARAMETER(Conv2DTransposeParam);
 
-inline bool ConvTransposeInferShape(const nnvm::NodeAttrs& attrs,
-                                    std::vector<TShape>* in_shape,
-                                    std::vector<TShape>* out_shape) {
-  const ConvTransposeParam& param = nnvm::get<ConvTransposeParam>(attrs.parsed);
+inline bool Conv2DTransposeInferShape(const nnvm::NodeAttrs& attrs,
+                                      std::vector<TShape>* in_shape,
+                                      std::vector<TShape>* out_shape) {
+  const Conv2DTransposeParam& param = nnvm::get<Conv2DTransposeParam>(attrs.parsed);
   if (param.use_bias) {
     CHECK_EQ(in_shape->size(), 3U) << "Input:[data, weight, bias]";
   } else {
     CHECK_EQ(in_shape->size(), 2U) << "Input:[data, weight]";
   }
   CHECK_EQ(out_shape->size(), 1U);
-  const TShape& dshape = (*in_shape)[ConvTransposeParam::kData];
+  const TShape& dshape = (*in_shape)[Conv2DTransposeParam::kData];
   if (dshape.ndim() == 0) return false;
   TShape dshape_nchw = ConvertLayout(dshape, param.layout, kNCHW);
 
@@ -154,11 +154,11 @@ inline bool ConvTransposeInferShape(const nnvm::NodeAttrs& attrs,
                  param.kernel_size[0], param.kernel_size[1]});
   wshape = ConvertLayout(wshape, kNCHW, param.layout);
 
-  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvTransposeParam::kWeight, wshape);
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DTransposeParam::kWeight, wshape);
 
   if (param.use_bias) {
     NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape,
-                            ConvTransposeParam::kBias,
+                            Conv2DTransposeParam::kBias,
                             TShape({param.channels}));
   }
   // dilation
@@ -201,12 +201,12 @@ said convolution.
 .add_argument("data", "4D Tensor", "Input data.")
 .add_argument("weight", "4D Tensor", "Weight matrix.")
 .add_argument("bias", "1D Tensor", "Bias parameter.")
-.add_arguments(ConvTransposeParam::__FIELDS__())
-.set_attr_parser(ParamParser<ConvTransposeParam>)
+.add_arguments(Conv2DTransposeParam::__FIELDS__())
+.set_attr_parser(ParamParser<Conv2DTransposeParam>)
 .set_num_outputs(1)
-.set_num_inputs(UseBiasNumInputs<ConvTransposeParam>)
-.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<ConvTransposeParam>)
-.set_attr<FInferShape>("FInferShape", ConvTransposeInferShape)
+.set_num_inputs(UseBiasNumInputs<Conv2DTransposeParam>)
+.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<Conv2DTransposeParam>)
+.set_attr<FInferShape>("FInferShape", Conv2DTransposeInferShape)
 .set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>)
 .set_support_level(2);
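
Aside (not part of the diff): the parameter type is threaded through each operator registration as a template argument (ParamParser<Conv2DParam>, UseBiasNumInputs<Conv2DParam>, UseBiasListInputNames<Conv2DParam>), which is why a simple rename fans out across every .set_* line above. A rough, hypothetical sketch of how such a helper can read the parsed parameter, assuming only that the type exposes a use_bias field and that ParamParser<Param> has stored the parsed value in attrs.parsed:

// Hypothetical sketch of a templated helper in the style of UseBiasNumInputs<Param>.
// It relies only on Param::use_bias and on attrs.parsed holding the parsed Param.
#include <cstdint>
#include <nnvm/node.h>

namespace nnvm {
namespace top {

template <typename Param>
inline uint32_t ExampleUseBiasNumInputs(const NodeAttrs& attrs) {
  const Param& param = nnvm::get<Param>(attrs.parsed);
  // data and weight are always inputs; bias is the optional third input.
  return param.use_bias ? 3 : 2;
}

}  // namespace top
}  // namespace nnvm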

nnvm/src/top/nn/pooling.cc

Lines changed: 12 additions & 12 deletions
@@ -14,12 +14,12 @@
 namespace nnvm {
 namespace top {
 
-DMLC_REGISTER_PARAMETER(PoolParam);
+DMLC_REGISTER_PARAMETER(Pool2DParam);
 
 inline bool Pool2DInferShape(const nnvm::NodeAttrs& attrs,
                              std::vector<TShape>* in_shape,
                              std::vector<TShape>* out_shape) {
-  const PoolParam& param = nnvm::get<PoolParam>(attrs.parsed);
+  const Pool2DParam& param = nnvm::get<Pool2DParam>(attrs.parsed);
   CHECK_EQ(in_shape->size(), 1U);
   CHECK_EQ(out_shape->size(), 1U);
 
@@ -68,8 +68,8 @@ NNVM_REGISTER_OP(max_pool2d)
 
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(PoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<PoolParam>)
+.add_arguments(Pool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<Pool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", Pool2DInferShape)
@@ -92,21 +92,21 @@ NNVM_REGISTER_OP(avg_pool2d)
 
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(PoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<PoolParam>)
+.add_arguments(Pool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<Pool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", Pool2DInferShape)
 .set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
 .set_support_level(2);
 
 
-DMLC_REGISTER_PARAMETER(GlobalPoolParam);
+DMLC_REGISTER_PARAMETER(GlobalPool2DParam);
 
 inline bool GlobalPool2DInferShape(const nnvm::NodeAttrs& attrs,
                                    std::vector<TShape>* in_shape,
                                    std::vector<TShape>* out_shape) {
-  const GlobalPoolParam& param = nnvm::get<GlobalPoolParam>(attrs.parsed);
+  const GlobalPool2DParam& param = nnvm::get<GlobalPool2DParam>(attrs.parsed);
   CHECK_EQ(in_shape->size(), 1U);
   CHECK_EQ(out_shape->size(), 1U);
   TShape dshape = (*in_shape)[0];
@@ -129,8 +129,8 @@ NNVM_REGISTER_OP(global_max_pool2d)
 
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(GlobalPoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<GlobalPoolParam>)
+.add_arguments(GlobalPool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<GlobalPool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", GlobalPool2DInferShape)
@@ -148,8 +148,8 @@ NNVM_REGISTER_OP(global_avg_pool2d)
 
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(GlobalPoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<GlobalPoolParam>)
+.add_arguments(GlobalPool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<GlobalPool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", GlobalPool2DInferShape)
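
Aside (not part of the diff): the fields carried by Pool2DParam (pool_size, strides, padding, ceil_mode) feed the usual 2-D pooling output-size rule that an infer-shape pass like Pool2DInferShape encodes. A small, hypothetical sketch of that rule for a single spatial dimension (the function name and exact rounding policy are illustrative, not copied from pooling.cc):

// Hypothetical sketch of the standard pooling output-size rule per dimension:
//   floor mode: out = (in + 2*pad - pool) / stride + 1
//   ceil mode:  the same, but rounding the division up.
#include <cstdint>

inline int64_t ExamplePoolOutDim(int64_t in, int64_t pool, int64_t stride,
                                 int64_t pad, bool ceil_mode) {
  int64_t span = in + 2 * pad - pool;
  return (ceil_mode ? (span + stride - 1) / stride : span / stride) + 1;
}

// Example: a 224x224 NCHW feature map with a 2x2 window, stride 2, and no
// padding pools down to 112x112 in either rounding mode.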
