|
12 | 12 | namespace nnvm {
|
13 | 13 | namespace top {
|
14 | 14 |
|
/*!
 * \brief Data-layout flags for 1-D, 2-D and 3-D operators.
 *
 * Each dimensionality family starts at its own bit offset, so the three
 * families occupy disjoint value ranges:
 *   2-D image layouts start at 0, 1-D at 1<<3, 3-D volumetric at 1<<5.
 * Letters: N = batch, C = channel, H = height, W = width, D = depth.
 */
enum LayoutFlag {
  // 2-D image layouts.
  kNCHW = 0,
  kNHWC = 1,
  kCHWN = 2,

  // 1-D layouts.
  kNCW = 1 << 3,  // == 8
  kNWC = 9,
  kCWN = 10,

  // 3-D volumetric layouts.
  kNCDHW = 1 << 5,  // == 32
  kNDHWC = 33,
  kCDHWN = 34
};
| 28 | + |
15 | 29 | struct DenseParam : public dmlc::Parameter<DenseParam> {
|
16 | 30 | int units;
|
17 | 31 | bool use_bias;
|
@@ -85,6 +99,145 @@ struct LogSoftmaxParam : public dmlc::Parameter<LogSoftmaxParam> {
|
85 | 99 | }
|
86 | 100 | };
|
87 | 101 |
|
| 102 | +struct Conv2DParam : public dmlc::Parameter<Conv2DParam> { |
| 103 | + int channels; |
| 104 | + TShape kernel_size; |
| 105 | + TShape strides; |
| 106 | + TShape padding; |
| 107 | + TShape dilation; |
| 108 | + int groups; |
| 109 | + int layout; |
| 110 | + bool use_bias; |
| 111 | + |
| 112 | + DMLC_DECLARE_PARAMETER(Conv2DParam) { |
| 113 | + DMLC_DECLARE_FIELD(channels) |
| 114 | + .describe("The dimensionality of the output space" |
| 115 | + "i.e. the number of output channels in the convolution."); |
| 116 | + DMLC_DECLARE_FIELD(kernel_size) |
| 117 | + .describe("Specifies the dimensions of the convolution window."); |
| 118 | + DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1})) |
| 119 | + .describe("Specifies the strides of the convolution."); |
| 120 | + DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0})) |
| 121 | + .describe("If padding is non-zero, then the input is implicitly zero-padded" |
| 122 | + "on both sides for padding number of points"); |
| 123 | + DMLC_DECLARE_FIELD(dilation).set_default(TShape({1, 1})) |
| 124 | + .describe("Specifies the dilation rate to use for dilated convolution."); |
| 125 | + DMLC_DECLARE_FIELD(groups).set_default(1) |
| 126 | + .describe("Controls the connections between inputs and outputs." |
| 127 | + "At groups=1, all inputs are convolved to all outputs." |
| 128 | + "At groups=2, the operation becomes equivalent to having two convolution" |
| 129 | + "layers side by side, each seeing half the input channels, and producing" |
| 130 | + "half the output channels, and both subsequently concatenated."); |
| 131 | + DMLC_DECLARE_FIELD(layout) |
| 132 | + .add_enum("NCHW", kNCHW) |
| 133 | + .add_enum("NHWC", kNHWC) |
| 134 | + .set_default(kNCHW) |
| 135 | + .describe("Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc." |
| 136 | + "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" |
| 137 | + "dimensions respectively. Convolution is applied on the 'H' and" |
| 138 | + "'W' dimensions."); |
| 139 | + DMLC_DECLARE_FIELD(use_bias).set_default(true) |
| 140 | + .describe("Whether the layer uses a bias vector."); |
| 141 | + } |
| 142 | +}; |
| 143 | + |
| 144 | + |
| 145 | +struct Conv2DTransposeParam : public dmlc::Parameter<Conv2DTransposeParam> { |
| 146 | + int channels; |
| 147 | + TShape kernel_size; |
| 148 | + TShape strides; |
| 149 | + TShape padding; |
| 150 | + TShape output_padding; |
| 151 | + TShape dilation; |
| 152 | + int groups; |
| 153 | + int layout; |
| 154 | + bool use_bias; |
| 155 | + |
| 156 | + DMLC_DECLARE_PARAMETER(Conv2DTransposeParam) { |
| 157 | + DMLC_DECLARE_FIELD(channels) |
| 158 | + .describe("The dimensionality of the output space" |
| 159 | + "i.e. the number of output channels in the convolution."); |
| 160 | + DMLC_DECLARE_FIELD(kernel_size) |
| 161 | + .describe("Specifies the dimensions of the convolution window."); |
| 162 | + DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1})) |
| 163 | + .describe("Specifies the strides of the convolution."); |
| 164 | + DMLC_DECLARE_FIELD(output_padding).set_default(TShape({0, 0})) |
| 165 | + .describe("Zero-padding added to one side of the output."); |
| 166 | + DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0})) |
| 167 | + .describe("If padding is non-zero, then the input is implicitly zero-padded" |
| 168 | + "on both sides for padding number of points"); |
| 169 | + DMLC_DECLARE_FIELD(dilation).set_default(TShape({1, 1})) |
| 170 | + .describe("Specifies the dilation rate to use for dilated convolution."); |
| 171 | + DMLC_DECLARE_FIELD(groups).set_default(1) |
| 172 | + .describe("Controls the connections between inputs and outputs." |
| 173 | + "At groups=1, all inputs are convolved to all outputs." |
| 174 | + "At groups=2, the operation becomes equivalent to having two convolution" |
| 175 | + "layers side by side, each seeing half the input channels, and producing" |
| 176 | + "half the output channels, and both subsequently concatenated."); |
| 177 | + DMLC_DECLARE_FIELD(layout) |
| 178 | + .add_enum("NCHW", kNCHW) |
| 179 | + .add_enum("NHWC", kNHWC) |
| 180 | + .set_default(kNCHW) |
| 181 | + .describe("Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc." |
| 182 | + "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" |
| 183 | + "dimensions respectively. Convolution is applied on the 'H' and" |
| 184 | + "'W' dimensions."); |
| 185 | + DMLC_DECLARE_FIELD(use_bias).set_default(true) |
| 186 | + .describe("Whether the layer uses a bias vector."); |
| 187 | + } |
| 188 | +}; |
| 189 | + |
| 190 | +struct Pool2DParam : public dmlc::Parameter<Pool2DParam> { |
| 191 | + TShape pool_size; |
| 192 | + TShape strides; |
| 193 | + TShape padding; |
| 194 | + int groups; |
| 195 | + int layout; |
| 196 | + bool ceil_mode; |
| 197 | + |
| 198 | + DMLC_DECLARE_PARAMETER(Pool2DParam) { |
| 199 | + DMLC_DECLARE_FIELD(pool_size) |
| 200 | + .describe("Size of the pooling windows.."); |
| 201 | + DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1})) |
| 202 | + .describe("Specifies the strides of the convolution."); |
| 203 | + DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0})) |
| 204 | + .describe("If padding is non-zero, then the input is implicitly zero-padded" |
| 205 | + "on both sides for padding number of points"); |
| 206 | + DMLC_DECLARE_FIELD(groups).set_default(1) |
| 207 | + .describe("Controls the connections between inputs and outputs." |
| 208 | + "At groups=1, all inputs are convolved to all outputs." |
| 209 | + "At groups=2, the operation becomes equivalent to having two convolution" |
| 210 | + "layers side by side, each seeing half the input channels, and producing" |
| 211 | + "half the output channels, and both subsequently concatenated."); |
| 212 | + DMLC_DECLARE_FIELD(layout) |
| 213 | + .add_enum("NCHW", kNCHW) |
| 214 | + .add_enum("NHWC", kNHWC) |
| 215 | + .set_default(kNCHW) |
| 216 | + .describe("Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc." |
| 217 | + "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" |
| 218 | + "dimensions respectively. Convolution is applied on the 'H' and" |
| 219 | + "'W' dimensions."); |
| 220 | + DMLC_DECLARE_FIELD(ceil_mode).set_default(false) |
| 221 | + .describe("When true, will use ceil instead of floor to compute the output shape."); |
| 222 | + } |
| 223 | +}; |
| 224 | + |
| 225 | + |
| 226 | +struct GlobalPool2DParam : public dmlc::Parameter<GlobalPool2DParam> { |
| 227 | + int layout; |
| 228 | + |
| 229 | + DMLC_DECLARE_PARAMETER(GlobalPool2DParam) { |
| 230 | + DMLC_DECLARE_FIELD(layout) |
| 231 | + .add_enum("NCHW", kNCHW) |
| 232 | + .add_enum("NHWC", kNHWC) |
| 233 | + .set_default(kNCHW) |
| 234 | + .describe("Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc." |
| 235 | + "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" |
| 236 | + "dimensions respectively. Convolution is applied on the 'H' and" |
| 237 | + "'W' dimensions."); |
| 238 | + } |
| 239 | +}; |
| 240 | + |
88 | 241 | } // namespace top
|
89 | 242 | } // namespace nnvm
|
90 | 243 |
|
|
0 commit comments