Commit 5f677945 by tqchen, committed by Tianqi Chen

[TOP] Rename conv pool parameter back to 2d

parent 55592ece
......@@ -101,7 +101,7 @@ struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
}
};
struct ConvParam : public dmlc::Parameter<ConvParam> {
struct Conv2DParam : public dmlc::Parameter<Conv2DParam> {
int channels;
TShape kernel_size;
TShape strides;
......@@ -111,7 +111,7 @@ struct ConvParam : public dmlc::Parameter<ConvParam> {
int layout;
bool use_bias;
DMLC_DECLARE_PARAMETER(ConvParam) {
DMLC_DECLARE_PARAMETER(Conv2DParam) {
DMLC_DECLARE_FIELD(channels)
.describe("The dimensionality of the output space"
"i.e. the number of output channels in the convolution.");
......@@ -148,7 +148,7 @@ struct ConvParam : public dmlc::Parameter<ConvParam> {
};
struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
struct Conv2DTransposeParam : public dmlc::Parameter<Conv2DTransposeParam> {
int channels;
TShape kernel_size;
TShape strides;
......@@ -159,7 +159,7 @@ struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
int layout;
bool use_bias;
DMLC_DECLARE_PARAMETER(ConvTransposeParam) {
DMLC_DECLARE_PARAMETER(Conv2DTransposeParam) {
DMLC_DECLARE_FIELD(channels)
.describe("The dimensionality of the output space"
"i.e. the number of output channels in the convolution.");
......@@ -198,7 +198,7 @@ struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
};
struct PoolParam : public dmlc::Parameter<PoolParam> {
struct Pool2DParam : public dmlc::Parameter<Pool2DParam> {
TShape pool_size;
TShape strides;
TShape padding;
......@@ -206,7 +206,7 @@ struct PoolParam : public dmlc::Parameter<PoolParam> {
int layout;
bool ceil_mode;
DMLC_DECLARE_PARAMETER(PoolParam) {
DMLC_DECLARE_PARAMETER(Pool2DParam) {
DMLC_DECLARE_FIELD(pool_size)
.describe("Size of the pooling windows..");
DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
......@@ -234,10 +234,10 @@ struct PoolParam : public dmlc::Parameter<PoolParam> {
};
struct GlobalPoolParam : public dmlc::Parameter<GlobalPoolParam> {
struct GlobalPool2DParam : public dmlc::Parameter<GlobalPool2DParam> {
int layout;
DMLC_DECLARE_PARAMETER(GlobalPoolParam) {
DMLC_DECLARE_PARAMETER(GlobalPool2DParam) {
DMLC_DECLARE_FIELD(layout)
.add_enum("NCHW", kNCHW)
.add_enum("NHWC", kNHWC)
......
......@@ -15,12 +15,12 @@ namespace nnvm {
namespace top {
// conv2d
DMLC_REGISTER_PARAMETER(ConvParam);
DMLC_REGISTER_PARAMETER(Conv2DParam);
inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape>* in_shape,
std::vector<TShape>* out_shape) {
const ConvParam& param = nnvm::get<ConvParam>(attrs.parsed);
const Conv2DParam& param = nnvm::get<Conv2DParam>(attrs.parsed);
if (param.use_bias) {
CHECK_EQ(in_shape->size(), 3U) << "Input:[data, weight, bias]";
} else {
......@@ -51,10 +51,10 @@ inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
wshape = ConvertLayout(wshape, kNCHW, param.layout);
wshape[0] *= param.groups;
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvParam::kWeight, wshape);
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DParam::kWeight, wshape);
if (param.use_bias) {
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape,
ConvParam::kBias, TShape({param.channels}));
Conv2DParam::kBias, TShape({param.channels}));
}
// dilation
dim_t dilated_ksize_y = 1 + (param.kernel_size[0] - 1) * param.dilation[0];
......@@ -79,7 +79,7 @@ inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
if (oshape[3] && param.strides[1] == 1) {
dshape[3] = oshape[3] + dilated_ksize_x - 1 - 2 * param.padding[1];
}
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvParam::kData,
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DParam::kData,
ConvertLayout(dshape, kNCHW, param.layout));
// Check whether the kernel sizes are valid
if (dshape[2] != 0) {
......@@ -112,29 +112,29 @@ a bias vector is created and added to the outputs.
.add_argument("data", "4D Tensor", "Input data.")
.add_argument("weight", "4D Tensor", "Weight matrix.")
.add_argument("bias", "1D Tensor", "Bias parameter.")
.add_arguments(ConvParam::__FIELDS__())
.set_attr_parser(ParamParser<ConvParam>)
.add_arguments(Conv2DParam::__FIELDS__())
.set_attr_parser(ParamParser<Conv2DParam>)
.set_num_outputs(1)
.set_num_inputs(UseBiasNumInputs<ConvParam>)
.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<ConvParam>)
.set_num_inputs(UseBiasNumInputs<Conv2DParam>)
.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<Conv2DParam>)
.set_attr<FInferShape>("FInferShape", Conv2DInferShape)
.set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>)
.set_support_level(2);
DMLC_REGISTER_PARAMETER(ConvTransposeParam);
DMLC_REGISTER_PARAMETER(Conv2DTransposeParam);
inline bool ConvTransposeInferShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape>* in_shape,
std::vector<TShape>* out_shape) {
const ConvTransposeParam& param = nnvm::get<ConvTransposeParam>(attrs.parsed);
inline bool Conv2DTransposeInferShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape>* in_shape,
std::vector<TShape>* out_shape) {
const Conv2DTransposeParam& param = nnvm::get<Conv2DTransposeParam>(attrs.parsed);
if (param.use_bias) {
CHECK_EQ(in_shape->size(), 3U) << "Input:[data, weight, bias]";
} else {
CHECK_EQ(in_shape->size(), 2U) << "Input:[data, weight]";
}
CHECK_EQ(out_shape->size(), 1U);
const TShape& dshape = (*in_shape)[ConvTransposeParam::kData];
const TShape& dshape = (*in_shape)[Conv2DTransposeParam::kData];
if (dshape.ndim() == 0) return false;
TShape dshape_nchw = ConvertLayout(dshape, param.layout, kNCHW);
......@@ -154,11 +154,11 @@ inline bool ConvTransposeInferShape(const nnvm::NodeAttrs& attrs,
param.kernel_size[0], param.kernel_size[1]});
wshape = ConvertLayout(wshape, kNCHW, param.layout);
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvTransposeParam::kWeight, wshape);
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DTransposeParam::kWeight, wshape);
if (param.use_bias) {
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape,
ConvTransposeParam::kBias,
Conv2DTransposeParam::kBias,
TShape({param.channels}));
}
// dilation
......@@ -201,12 +201,12 @@ said convolution.
.add_argument("data", "4D Tensor", "Input data.")
.add_argument("weight", "4D Tensor", "Weight matrix.")
.add_argument("bias", "1D Tensor", "Bias parameter.")
.add_arguments(ConvTransposeParam::__FIELDS__())
.set_attr_parser(ParamParser<ConvTransposeParam>)
.add_arguments(Conv2DTransposeParam::__FIELDS__())
.set_attr_parser(ParamParser<Conv2DTransposeParam>)
.set_num_outputs(1)
.set_num_inputs(UseBiasNumInputs<ConvTransposeParam>)
.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<ConvTransposeParam>)
.set_attr<FInferShape>("FInferShape", ConvTransposeInferShape)
.set_num_inputs(UseBiasNumInputs<Conv2DTransposeParam>)
.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<Conv2DTransposeParam>)
.set_attr<FInferShape>("FInferShape", Conv2DTransposeInferShape)
.set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>)
.set_support_level(2);
......
......@@ -14,12 +14,12 @@
namespace nnvm {
namespace top {
DMLC_REGISTER_PARAMETER(PoolParam);
DMLC_REGISTER_PARAMETER(Pool2DParam);
inline bool Pool2DInferShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape>* in_shape,
std::vector<TShape>* out_shape) {
const PoolParam& param = nnvm::get<PoolParam>(attrs.parsed);
const Pool2DParam& param = nnvm::get<Pool2DParam>(attrs.parsed);
CHECK_EQ(in_shape->size(), 1U);
CHECK_EQ(out_shape->size(), 1U);
......@@ -68,8 +68,8 @@ NNVM_REGISTER_OP(max_pool2d)
)code" NNVM_ADD_FILELINE)
.add_argument("data", "4D Tensor", "Input data.")
.add_arguments(PoolParam::__FIELDS__())
.set_attr_parser(ParamParser<PoolParam>)
.add_arguments(Pool2DParam::__FIELDS__())
.set_attr_parser(ParamParser<Pool2DParam>)
.set_num_outputs(1)
.set_num_inputs(1)
.set_attr<FInferShape>("FInferShape", Pool2DInferShape)
......@@ -92,8 +92,8 @@ NNVM_REGISTER_OP(avg_pool2d)
)code" NNVM_ADD_FILELINE)
.add_argument("data", "4D Tensor", "Input data.")
.add_arguments(PoolParam::__FIELDS__())
.set_attr_parser(ParamParser<PoolParam>)
.add_arguments(Pool2DParam::__FIELDS__())
.set_attr_parser(ParamParser<Pool2DParam>)
.set_num_outputs(1)
.set_num_inputs(1)
.set_attr<FInferShape>("FInferShape", Pool2DInferShape)
......@@ -101,12 +101,12 @@ NNVM_REGISTER_OP(avg_pool2d)
.set_support_level(2);
DMLC_REGISTER_PARAMETER(GlobalPoolParam);
DMLC_REGISTER_PARAMETER(GlobalPool2DParam);
inline bool GlobalPool2DInferShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape>* in_shape,
std::vector<TShape>* out_shape) {
const GlobalPoolParam& param = nnvm::get<GlobalPoolParam>(attrs.parsed);
const GlobalPool2DParam& param = nnvm::get<GlobalPool2DParam>(attrs.parsed);
CHECK_EQ(in_shape->size(), 1U);
CHECK_EQ(out_shape->size(), 1U);
TShape dshape = (*in_shape)[0];
......@@ -129,8 +129,8 @@ NNVM_REGISTER_OP(global_max_pool2d)
)code" NNVM_ADD_FILELINE)
.add_argument("data", "4D Tensor", "Input data.")
.add_arguments(GlobalPoolParam::__FIELDS__())
.set_attr_parser(ParamParser<GlobalPoolParam>)
.add_arguments(GlobalPool2DParam::__FIELDS__())
.set_attr_parser(ParamParser<GlobalPool2DParam>)
.set_num_outputs(1)
.set_num_inputs(1)
.set_attr<FInferShape>("FInferShape", GlobalPool2DInferShape)
......@@ -148,8 +148,8 @@ NNVM_REGISTER_OP(global_avg_pool2d)
)code" NNVM_ADD_FILELINE)
.add_argument("data", "4D Tensor", "Input data.")
.add_arguments(GlobalPoolParam::__FIELDS__())
.set_attr_parser(ParamParser<GlobalPoolParam>)
.add_arguments(GlobalPool2DParam::__FIELDS__())
.set_attr_parser(ParamParser<GlobalPool2DParam>)
.set_num_outputs(1)
.set_num_inputs(1)
.set_attr<FInferShape>("FInferShape", GlobalPool2DInferShape)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment