Commit 5f677945
authored Sep 12, 2017 by tqchen, committed by Tianqi Chen on May 29, 2018
[TOP] Rename conv pool parameter back to 2d
parent 55592ece
Showing 3 changed files with 42 additions and 42 deletions:

nnvm/include/nnvm/top/nn.h        +8  -8
nnvm/src/top/nn/convolution.cc    +22 -22
nnvm/src/top/nn/pooling.cc        +12 -12
nnvm/include/nnvm/top/nn.h

@@ -101,7 +101,7 @@ struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
   }
 };
 
-struct ConvParam : public dmlc::Parameter<ConvParam> {
+struct Conv2DParam : public dmlc::Parameter<Conv2DParam> {
   int channels;
   TShape kernel_size;
   TShape strides;
@@ -111,7 +111,7 @@ struct ConvParam : public dmlc::Parameter<ConvParam> {
   int layout;
   bool use_bias;
 
-  DMLC_DECLARE_PARAMETER(ConvParam) {
+  DMLC_DECLARE_PARAMETER(Conv2DParam) {
     DMLC_DECLARE_FIELD(channels)
       .describe("The dimensionality of the output space"
                 "i.e. the number of output channels in the convolution.");
@@ -148,7 +148,7 @@ struct ConvParam : public dmlc::Parameter<ConvParam> {
 };
 
-struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
+struct Conv2DTransposeParam : public dmlc::Parameter<Conv2DTransposeParam> {
   int channels;
   TShape kernel_size;
   TShape strides;
@@ -159,7 +159,7 @@ struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
   int layout;
   bool use_bias;
 
-  DMLC_DECLARE_PARAMETER(ConvTransposeParam) {
+  DMLC_DECLARE_PARAMETER(Conv2DTransposeParam) {
     DMLC_DECLARE_FIELD(channels)
       .describe("The dimensionality of the output space"
                 "i.e. the number of output channels in the convolution.");
@@ -198,7 +198,7 @@ struct ConvTransposeParam : public dmlc::Parameter<ConvTransposeParam> {
 };
 
-struct PoolParam : public dmlc::Parameter<PoolParam> {
+struct Pool2DParam : public dmlc::Parameter<Pool2DParam> {
   TShape pool_size;
   TShape strides;
   TShape padding;
@@ -206,7 +206,7 @@ struct PoolParam : public dmlc::Parameter<PoolParam> {
   int layout;
   bool ceil_mode;
 
-  DMLC_DECLARE_PARAMETER(PoolParam) {
+  DMLC_DECLARE_PARAMETER(Pool2DParam) {
     DMLC_DECLARE_FIELD(pool_size)
       .describe("Size of the pooling windows..");
     DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
@@ -234,10 +234,10 @@ struct PoolParam : public dmlc::Parameter<PoolParam> {
 };
 
-struct GlobalPoolParam : public dmlc::Parameter<GlobalPoolParam> {
+struct GlobalPool2DParam : public dmlc::Parameter<GlobalPool2DParam> {
   int layout;
 
-  DMLC_DECLARE_PARAMETER(GlobalPoolParam) {
+  DMLC_DECLARE_PARAMETER(GlobalPool2DParam) {
     DMLC_DECLARE_FIELD(layout)
       .add_enum("NCHW", kNCHW)
      .add_enum("NHWC", kNHWC)
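The header change above only renames the dmlc parameter structs (ConvParam to Conv2DParam, ConvTransposeParam to Conv2DTransposeParam, PoolParam to Pool2DParam, GlobalPoolParam to GlobalPool2DParam); the declare/register/parse pattern itself is unchanged. Below is a minimal, self-contained sketch of that pattern using dmlc-core directly. The struct name DemoPool2DParam and its trimmed field set are illustrative stand-ins, not part of this commit.

#include <dmlc/parameter.h>
#include <iostream>
#include <map>
#include <string>

// Hypothetical, trimmed stand-in for the renamed Pool2DParam.
struct DemoPool2DParam : public dmlc::Parameter<DemoPool2DParam> {
  int layout;
  bool ceil_mode;
  DMLC_DECLARE_PARAMETER(DemoPool2DParam) {
    DMLC_DECLARE_FIELD(layout)
      .add_enum("NCHW", 0)
      .add_enum("NHWC", 1)
      .set_default(0)
      .describe("Layout of the input data.");
    DMLC_DECLARE_FIELD(ceil_mode).set_default(false)
      .describe("Use ceil instead of floor when computing the output shape.");
  }
};

// Registration goes into exactly one .cc file, just as DMLC_REGISTER_PARAMETER
// is done for Pool2DParam in nnvm/src/top/nn/pooling.cc below.
DMLC_REGISTER_PARAMETER(DemoPool2DParam);

int main() {
  DemoPool2DParam param;
  // Init() fills the fields from string key/value pairs; nnvm's ParamParser
  // performs the same step from an operator's attribute dictionary.
  param.Init(std::map<std::string, std::string>{
      {"layout", "NHWC"}, {"ceil_mode", "true"}});
  std::cout << "layout=" << param.layout
            << " ceil_mode=" << param.ceil_mode << std::endl;  // layout=1 ceil_mode=1
  return 0;
}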
nnvm/src/top/nn/convolution.cc

@@ -15,12 +15,12 @@ namespace nnvm {
 namespace top {
 
 // conv2d
-DMLC_REGISTER_PARAMETER(ConvParam);
+DMLC_REGISTER_PARAMETER(Conv2DParam);
 
 inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
                              std::vector<TShape>* in_shape,
                              std::vector<TShape>* out_shape) {
-  const ConvParam& param = nnvm::get<ConvParam>(attrs.parsed);
+  const Conv2DParam& param = nnvm::get<Conv2DParam>(attrs.parsed);
   if (param.use_bias) {
     CHECK_EQ(in_shape->size(), 3U) << "Input:[data, weight, bias]";
   } else {
@@ -51,10 +51,10 @@ inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
     wshape = ConvertLayout(wshape, kNCHW, param.layout);
   wshape[0] *= param.groups;
 
-  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvParam::kWeight, wshape);
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DParam::kWeight, wshape);
   if (param.use_bias) {
     NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape,
-                            ConvParam::kBias, TShape({param.channels}));
+                            Conv2DParam::kBias, TShape({param.channels}));
   }
   // dilation
   dim_t dilated_ksize_y = 1 + (param.kernel_size[0] - 1) * param.dilation[0];
@@ -79,7 +79,7 @@ inline bool Conv2DInferShape(const nnvm::NodeAttrs& attrs,
   if (oshape[3] && param.strides[1] == 1) {
     dshape[3] = oshape[3] + dilated_ksize_x - 1 - 2 * param.padding[1];
   }
-  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvParam::kData,
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DParam::kData,
                           ConvertLayout(dshape, kNCHW, param.layout));
   // Check whether the kernel sizes are valid
   if (dshape[2] != 0) {
@@ -112,29 +112,29 @@ a bias vector is created and added to the outputs.
 .add_argument("data", "4D Tensor", "Input data.")
 .add_argument("weight", "4D Tensor", "Weight matrix.")
 .add_argument("bias", "1D Tensor", "Bias parameter.")
-.add_arguments(ConvParam::__FIELDS__())
-.set_attr_parser(ParamParser<ConvParam>)
+.add_arguments(Conv2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<Conv2DParam>)
 .set_num_outputs(1)
-.set_num_inputs(UseBiasNumInputs<ConvParam>)
-.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<ConvParam>)
+.set_num_inputs(UseBiasNumInputs<Conv2DParam>)
+.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<Conv2DParam>)
 .set_attr<FInferShape>("FInferShape", Conv2DInferShape)
 .set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>)
 .set_support_level(2);
 
-DMLC_REGISTER_PARAMETER(ConvTransposeParam);
+DMLC_REGISTER_PARAMETER(Conv2DTransposeParam);
 
-inline bool ConvTransposeInferShape(const nnvm::NodeAttrs& attrs,
-                                    std::vector<TShape>* in_shape,
-                                    std::vector<TShape>* out_shape) {
-  const ConvTransposeParam& param = nnvm::get<ConvTransposeParam>(attrs.parsed);
+inline bool Conv2DTransposeInferShape(const nnvm::NodeAttrs& attrs,
+                                      std::vector<TShape>* in_shape,
+                                      std::vector<TShape>* out_shape) {
+  const Conv2DTransposeParam& param = nnvm::get<Conv2DTransposeParam>(attrs.parsed);
   if (param.use_bias) {
     CHECK_EQ(in_shape->size(), 3U) << "Input:[data, weight, bias]";
   } else {
     CHECK_EQ(in_shape->size(), 2U) << "Input:[data, weight]";
   }
   CHECK_EQ(out_shape->size(), 1U);
-  const TShape& dshape = (*in_shape)[ConvTransposeParam::kData];
+  const TShape& dshape = (*in_shape)[Conv2DTransposeParam::kData];
   if (dshape.ndim() == 0) return false;
   TShape dshape_nchw = ConvertLayout(dshape, param.layout, kNCHW);
@@ -154,11 +154,11 @@ inline bool ConvTransposeInferShape(const nnvm::NodeAttrs& attrs,
                  param.kernel_size[0], param.kernel_size[1]});
   wshape = ConvertLayout(wshape, kNCHW, param.layout);
-  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, ConvTransposeParam::kWeight, wshape);
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, Conv2DTransposeParam::kWeight, wshape);
   if (param.use_bias) {
     NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape,
-                            ConvTransposeParam::kBias,
+                            Conv2DTransposeParam::kBias,
                             TShape({param.channels}));
   }
   // dilation
@@ -201,12 +201,12 @@ said convolution.
 .add_argument("data", "4D Tensor", "Input data.")
 .add_argument("weight", "4D Tensor", "Weight matrix.")
 .add_argument("bias", "1D Tensor", "Bias parameter.")
-.add_arguments(ConvTransposeParam::__FIELDS__())
-.set_attr_parser(ParamParser<ConvTransposeParam>)
+.add_arguments(Conv2DTransposeParam::__FIELDS__())
+.set_attr_parser(ParamParser<Conv2DTransposeParam>)
 .set_num_outputs(1)
-.set_num_inputs(UseBiasNumInputs<ConvTransposeParam>)
-.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<ConvTransposeParam>)
-.set_attr<FInferShape>("FInferShape", ConvTransposeInferShape)
+.set_num_inputs(UseBiasNumInputs<Conv2DTransposeParam>)
+.set_attr<FListInputNames>("FListInputNames", UseBiasListInputNames<Conv2DTransposeParam>)
+.set_attr<FInferShape>("FInferShape", Conv2DTransposeInferShape)
 .set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>)
 .set_support_level(2);
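In convolution.cc the rename threads through the operator registrations: ParamParser<Conv2DParam> parses the attribute dictionary, while UseBiasNumInputs<Conv2DParam> and UseBiasListInputNames<Conv2DParam> switch between [data, weight] and [data, weight, bias] depending on the parsed use_bias flag. Below is a rough, self-contained illustration of that switch, not the nnvm helpers themselves; DemoConv2DParam and the two free functions are hypothetical stand-ins.

#include <dmlc/parameter.h>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical, trimmed stand-in for the renamed Conv2DParam.
struct DemoConv2DParam : public dmlc::Parameter<DemoConv2DParam> {
  int channels;
  bool use_bias;
  DMLC_DECLARE_PARAMETER(DemoConv2DParam) {
    DMLC_DECLARE_FIELD(channels).describe("Number of output channels.");
    DMLC_DECLARE_FIELD(use_bias).set_default(true)
      .describe("Whether a bias input is expected.");
  }
};
DMLC_REGISTER_PARAMETER(DemoConv2DParam);

// Analogue of UseBiasNumInputs<Param>: 3 inputs with bias, 2 without.
uint32_t NumInputs(const DemoConv2DParam& p) { return p.use_bias ? 3 : 2; }

// Analogue of UseBiasListInputNames<Param>.
std::vector<std::string> ListInputNames(const DemoConv2DParam& p) {
  return p.use_bias ? std::vector<std::string>{"data", "weight", "bias"}
                    : std::vector<std::string>{"data", "weight"};
}

int main() {
  DemoConv2DParam param;
  param.Init(std::map<std::string, std::string>{
      {"channels", "64"}, {"use_bias", "false"}});
  std::cout << NumInputs(param) << std::endl;           // prints 2
  for (const auto& n : ListInputNames(param)) std::cout << n << " ";
  std::cout << std::endl;                               // prints: data weight
  return 0;
}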
nnvm/src/top/nn/pooling.cc

@@ -14,12 +14,12 @@
 namespace nnvm {
 namespace top {
 
-DMLC_REGISTER_PARAMETER(PoolParam);
+DMLC_REGISTER_PARAMETER(Pool2DParam);
 
 inline bool Pool2DInferShape(const nnvm::NodeAttrs& attrs,
                              std::vector<TShape>* in_shape,
                              std::vector<TShape>* out_shape) {
-  const PoolParam& param = nnvm::get<PoolParam>(attrs.parsed);
+  const Pool2DParam& param = nnvm::get<Pool2DParam>(attrs.parsed);
   CHECK_EQ(in_shape->size(), 1U);
   CHECK_EQ(out_shape->size(), 1U);
@@ -68,8 +68,8 @@ NNVM_REGISTER_OP(max_pool2d)
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(PoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<PoolParam>)
+.add_arguments(Pool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<Pool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", Pool2DInferShape)
@@ -92,8 +92,8 @@ NNVM_REGISTER_OP(avg_pool2d)
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(PoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<PoolParam>)
+.add_arguments(Pool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<Pool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", Pool2DInferShape)
@@ -101,12 +101,12 @@ NNVM_REGISTER_OP(avg_pool2d)
 .set_support_level(2);
 
-DMLC_REGISTER_PARAMETER(GlobalPoolParam);
+DMLC_REGISTER_PARAMETER(GlobalPool2DParam);
 
 inline bool GlobalPool2DInferShape(const nnvm::NodeAttrs& attrs,
                                    std::vector<TShape>* in_shape,
                                    std::vector<TShape>* out_shape) {
-  const GlobalPoolParam& param = nnvm::get<GlobalPoolParam>(attrs.parsed);
+  const GlobalPool2DParam& param = nnvm::get<GlobalPool2DParam>(attrs.parsed);
   CHECK_EQ(in_shape->size(), 1U);
   CHECK_EQ(out_shape->size(), 1U);
   TShape dshape = (*in_shape)[0];
@@ -129,8 +129,8 @@ NNVM_REGISTER_OP(global_max_pool2d)
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(GlobalPoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<GlobalPoolParam>)
+.add_arguments(GlobalPool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<GlobalPool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", GlobalPool2DInferShape)
@@ -148,8 +148,8 @@ NNVM_REGISTER_OP(global_avg_pool2d)
 )code" NNVM_ADD_FILELINE)
 .add_argument("data", "4D Tensor", "Input data.")
-.add_arguments(GlobalPoolParam::__FIELDS__())
-.set_attr_parser(ParamParser<GlobalPoolParam>)
+.add_arguments(GlobalPool2DParam::__FIELDS__())
+.set_attr_parser(ParamParser<GlobalPool2DParam>)
 .set_num_outputs(1)
 .set_num_inputs(1)
 .set_attr<FInferShape>("FInferShape", GlobalPool2DInferShape)
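For context on what the renamed Pool2DParam feeds, a Pool2DInferShape-style routine only needs pool_size, strides, padding, and ceil_mode to map an NCHW input shape to an output shape. The sketch below shows that arithmetic in isolation; it is a generic illustration with made-up values (the PoolDim helper and the example shapes), not the nnvm implementation.

#include <array>
#include <cmath>
#include <cstdint>
#include <iostream>

// Output extent of one spatial axis for 2D pooling.
int64_t PoolDim(int64_t in, int64_t pool, int64_t stride, int64_t pad, bool ceil_mode) {
  double v = static_cast<double>(in + 2 * pad - pool) / stride + 1;
  return ceil_mode ? static_cast<int64_t>(std::ceil(v))
                   : static_cast<int64_t>(std::floor(v));
}

int main() {
  // NCHW input 1 x 64 x 32 x 32, max_pool2d-style 2x2 window, stride 2, no padding.
  std::array<int64_t, 4> dshape = {1, 64, 32, 32};
  std::array<int64_t, 4> oshape = {
      dshape[0], dshape[1],
      PoolDim(dshape[2], 2, 2, 0, /*ceil_mode=*/false),
      PoolDim(dshape[3], 2, 2, 0, /*ceil_mode=*/false)};
  for (auto d : oshape) std::cout << d << " ";  // prints: 1 64 16 16
  std::cout << std::endl;
  return 0;
}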