986caf71
Commit
986caf71
authored
Sep 09, 2017
by
Tianqi Chen
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
[TOP] Level1 complete (#3)
parent
be1660b1
Hide whitespace changes
Inline
Side-by-side
Showing
9 changed files
with
355 additions
and
170 deletions
+355
-170
    nnvm/include/nnvm/top/nn.h            +58    -0
    nnvm/include/nnvm/top/tensor.h        +27    -4
    nnvm/src/top/elemwise_op_common.h     +20    -5
    nnvm/src/top/nn.cc                   +117    -1
    nnvm/src/top/tensor.cc                +79   -30
    nnvm/tests/python/test_gradient.py     +0   -24
    nnvm/tests/python/test_graph.py       +22   -65
    nnvm/tests/python/test_symbol.py       +7   -33
    nnvm/tests/python/test_top_level1.py  +25    -8
nnvm/include/nnvm/top/nn.h

@@ -15,6 +15,7 @@ namespace top {
 struct DenseParam : public dmlc::Parameter<DenseParam> {
   int units;
   bool use_bias;
+
   DMLC_DECLARE_PARAMETER(DenseParam) {
     DMLC_DECLARE_FIELD(units).set_lower_bound(1)
     .describe("Number of hidden units of the dense transformation.");
@@ -27,6 +28,63 @@ struct DenseParam : public dmlc::Parameter<DenseParam> {
   static const constexpr int kBias = 2;
 };

+struct DropoutParam : public dmlc::Parameter<DropoutParam> {
+  float rate;
+  DMLC_DECLARE_PARAMETER(DropoutParam) {
+    DMLC_DECLARE_FIELD(rate).set_default(0.5)
+        .set_range(0, 1)
+        .describe("Fraction of the input that gets dropped out during training time.");
+  }
+};
+
+struct BatchNormParam : public dmlc::Parameter<BatchNormParam> {
+  int axis;
+  float epsilon;
+  float momentum;
+  bool center;
+  bool scale;
+
+  DMLC_DECLARE_PARAMETER(BatchNormParam) {
+    DMLC_DECLARE_FIELD(axis).set_default(1)
+        .describe("Specify the axis of the input shape that denotes the channel.");
+    DMLC_DECLARE_FIELD(epsilon).set_default(1e-5f)
+        .describe("Small float added to variance to avoid dividing by zero.");
+    DMLC_DECLARE_FIELD(center).set_default(true)
+        .describe("If True, add offset of `beta` to normalized tensor."
+                  "If False, `beta` is ignored.");
+    DMLC_DECLARE_FIELD(scale).set_default(true)
+        .describe("If True, multiply by `gamma`. If False, `gamma` is not used."
+                  "When the next layer is piecewise linear (also e.g. `nn.relu`),"
+                  "this can be disabled since the scaling"
+                  "will be done by the next layer.");
+  }
+  // constants
+  static const constexpr int kData = 0;
+  static const constexpr int kGamma = 1;
+  static const constexpr int kBeta = 2;
+  static const constexpr int kMovingMean = 3;
+  static const constexpr int kMovingVariance = 4;
+};
+
+struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
+  int axis;
+  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
+    DMLC_DECLARE_FIELD(axis).set_default(-1)
+        .describe("The axis to sum over when computing softmax.");
+  }
+};
+
+struct LogSoftmaxParam : public dmlc::Parameter<LogSoftmaxParam> {
+  int axis;
+  DMLC_DECLARE_PARAMETER(LogSoftmaxParam) {
+    DMLC_DECLARE_FIELD(axis).set_default(-1)
+        .describe("The axis to sum over when computing softmax.");
+  }
+};
+
 }  // namespace top
 }  // namespace nnvm
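These dmlc parameter declarations are what the Python frontend turns into operator keyword arguments. A minimal sketch of the intended surface, assuming the auto-generated nnvm.symbol wrappers that the tests below exercise:

    import nnvm.symbol as sym

    x = sym.Variable('x')
    # DropoutParam::rate -> keyword `rate`, default 0.5, valid range [0, 1]
    y = sym.dropout(x, rate=0.3)
    # BatchNormParam fields -> keywords with the declared defaults
    z = sym.batch_norm(x, axis=1, epsilon=1e-5, name="bn")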
nnvm/include/nnvm/top/tensor.h

@@ -9,14 +9,37 @@
 namespace nnvm {
 namespace top {

-struct ConcatParam : public dmlc::Parameter<ConcatParam> {
-  int dim;
-  DMLC_DECLARE_PARAMETER(ConcatParam) {
-    DMLC_DECLARE_FIELD(dim).set_range(0, 4).set_default(1)
+struct ConcatenateParam : public dmlc::Parameter<ConcatenateParam> {
+  int axis;
+  DMLC_DECLARE_PARAMETER(ConcatenateParam) {
+    DMLC_DECLARE_FIELD(axis).set_lower_bound(0).set_default(1)
         .describe("the axis to be concatenated.");
   }
 };

+enum TypeFlag {
+  kFloat32 = 0,
+  kFloat64 = 1,
+  kFloat16 = 2,
+  kUint8 = 3,
+  kInt32 = 4,
+  kInt8 = 5,
+  kInt64 = 6,
+};
+
+struct CastParam : public dmlc::Parameter<CastParam> {
+  int dtype;
+  DMLC_DECLARE_PARAMETER(CastParam) {
+    DMLC_DECLARE_FIELD(dtype)
+        .add_enum("float32", kFloat32)
+        .add_enum("float64", kFloat64)
+        .add_enum("float16", kFloat16)
+        .add_enum("uint8", kUint8)
+        .add_enum("int32", kInt32)
+        .describe("Output data type.");
+  }
+};
+
 }  // namespace top
 }  // namespace nnvm
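Seen from the Python frontend, ConcatenateParam and CastParam surface the same way; a minimal usage sketch, where the dtype strings are exactly the add_enum names declared above:

    import nnvm.symbol as sym

    x = sym.Variable('x')
    y = sym.Variable('y')
    # ConcatenateParam::axis (lower bound 0, default 1)
    z = sym.concatenate(x, y, axis=1)
    # CastParam::dtype accepts the enum names, e.g. "float16" maps to kFloat16 (2)
    z = sym.cast(z, dtype="float16")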
nnvm/src/top/elemwise_op_common.h

@@ -57,7 +57,7 @@ inline bool ElemwiseAttr(const nnvm::NodeAttrs& attrs,
 }

 template<int n_in, int n_out>
-inline bool ElemwiseShape(const nnvm::NodeAttrs& attrs,
+inline bool ElemwiseShape(const NodeAttrs& attrs,
                           std::vector<TShape>* in_attrs,
                           std::vector<TShape>* out_attrs) {
   if (n_in != -1) {

@@ -71,7 +71,7 @@ inline bool ElemwiseShape(const nnvm::NodeAttrs& attrs,
 }

 template<int n_in, int n_out>
-inline bool ElemwiseType(const nnvm::NodeAttrs& attrs,
+inline bool ElemwiseType(const NodeAttrs& attrs,
                          std::vector<int>* in_attrs,
                          std::vector<int>* out_attrs) {
   if (n_in != -1) {

@@ -88,13 +88,28 @@ inline bool ElemwiseType(const nnvm::NodeAttrs& attrs,
   NNVM_REGISTER_OP(name)                                            \
   .set_num_inputs(1)                                                \
   .set_num_outputs(1)                                               \
-  .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>)  \
-  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)     \
-  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
+  .set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)        \
+  .set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)           \
+  .set_attr<FInplaceOption>("FInplaceOption",                       \
   [](const NodeAttrs& attrs){                                       \
     return std::vector<std::pair<int, int> >{{0, 0}};               \
   })                                                                \
   .add_argument("data", "Tensor", "The input tensor.")

+#define NNVM_REGISTER_ELEMWISE_BINARY_OP(name)                      \
+  NNVM_REGISTER_OP(name)                                            \
+  .set_num_inputs(2)                                                \
+  .set_num_outputs(1)                                               \
+  .set_attr<FInferShape>("FInferShape", ElemwiseShape<2, 1>)        \
+  .set_attr<FInferType>("FInferType", ElemwiseType<2, 1>)           \
+  .set_attr<FInplaceOption>("FInplaceOption",                       \
+  [](const NodeAttrs& attrs) {                                      \
+    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};       \
+  })                                                                \
+  .add_argument("lhs", "NDArray-or-Symbol", "first input")          \
+  .add_argument("rhs", "NDArray-or-Symbol", "second input")

 }  // namespace top
 }  // namespace nnvm
 #endif  // NNVM_TOP_ELEMWISE_OP_COMMON_H_
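For reference, a minimal Python sketch (not part of the commit) of the rule ElemwiseShape<n_in, n_out> implements: every already-known input shape must agree, and that shape is copied to all inputs and outputs:

    def elemwise_shape(in_shapes, n_out):
        # shapes already inferred elsewhere in the graph
        known = [s for s in in_shapes if s is not None]
        if not known:
            return in_shapes, [None] * n_out
        shape = known[0]
        assert all(s == shape for s in known), "incompatible input shapes"
        return [shape] * len(in_shapes), [shape] * n_out

    ins, outs = elemwise_shape([(4, 2), None], n_out=1)
    assert ins == [(4, 2), (4, 2)] and outs == [(4, 2)]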
nnvm/src/top/nn.cc

@@ -73,7 +73,7 @@ If ``use_bias`` is set to be false, then the ``bias`` term is ignored.
 .set_attr_parser(ParamParser<DenseParam>)
 .set_num_outputs(1)
 .set_num_inputs([](const NodeAttrs& attrs) {
-    const DenseParam& param = nnvm::get<DenseParam>(attrs.parsed);
+    const DenseParam& param = get<DenseParam>(attrs.parsed);
     return param.use_bias ? 3 : 2;
   })
 .set_attr<FListInputNames>("FListInputNames", DenseListInputNames)

@@ -90,5 +90,121 @@ NNVM_REGISTER_ELEMWISE_UNARY_OP(relu)
 )code" NNVM_ADD_FILELINE)
 .set_support_level(1);
+// dropout
+DMLC_REGISTER_PARAMETER(DropoutParam);
+
+NNVM_REGISTER_OP(dropout)
+.describe(R"(Applies dropout operation to input array.
+
+- During training, each element of the input is set to zero with probability p.
+  The whole array is rescaled by :math:`1/(1-p)` to keep the expected
+  sum of the input unchanged.
+
+)" NNVM_ADD_FILELINE)
+.add_argument("data", "Tensor", "Input to which dropout will be applied")
+.set_num_inputs(1)
+.set_num_outputs(2)
+.set_attr_parser(ParamParser<DropoutParam>)
+.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 2>)
+.set_attr<FInferType>("FInferType", ElemwiseType<1, 2>)
+.set_attr<FNumVisibleOutputs>("FNumVisibleOutputs",
+  [](const NodeAttrs& attrs) {
+    return 1;
+  })
+.set_attr<FListOutputNames>("FListOutputNames",
+  [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{"output", "mask"};
+  })
+.set_support_level(1);
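The docstring pins down the training-time semantics; as a minimal NumPy sketch (not part of the commit), rescaling by 1/(1-p) keeps the expected sum of the input unchanged, and the second output is the mask:

    import numpy as np

    def dropout_forward(data, rate=0.5, rng=np.random):
        # keep each element with probability (1 - rate), then rescale
        mask = (rng.uniform(size=data.shape) >= rate) / (1.0 - rate)
        return data * mask, mask  # the op's two outputs: output, mask

    x = np.ones((1000, 16))
    out, _ = dropout_forward(x, rate=0.3)
    assert abs(out.mean() - 1.0) < 0.05  # expectation preserved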
+// batchnorm
+DMLC_REGISTER_PARAMETER(BatchNormParam);
+
+NNVM_REGISTER_OP(batch_norm)
+.describe(R"(Batch normalization layer (Ioffe and Szegedy, 2014).
+Normalizes the input at each batch, i.e. applies a transformation
+that maintains the mean activation close to 0 and the activation
+standard deviation close to 1.
+
+.. math::
+
+  data\_mean[i] = mean(data[:,i,:,...]) \\
+  data\_var[i] = var(data[:,i,:,...])
+
+Then compute the normalized output, which has the same shape as input, as follows:
+
+.. math::
+
+  out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i]
+
+Both *mean* and *var* return a scalar by treating the input as a vector.
+
+Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` have shape *(k,)*.
+
+Besides the inputs and the outputs, this operator accepts two auxiliary
+states, ``moving_mean`` and ``moving_var``, which are *k*-length
+vectors. They are global statistics for the whole dataset, which are updated by::
+
+  moving_mean = moving_mean * momentum + data_mean * (1 - momentum)
+  moving_var = moving_var * momentum + data_var * (1 - momentum)
+
+The parameter ``axis`` specifies which axis of the input shape denotes
+the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel
+axis to be the last item in the input shape.
+
+)" NNVM_ADD_FILELINE)
+.add_argument("data", "Tensor", "Input to which batch normalization will be applied")
+.add_argument("gamma", "Tensor", "The gamma scale factor")
+.add_argument("beta", "Tensor", "The beta offset factor")
+.add_argument("moving_mean", "Tensor", "running mean of input")
+.add_argument("moving_var", "Tensor", "running variance of input")
+.set_num_inputs(5)
+.set_num_outputs(3)
+.set_attr_parser(ParamParser<BatchNormParam>)
+.set_attr<FListInputNames>("FListInputNames",
+  [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{
+        "data", "gamma", "beta", "moving_mean", "moving_var"};
+  })
+.set_attr<FListOutputNames>("FListOutputNames",
+  [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{"output", "mean", "var"};
+  })
+.set_attr<FNumVisibleOutputs>("FNumVisibleOutputs",
+  [](const NodeAttrs& attrs) {
+    return 1;
+  })
+.set_attr<FMutateInputs>("FListMutateInputs",
+  [](const NodeAttrs& attrs) {
+    return std::vector<uint32_t>{3, 4};
+  })
+.set_support_level(1);
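A minimal NumPy sketch (not part of the commit) of the moving-statistics update quoted in the docstring, reducing over every axis except the channel axis:

    import numpy as np

    def update_moving_stats(moving_mean, moving_var, data, momentum=0.9, axis=1):
        # reduce over every axis except the channel axis
        reduce_axes = tuple(i for i in range(data.ndim) if i != axis)
        data_mean = data.mean(axis=reduce_axes)
        data_var = data.var(axis=reduce_axes)
        moving_mean = moving_mean * momentum + data_mean * (1 - momentum)
        moving_var = moving_var * momentum + data_var * (1 - momentum)
        return moving_mean, moving_var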
+// softmax
+DMLC_REGISTER_PARAMETER(SoftmaxParam);
+
+NNVM_REGISTER_OP(softmax)
+.describe(R"code(Computes softmax.
+
+.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
+
+)code" NNVM_ADD_FILELINE)
+.set_num_inputs(1)
+.set_num_outputs(1)
+.set_attr_parser(ParamParser<SoftmaxParam>)
+.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
+.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
+.set_support_level(1);
+
+// log_softmax
+DMLC_REGISTER_PARAMETER(LogSoftmaxParam);
+
+NNVM_REGISTER_OP(log_softmax)
+.describe(R"code(Computes log softmax.
+
+.. math:: \text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
+
+)code" NNVM_ADD_FILELINE)
+.set_num_inputs(1)
+.set_num_outputs(1)
+.set_attr_parser(ParamParser<LogSoftmaxParam>)
+.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
+.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
+.set_support_level(1);

 }  // namespace top
 }  // namespace nnvm
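The two ops differ only by the final log; a minimal NumPy sketch (not part of the commit), with the usual max-subtraction for numerical stability:

    import numpy as np

    def softmax(x, axis=-1):
        z = x - x.max(axis=axis, keepdims=True)  # stabilize exp
        e = np.exp(z)
        return e / e.sum(axis=axis, keepdims=True)

    def log_softmax(x, axis=-1):
        z = x - x.max(axis=axis, keepdims=True)
        return z - np.log(np.exp(z).sum(axis=axis, keepdims=True))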
nnvm/src/top/tensor.cc

@@ -97,33 +97,33 @@ Example::
 .add_argument("data", "Tensor", "Input data.")
 .set_support_level(1);

-// concat
-// TODO(eric): change name(concat->concatenate) and argument(dim->axis)
-DMLC_REGISTER_PARAMETER(ConcatParam);
+// concatenate
+DMLC_REGISTER_PARAMETER(ConcatenateParam);

-inline bool ConcatInferShape(const nnvm::NodeAttrs& attrs,
-                             std::vector<TShape>* in_shape,
-                             std::vector<TShape>* out_shape) {
-  const ConcatParam& param = nnvm::get<ConcatParam>(attrs.parsed);
+inline bool ConcatenateInferShape(const nnvm::NodeAttrs& attrs,
+                                  std::vector<TShape>* in_shape,
+                                  std::vector<TShape>* out_shape) {
+  const ConcatenateParam& param = nnvm::get<ConcatenateParam>(attrs.parsed);
   TShape dshape;
   dim_t size = 0;
   bool has_zero = false;
   for (size_t i = 0; i < in_shape->size(); ++i) {
     TShape tmp = (*in_shape)[i];
     if (tmp.ndim()) {
-      CHECK_LT(static_cast<dim_t>(param.dim), tmp.ndim())
-          << "concat dim " << param.dim << " out of range of input shape " << tmp;
-      has_zero = tmp[param.dim] == 0 || has_zero;
-      size += tmp[param.dim];
-      tmp[param.dim] = 0;
+      CHECK_LT(static_cast<dim_t>(param.axis), tmp.ndim())
+          << "concat dim " << param.axis << " out of range of input shape " << tmp;
+      has_zero = tmp[param.axis] == 0 || has_zero;
+      size += tmp[param.axis];
+      tmp[param.axis] = 0;
       shape_assign(&dshape, tmp);
     }
   }

   TShape tmp = (*out_shape)[0];
   if (tmp.ndim()) {
-    CHECK_LT(static_cast<dim_t>(param.dim), tmp.ndim())
-        << "concat dim " << param.dim << " out of range of input shape " << tmp;
-    tmp[param.dim] = 0;
+    CHECK_LT(static_cast<dim_t>(param.axis), tmp.ndim())
+        << "concat dim " << param.axis << " out of range of input shape " << tmp;
+    tmp[param.axis] = 0;
     shape_assign(&dshape, tmp);
   }

@@ -133,12 +133,12 @@ inline bool ConcatInferShape(const nnvm::NodeAttrs& attrs,
     SHAPE_ASSIGN_CHECK(*in_shape, i, dshape);
   }

-  if (!has_zero) dshape[param.dim] = size;
+  if (!has_zero) dshape[param.axis] = size;
   SHAPE_ASSIGN_CHECK(*out_shape, 0, dshape);
   return dshape.Size() != 0;
 }

-NNVM_REGISTER_OP(concat)
+NNVM_REGISTER_OP(concatenate)
 .describe(R"code(Joins input arrays along a given axis.

 The dimensions of the input arrays should be the same except the axis along

@@ -152,31 +152,80 @@ Example::
    y = [[3,3],[4,4],[5,5]]
    z = [[6,6], [7,7],[8,8]]

-   concat(x,y,z,dim=0) = [[ 1., 1.],
-                          [ 2., 2.],
-                          [ 3., 3.],
-                          [ 4., 4.],
-                          [ 5., 5.],
-                          [ 6., 6.],
-                          [ 7., 7.],
-                          [ 8., 8.]]
+   concatenate(x,y,z,dim=0) = [[ 1., 1.],
+                               [ 2., 2.],
+                               [ 3., 3.],
+                               [ 4., 4.],
+                               [ 5., 5.],
+                               [ 6., 6.],
+                               [ 7., 7.],
+                               [ 8., 8.]]

    Note that you cannot concat x,y,z along dimension 1 since dimension
    0 is not the same for all the input arrays.

-   concat(y,z,dim=1) = [[ 3., 3., 6., 6.],
-                        [ 4., 4., 7., 7.],
-                        [ 5., 5., 8., 8.]]
+   concatenate(y,z,dim=1) = [[ 3., 3., 6., 6.],
+                             [ 4., 4., 7., 7.],
+                             [ 5., 5., 8., 8.]]

 )code" NNVM_ADD_FILELINE)
 .set_num_outputs(1)
-.set_num_inputs(nnvm::kVarg)
+.set_attr_parser(ParamParser<ConcatenateParam>)
 .add_argument("data", "Tensor-or-Tensor[]", "List of arrays to concatenate")
-.set_attr<FInferShape>("FInferShape", ConcatInferShape)
+.set_attr<FInferShape>("FInferShape", ConcatenateInferShape)
 .set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>)
-.add_arguments(ConcatParam::__FIELDS__())
+.set_num_inputs(nnvm::kVarg)
+.add_arguments(ConcatenateParam::__FIELDS__())
 .set_support_level(1);
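A minimal Python sketch (not part of the commit) of what ConcatenateInferShape computes: all input shapes must agree on every axis except param.axis, and the output extent on that axis is the sum of the input extents:

    def concatenate_infer_shape(in_shapes, axis):
        out = list(in_shapes[0])
        size = 0
        for s in in_shapes:
            assert len(s) == len(out), "rank mismatch"
            assert all(a == b for i, (a, b) in enumerate(zip(s, out))
                       if i != axis), "non-concat axes must agree"
            size += s[axis]
        out[axis] = size
        return tuple(out)

    assert concatenate_infer_shape([(4, 2), (3, 2)], axis=0) == (7, 2)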
+NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_add)
+.describe(R"code(Element-wise add
+
+)code")
+.set_support_level(1);
+
+NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_sub)
+.describe(R"code(Element-wise subtraction
+
+)code" NNVM_ADD_FILELINE)
+.set_support_level(1);
+
+NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_mul)
+.describe(R"code(Element-wise multiplication
+
+)code" NNVM_ADD_FILELINE)
+.set_support_level(1);
+
+NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_div)
+.describe(R"code(Element-wise division
+
+)code" NNVM_ADD_FILELINE)
+.set_support_level(1);
+// cast
+DMLC_REGISTER_PARAMETER(CastParam);
+
+inline bool CastInferType(const nnvm::NodeAttrs& attrs,
+                          std::vector<int>* in_attrs,
+                          std::vector<int>* out_attrs) {
+  const CastParam& param = nnvm::get<CastParam>(attrs.parsed);
+  CHECK_EQ(out_attrs->size(), 1U);
+  TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype);
+  return true;
+}
+
+NNVM_REGISTER_OP(cast)
+.describe(R"code(Cast the content of input to dtype.
+
+)code" NNVM_ADD_FILELINE)
+.add_argument("data", "Tensor", "Input data array")
+.set_attr_parser(ParamParser<CastParam>)
+.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
+.set_attr<FInferType>("FInferType", CastInferType)
+.add_arguments(CastParam::__FIELDS__())
+.set_num_inputs(1)
+.set_num_outputs(1)
+.set_support_level(1);

 }  // namespace top
 }  // namespace nnvm
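CastInferType fixes the output type from the dtype attribute alone, independent of the input types; a minimal sketch (not part of the commit), using the TypeFlag values declared in tensor.h:

    TYPE_FLAGS = {"float32": 0, "float64": 1, "float16": 2, "uint8": 3, "int32": 4}

    def cast_infer_type(dtype, in_types):
        # input types pass through untouched; the output is pinned to `dtype`
        return in_types, [TYPE_FLAGS[dtype]]

    assert cast_infer_type("float64", [0]) == ([0], [1])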
nnvm/tests/python/test_gradient.py  (deleted, 100644 → 0)

-import json
-import nnvm.symbol as sym
-import nnvm.graph as graph
-
-def grad(ys, xs, ys_grads):
-    g = graph.create(ys)
-    g._set_symbol_list_attr('grad_ys', ys)
-    g._set_symbol_list_attr('grad_xs', xs)
-    g._set_symbol_list_attr('grad_ys_out_grad', ys_grads)
-    return g.apply('Gradient')
-
-def test_graph_gradient():
-    x0 = sym.Variable('x0')
-    x1 = sym.Variable('x1')
-    yg = sym.Variable('yg')
-    y = sym.exp(sym.mul(x0, x1))
-    grad_graph = grad(y, [x0], yg)
-    print("Original graph")
-    print(y.debug_str())
-    print("Gradient graph")
-    print(grad_graph.symbol.debug_str())
-
-if __name__ == "__main__":
-    test_graph_gradient()
nnvm/tests/python/test_graph.py

@@ -4,7 +4,7 @@ import nnvm.graph as graph
 def test_json_pass():
     x = sym.Variable('x')
-    y = sym.conv2d(data=x, name='conv', stride=(2,2))
+    y = sym.dense(data=x, name='conv', units=30)
     g = graph.create(y)
     ret = g.apply('SaveJSON')
     ret._set_json_attr('json', ret.json_attr('json'))

@@ -14,12 +14,11 @@ def test_json_pass():
 def test_json_pass_with_attr():
     x = sym.Variable('x')
-    y = sym.conv2d(data=x, name='conv', stride=(2,2))
+    y = sym.dense(data=x, name='fc', units=30)
     g = graph.create(y)
     g._set_json_attr('version', '0.1.0')
     ret = g.apply('SaveJSON')
     json_str = ret.json_attr('json')
-    print(json_str)
     ret._set_json_attr('json', json_str)
     g2 = ret.apply('LoadJSON')
     assert g2.json_attr('version') == '0.1.0'

@@ -27,42 +26,21 @@ def test_json_pass_with_attr():
 def test_graph_json_attr():
     x = sym.Variable('x')
-    y = sym.conv2d(data=x, name='conv', stride=(2,2))
+    y = sym.dense(data=x, name='fc', units=30)
     g = graph.create(y)
     g._set_json_attr('ilist', [1, 2, 3], 'list_int')
     assert g.json_attr('ilist') == [1, 2, 3]

-def test_order_mutation_pass():
-    x = sym.Variable('x')
-    y = sym.conv2d(data=x, name='conv', dev='gpu')
-    y = sym.add(y, x, name='add1')
-    # write after read
-    z = sym.assign(x, y, name='assign')
-    # read after write
-    t = sym.add(y, x, name='add2')
-    g = graph.create(sym.Group([t, z]))
-    jgraph = json.loads(g.apply(['OrderMutation', 'SaveJSON']).json_attr('json'))
-    jnodes = jgraph['nodes']
-    nindex = {n['name']: i for i, n in enumerate(jnodes)}
-    assert nindex['assign'] in jnodes[nindex['add2']]['control_deps']
-    assert nindex['conv'] in jnodes[nindex['assign']]['control_deps']
-    assert nindex['add1'] in jnodes[nindex['assign']]['control_deps']
-    assert jnodes[nindex['assign']]['inputs'][0][2] == 1

 def test_list_args():
     x = sym.Variable('x')
     z = sym.Variable('z')
-    y = sym.conv2d(data=x, name='conv', dev='gpu')
-    y = sym.add(y, z, name='add1')
-    # write after read
-    z = sym.assign(x, y, name='assign')
-    assert z.list_input_names('read_only') == ['conv_weight', 'z']
-    assert z.list_input_names('aux_state') == ['x']
+    y = sym.dense(data=x, name='fc', units=30)
+    y = sym.elemwise_add(y, z, name='add1')

 def test_infer_shape():
-    x = sym.Variable('x', shape=(4, 2))
-    y = sym.add(x, x, name='add1')
-    y = sym.reshape(y, target=(2, 4), name="reshape1")
+    x = sym.Variable('x', shape=(2, 4, 2))
+    y = sym.elemwise_add(x, x, name='add1')
+    y = sym.flatten(y, name="flatten")
     g = graph.create(y)
     g._set_json_attr("shape_attr_key", "shape")
     g = g.apply('InferShape')

@@ -70,28 +48,28 @@ def test_infer_shape():
     jnodes = jgraph['nodes']
     jnode_row_ptr = jgraph['node_row_ptr']
     nindex = {n['name']: i for i, n in enumerate(jnodes)}
-    assert g.json_attr('shape')[jnode_row_ptr[nindex["reshape1"]]] == [2, 4]
-    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [4, 2]
+    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten"]]] == [2, 8]
+    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]

 def test_infer_shape_known_partial():
-    x = sym.Variable('x', shape=(4, 2))
-    y = sym.add(x, x, name='add1')
-    y = sym.reshape(y, target=(2, 4), name="reshape1")
+    x = sym.Variable('x')
+    y = sym.elemwise_add(x, x, name='add1')
+    y = sym.flatten(y, name="flatten1")
     g = graph.create(y)
     jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
-    shape = [[4, 2], [], []]
+    shape = [[2, 4, 2], [], []]
     g._set_json_attr("shape", shape, 'list_shape')
     g = g.apply("InferShape")
     jnodes = jgraph['nodes']
     jnode_row_ptr = jgraph['node_row_ptr']
     nindex = {n['name']: i for i, n in enumerate(jnodes)}
-    assert g.json_attr('shape')[jnode_row_ptr[nindex["reshape1"]]] == [2, 4]
-    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [4, 2]
+    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten1"]]] == [2, 8]
+    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]

 def test_infer_type():
     x = sym.Variable('x', dtype=0)
-    y = sym.add(x, x, name='add1')
-    y = sym.cast(y, dtype=1, name="cast1")
+    y = sym.elemwise_add(x, x, name='add1')
+    y = sym.cast(y, dtype="float64", name="cast1")
     g = graph.create(y)
     g._set_json_attr("dtype_attr_key", "dtype")
     g = g.apply('InferType')

@@ -102,31 +80,12 @@ def test_infer_type():
     assert g.json_attr('dtype')[jnode_row_ptr[nindex["cast1"]]] == 1
     assert g.json_attr('dtype')[jnode_row_ptr[nindex["add1"]]] == 0

-def test_place_device():
-    x = sym.Variable('x', device_group="stage1")
-    y = sym.add(x, x, name='add1')
-    y = sym.cast(y, dtype=1, name="cast1")
-    z = sym.add(y, y, device_group="stage2", name="add2")
-    z = sym.add(z, sym.exp(y, device_group="stage2"), name="add3")
-    g = graph.create(z)
-    g._set_json_attr("device_group_attr_key", "device_group")
-    g._set_json_attr("device_assign_map", {"stage1": 0, "stage2": 1}, "dict_str_int")
-    g._set_json_attr("device_copy_op", "cross_device_copy")
-    g = g.apply("PlaceDevice")
-    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
-    jnodes = jgraph['nodes']
-    jnode_row_ptr = jgraph['node_row_ptr']
-    nindex = {n['name']: i for i, n in enumerate(jnodes)}
-    assert g.json_attr('device')[jnode_row_ptr[nindex["add2"]]] == 1
-    assert g.json_attr('device')[jnode_row_ptr[nindex["add3"]]] == 1
-    assert g.json_attr('device')[jnode_row_ptr[nindex["cast1"]]] == 0

 def test_plan_memory():
     x = sym.Variable('x', shape=(4, 2))
-    x2 = sym.add(x, x, name='addk')
-    y = sym.reshape(x2, target=(2, 4), name="reshapek")
-    y = sym.add(y, x2, name="add2")
-    y = sym.add(y, y)
+    x2 = sym.elemwise_add(x, x, name='addk')
+    y = sym.flatten(x2, name="reshapek")
+    y = sym.elemwise_add(y, x2, name="add2")
+    y = sym.elemwise_add(y, y)
     g = graph.create(y)
     g._set_json_attr("shape_attr_key", "shape")
     g = g.apply(["InferShape", "InferType", "PlanMemory"])

@@ -143,12 +102,10 @@ def test_plan_memory():
 if __name__ == "__main__":
     test_json_pass_with_attr()
-    test_order_mutation_pass()
     test_graph_json_attr()
     test_json_pass()
     test_infer_shape()
     test_infer_shape_known_partial()
     test_infer_type()
-    test_place_device()
     test_plan_memory()
     test_list_args()
nnvm/tests/python/test_symbol.py

@@ -3,17 +3,13 @@ from nnvm import NNVMError
 def test_dense():
     x = sym.Variable('x')
-    y = sym.dense(x)
-    assert y.list_input_names() == ['x']
     y = sym.dense(x, units=30, name="fc")
     assert y.list_input_names() == ["x", "fc_weight", "fc_bias"]

 def test_compose():
     x = sym.Variable('x')
     z = sym.Variable('z')
-    y = sym.exp(sym.add(x, x, name='add', gpu=2),
+    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                 name='exp', gpu=1, attr={"kk": "1"})
     assert y.list_input_names() == ['x']

@@ -25,24 +21,12 @@ def test_compose():
 def test_default_input():
     x = sym.Variable('x')
-    y = sym.conv2d(data=x, name='conv')
-    assert y.list_input_names() == ['x', 'conv_weight']
+    y = sym.dense(data=x, units=30, name='fc', use_bias=False)
+    assert y.list_input_names() == ['x', 'fc_weight']
     tname = [z.list_output_names()[0] for z in y.list_input_variables()]
     assert tname == y.list_input_names()
     try:
-        z = sym.add(x)
+        z = sym.elemwise_add(x)
         assert False
     except NNVMError:
         pass

-def test_mutate_input():
-    x = sym.Variable('x')
-    y = sym.conv2d(data=x, name='conv')
-    z = sym.assign(x, y)
-    t = sym.add(z, x)
-    try:
-        z = sym.assign(z, z)
-        assert False
-    except NNVMError:
-        pass

@@ -50,7 +34,7 @@ def test_mutate_input():
 def test_copy():
     x = sym.Variable('x')
     z = sym.Variable('z')
-    y = sym.exp(sym.add(x, x, name='add', gpu=2),
+    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                 name='exp', gpu=1, attr={"kk": "1"})
     assert y.__copy__().debug_str() == y.debug_str()

@@ -62,18 +46,8 @@ def test_op_name():
     op_func = sym.__dict__[op_name]
     z = op_func(x)

-def test_control_dep():
-    x = sym.Variable('x')
-    y = sym.conv2d(data=x, name='conv')
-    z = sym.assign(x, y)
-    t = sym.add(x, x)
-    t._add_control_deps([z, y])

 if __name__ == "__main__":
     test_op_name()
     test_copy()
     test_default_input()
     test_compose()
-    test_mutate_input()
-    test_control_dep()
nnvm/tests/python/test_top_level1.py

 import nnvm.symbol as sym
 from nnvm import NNVMError

-def test_dense():
+def test_fullc():
     x = sym.Variable('x')
-    y = sym.dense(x, units=3, name="dense")
-    assert y.list_input_names() == ['x', 'dense_weight', 'dense_bias']
+    x1 = sym.dense(x, units=3, name="dense")
+    x2 = sym.flatten(x1)
+    x3 = sym.softmax(x2)
+    assert x2.list_input_names() == ['x', 'dense_weight', 'dense_bias']

-def test_concat():
+def test_concatenate():
     x = sym.Variable('x')
     y = sym.Variable('y')
-    y = sym.concat(x, y)
+    y = sym.concatenate(x, y)
     assert y.list_input_names() == ['x', 'y']

+def test_unary():
+    x = sym.Variable('x')
+    x = sym.exp(x)
+    x = sym.log(x)
+    x = sym.sigmoid(x)
+    x = sym.tanh(x)
+    assert x.list_input_names() == ['x']
+
+def test_batchnorm():
+    x = sym.Variable('x')
+    x = sym.batch_norm(x, name="bn")
+    assert x.list_input_names() == [
+        "x", "bn_gamma", "bn_beta", "bn_moving_mean", "bn_moving_var"]

 if __name__ == "__main__":
-    test_concat()
-    test_dense()
+    test_concatenate()
+    test_fullc()
+    test_unary()
+    test_batchnorm()