Commit 13388655 by Tianqi Chen

[TOP] Level 3 complete (#7)

parent 31eb2c5a
......@@ -53,7 +53,10 @@ mxnet_source_group("Source\\core" GLOB "src/core/*.cc")
mxnet_source_group("Source\\pass" GLOB "src/pass/*.cc")
FILE(GLOB_RECURSE SOURCE "src/*.cc" "src/*.h" "include/*.h")
FILE(GLOB_RECURSE SOURCE
src/c_api/*.cc
src/core/*.cc
src/pass/*.cc)
if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/CMakeLists.txt)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include)
......
......@@ -51,10 +51,10 @@ else
NO_WHOLE_ARCH= --no-whole-archive
endif
all: lib/libnnvm.a lib/libnnvm_example.$(SHARED_LIBRARY_SUFFIX)
all: lib/libnnvm.a lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX)
SRC = $(wildcard src/*.cc src/c_api/*.cc src/core/*.cc src/pass/*.cc)
SRC_TOP = $(wildcard src/top/*.cc)
SRC_TOP = $(wildcard src/top/*.cc src/top/*/*.cc)
ALL_OBJ = $(patsubst %.cc, build/%.o, $(SRC))
TOP_OBJ = $(patsubst %.cc, build/%.o, $(SRC_TOP))
ALL_DEP = $(ALL_OBJ)
......@@ -72,7 +72,7 @@ lib/libnnvm.a: $(ALL_DEP)
@mkdir -p $(@D)
ar crv $@ $(filter %.o, $?)
lib/libnnvm_example.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS) -Wl,${WHOLE_ARCH} lib/libnnvm.a -Wl,${NO_WHOLE_ARCH}
......
......@@ -54,15 +54,15 @@ struct DropoutParam : public dmlc::Parameter<DropoutParam> {
struct BatchNormParam : public dmlc::Parameter<BatchNormParam> {
int axis;
float epsilon;
float momentum;
double epsilon;
double momentum;
bool center;
bool scale;
DMLC_DECLARE_PARAMETER(BatchNormParam) {
DMLC_DECLARE_FIELD(axis).set_default(1)
.describe("Specify which shape axis the channel is specified.");
DMLC_DECLARE_FIELD(epsilon).set_default(1e-5f)
DMLC_DECLARE_FIELD(epsilon).set_default(1e-5)
.describe("Small float added to variance to avoid dividing by zero.");
DMLC_DECLARE_FIELD(center).set_default(true)
.describe("If True, add offset of `beta` to normalized tensor."
......@@ -81,6 +81,8 @@ struct BatchNormParam : public dmlc::Parameter<BatchNormParam> {
static const constexpr int kMovingVariance = 4;
};
// Shared by softmax and log_softmax
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
int axis;
......@@ -90,12 +92,12 @@ struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
}
};
struct LogSoftmaxParam : public dmlc::Parameter<LogSoftmaxParam> {
int axis;
struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
double alpha;
DMLC_DECLARE_PARAMETER(LogSoftmaxParam) {
DMLC_DECLARE_FIELD(axis).set_default(-1)
.describe("The axis to sum over when computing softmax.");
DMLC_DECLARE_PARAMETER(LeakyReLUParam) {
DMLC_DECLARE_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
.describe("slope coefficient for the negative half axis.");
}
};
......
......@@ -40,6 +40,24 @@ struct CastParam : public dmlc::Parameter<CastParam> {
}
};
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
Tuple<int64_t> shape;
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape);
}
};
struct ScalarParam : public dmlc::Parameter<ScalarParam> {
double scalar;
DMLC_DECLARE_PARAMETER(ScalarParam) {
DMLC_DECLARE_FIELD(scalar);
}
};
} // namespace top
} // namespace nnvm
......
......@@ -26,7 +26,7 @@ def find_lib_path():
if hasattr(__builtin__, "NNVM_LIBRARY_NAME"):
lib_name = __builtin__.NNVM_LIBRARY_NAME
else:
lib_name = "libnnvm_example"
lib_name = "libnnvm_top"
api_path = os.path.join(base_path, '../../lib/')
cmake_build_path = os.path.join(base_path, '../../build/Release/')
......
......@@ -7,8 +7,8 @@
#include <nnvm/node.h>
#include <nnvm/op_attr_types.h>
#include <nnvm/top/nn.h>
#include "./op_common.h"
#include "./elemwise_op_common.h"
#include "../op_common.h"
#include "../elemwise_op_common.h"
namespace nnvm {
namespace top {
......@@ -126,6 +126,25 @@ NNVM_REGISTER_OP(dropout)
// batchnorm
DMLC_REGISTER_PARAMETER(BatchNormParam);
inline bool BatchNormInferShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_shape,
std::vector<TShape> *out_shape) {
CHECK_EQ(in_shape->size(), 5U)
<< "Input:[data, gamma, beta, moving_mean, moving_var]";
CHECK_EQ(out_shape->size(), 3U);
const TShape &dshape = in_shape->at(0);
if (dshape.ndim() == 0) return false;
TShape bshape({dshape[1]});
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 1, bshape);
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 2, bshape);
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 3, bshape);
NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 4, bshape);
NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_shape, 0, dshape);
NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_shape, 1, bshape);
NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_shape, 2, bshape);
return true;
}
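As an illustration of the rule implemented by BatchNormInferShape above: once the data shape is known, gamma, beta, moving_mean and moving_var are all fixed to the channel dimension dshape[1]. A minimal Python sketch (it reuses the infer_shape test helper defined in the unit tests further below):

import nnvm.symbol as sym

x = sym.Variable("x", shape=(10, 20))  # data laid out as (batch, channel)
y = sym.batch_norm(x, name="bn")
sdict = infer_shape(y)                 # helper from tests/python/unittest below
# gamma, beta, moving_mean and moving_var are all inferred as (channel,) = (20,)
assert sdict["bn_gamma"][0] == [20]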
NNVM_REGISTER_OP(batch_norm)
.describe(R"(Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
......@@ -167,6 +186,8 @@ axis to be the last item in the input shape.
.set_num_inputs(5)
.set_num_outputs(3)
.set_attr_parser(ParamParser<BatchNormParam>)
.set_attr<FInferShape>("FInferShape", BatchNormInferShape)
.set_attr<FInferType>("FInferType", ElemwiseType<5, 3>)
.set_attr<FListInputNames>("FListInputNames", [](const NodeAttrs& attrs) {
return std::vector<std::string>{"data", "gamma", "beta", "moving_mean", "moving_var"};
})
......@@ -198,8 +219,6 @@ NNVM_REGISTER_OP(softmax)
.set_support_level(1);
// log_softmax
DMLC_REGISTER_PARAMETER(LogSoftmaxParam);
NNVM_REGISTER_OP(log_softmax)
.describe(R"code(Computes softmax.
......@@ -208,7 +227,23 @@ NNVM_REGISTER_OP(log_softmax)
)code" NNVM_ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LogSoftmaxParam>)
.set_attr_parser(ParamParser<SoftmaxParam>)
.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
.set_support_level(1);
// leaky_relu
DMLC_REGISTER_PARAMETER(LeakyReLUParam);
NNVM_REGISTER_OP(leaky_relu)
.describe(R"code(Leaky version of a Rectified Linear Unit.
`y = x > 0 ? x : alpha * x`
)code" NNVM_ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LeakyReLUParam>)
.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
.set_support_level(1);
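For reference, the formula registered for leaky_relu can be sketched in NumPy (an illustrative reference implementation only, not the kernel the framework dispatches to):

import numpy as np

def leaky_relu_ref(x, alpha=0.25):
    # y = x where x > 0, alpha * x elsewhere, applied element-wise
    return np.where(x > 0, x, alpha * x)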
......
/*!
* Copyright (c) 2017 by Contributors
* \file elemwise.cc
* \brief Element-wise operators
*/
#include <nnvm/op.h>
#include <nnvm/node.h>
#include <nnvm/op_attr_types.h>
#include <nnvm/top/tensor.h>
#include "../op_common.h"
#include "../elemwise_op_common.h"
namespace nnvm {
namespace top {
// sigmoid
NNVM_REGISTER_ELEMWISE_UNARY_OP(sigmoid)
.describe(R"code(Computes sigmoid.
.. math::
y = 1 / (1 + exp(-x))
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// tanh
NNVM_REGISTER_ELEMWISE_UNARY_OP(tanh)
.describe(R"code(Returns the hyperbolic tangent of the input array, computed element-wise.
.. math::
tanh(x) = sinh(x) / cosh(x)
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// exp
NNVM_REGISTER_ELEMWISE_UNARY_OP(exp)
.describe(R"code(Returns the exp input array, computed element-wise.
.. math::
exp(x)
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// log
NNVM_REGISTER_ELEMWISE_UNARY_OP(log)
.describe(R"code(Returns the log input array, computed element-wise.
.. math::
log(x)
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// binary ops
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_add)
.describe(R"code(Element-wise add
)code")
.set_support_level(1);
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_sub)
.describe(R"code(Element-wise substraction
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_mul)
.describe(R"code(Element-wise multiplication
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_div)
.describe(R"code(Element-wise multiplication
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// negative
NNVM_REGISTER_ELEMWISE_UNARY_OP(negative)
.describe(R"code(Elemenwise numeric negative
)code" NNVM_ADD_FILELINE)
.set_support_level(3);
// copy
NNVM_REGISTER_ELEMWISE_UNARY_OP(copy)
.describe(R"code(Copy tensor to another one.
)code" NNVM_ADD_FILELINE)
.set_support_level(3);
// unary scalar op
DMLC_REGISTER_PARAMETER(ScalarParam);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__add_scalar__)
.describe(R"code(Tensor add scalar
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__sub_scalar__)
.describe(R"code(Tensor substract scalar
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__rsub_scalar__)
.describe(R"code(scalar substract Tensor
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__mul_scalar__)
.describe(R"code(Tensor multiplies scalar
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__div_scalar__)
.describe(R"code(Tensor divides scalar
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__rdiv_scalar__)
.describe(R"code(scalar divides Tensor
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__pow_scalar__)
.describe(R"code(Tensor power scalar
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
NNVM_REGISTER_ELEMWISE_UNARY_OP(__rpow_scalar__)
.describe(R"code(scalar power Tensor
)code" NNVM_ADD_FILELINE)
.set_attr_parser(ParamParser<ScalarParam>)
.set_support_level(3);
} // namespace top
} // namespace nnvm
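The __*_scalar__ operators above back the arithmetic overloads on symbols in the Python frontend; a small sketch of how they compose (assuming, as the unit test further below exercises, that the overloads lower to these scalar ops):

import nnvm.symbol as sym

x = sym.Variable("x")
# presumably lowered to __rdiv_scalar__, __mul_scalar__, __sub_scalar__ and __pow_scalar__
y = (1 / (x * 2) - 1) ** 2
assert y.list_input_names() == ["x"]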
/*!
* Copyright (c) 2017 by Contributors
* \file tensor.cc
* \brief Property def of tensor operators.
* \file transform.cc
* \brief Injective transformation of shape or type.
*/
#include <nnvm/op.h>
#include <nnvm/node.h>
#include <nnvm/op_attr_types.h>
#include <nnvm/top/tensor.h>
#include "./op_common.h"
#include "./elemwise_op_common.h"
#include "../op_common.h"
#include "../elemwise_op_common.h"
namespace nnvm {
namespace top {
// sigmoid
NNVM_REGISTER_ELEMWISE_UNARY_OP(sigmoid)
.describe(R"code(Computes sigmoid.
.. math::
y = 1 / (1 + exp(-x))
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// tanh
NNVM_REGISTER_ELEMWISE_UNARY_OP(tanh)
.describe(R"code(Returns the hyperbolic tangent of the input array, computed element-wise.
.. math::
tanh(x) = sinh(x) / cosh(x)
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// exp
NNVM_REGISTER_ELEMWISE_UNARY_OP(exp)
.describe(R"code(Returns the exp input array, computed element-wise.
.. math::
exp(x)
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// log
NNVM_REGISTER_ELEMWISE_UNARY_OP(log)
.describe(R"code(Returns the log input array, computed element-wise.
.. math::
log(x)
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// flatten
inline bool FlattenInferShape(const nnvm::NodeAttrs& attrs,
inline bool FlattenInferShape(const NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
......@@ -100,7 +61,7 @@ Example::
// concatenate
DMLC_REGISTER_PARAMETER(ConcatenateParam);
inline bool ConcatenateInferShape(const nnvm::NodeAttrs& attrs,
inline bool ConcatenateInferShape(const NodeAttrs& attrs,
std::vector<TShape> *in_shape,
std::vector<TShape> *out_shape) {
const ConcatenateParam& param = nnvm::get<ConcatenateParam>(attrs.parsed);
......@@ -170,7 +131,7 @@ Example::
)code" NNVM_ADD_FILELINE)
.set_num_outputs(1)
.set_num_inputs(nnvm::kVarg)
.set_num_inputs(kVarg)
.set_attr_parser(ParamParser<ConcatenateParam>)
.add_argument("data", "Tensor-or-Tensor[]", "List of arrays to concatenate")
.set_attr<FInferShape>("FInferShape", ConcatenateInferShape)
......@@ -178,34 +139,11 @@ Example::
.add_arguments(ConcatenateParam::__FIELDS__())
.set_support_level(1);
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_add)
.describe(R"code(Element-wise add
)code")
.set_support_level(1);
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_sub)
.describe(R"code(Element-wise substraction
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_mul)
.describe(R"code(Element-wise multiplication
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
NNVM_REGISTER_ELEMWISE_BINARY_OP(elemwise_div)
.describe(R"code(Element-wise multiplication
)code" NNVM_ADD_FILELINE)
.set_support_level(1);
// cast
DMLC_REGISTER_PARAMETER(CastParam);
inline bool CastInferType(const nnvm::NodeAttrs& attrs,
inline bool CastInferType(const NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
const CastParam& param = nnvm::get<CastParam>(attrs.parsed);
......@@ -227,5 +165,155 @@ NNVM_REGISTER_OP(cast)
.set_num_outputs(1)
.set_support_level(1);
// reshape
DMLC_REGISTER_PARAMETER(ReshapeParam);
inline bool ReshapeInferShape(const NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const ReshapeParam& param = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_GT(param.shape.ndim(), 0);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const TShape &dshape = (*in_attrs)[0];
if (dshape.ndim() == 0) return false;
const Tuple<int64_t>& target_shape = param.shape;
std::vector<int64_t> oshape;
dim_t src_idx = 0;
int infer_idx = -1;
for (dim_t i = 0; i < target_shape.ndim(); ++i) {
int svalue = target_shape[i];
// special flag handling for shape inference.
if (svalue > 0) {
oshape.push_back(svalue);
++src_idx;
} else if (svalue == 0) {
// keep same
CHECK_LT(src_idx, dshape.ndim());
oshape.push_back(dshape[src_idx++]);
} else if (svalue == -1) {
// inference based on rest
CHECK_LT(infer_idx, 0)
<< "One and only one dim can be inferred";
infer_idx = i;
oshape.push_back(1);
++src_idx;
} else if (svalue == -2) {
// copy all remaining dims from source
while (src_idx < dshape.ndim()) {
oshape.push_back(dshape[src_idx++]);
}
} else if (svalue == -3) {
// merge two dims from source
CHECK_LT(src_idx + 1, dshape.ndim());
dim_t d1 = dshape[src_idx++];
dim_t d2 = dshape[src_idx++];
oshape.push_back(d1 * d2);
} else if (svalue == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, target_shape.ndim());
CHECK_LT(src_idx, dshape.ndim());
dim_t d0 = dshape[src_idx++];
int d1 = target_shape[++i];
int d2 = target_shape[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1) d1 = d0 / d2;
if (d2 == -1) d2 = d0 / d1;
CHECK_EQ(d1 * d2, static_cast<int>(d0)) <<
"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
oshape.push_back(d1);
oshape.push_back(d2);
}
}
if (infer_idx >= 0) {
if (dshape.Size() > 0) {
int new_size = 1;
for (int x : oshape) {
new_size *= x;
}
oshape[infer_idx] = dshape.Size() / new_size;
} else {
oshape[infer_idx] = 0;
}
}
TShape out_shape(oshape.begin(), oshape.end());
CHECK_EQ(out_shape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << out_shape
<< "\nSource: " << dshape;
NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_attrs, 0, out_shape);
return true;
}
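The special-value handling in ReshapeInferShape can be summarized with a purely illustrative Python re-implementation (a hypothetical reshape_ref helper, not part of the library):

def reshape_ref(dshape, target):
    # Expand the special values {0, -1, -2, -3, -4} the same way
    # ReshapeInferShape does and return the concrete output shape.
    oshape, src, infer, i = [], 0, -1, 0
    while i < len(target):
        s = target[i]
        if s > 0:          # explicit output dimension
            oshape.append(s)
            src += 1
        elif s == 0:       # copy the matching input dimension
            oshape.append(dshape[src])
            src += 1
        elif s == -1:      # placeholder, inferred from the remainder afterwards
            infer = len(oshape)
            oshape.append(1)
            src += 1
        elif s == -2:      # copy all remaining input dimensions
            oshape.extend(dshape[src:])
            src = len(dshape)
        elif s == -3:      # merge two consecutive input dimensions
            oshape.append(dshape[src] * dshape[src + 1])
            src += 2
        elif s == -4:      # split one input dimension into the next two values
            d0, d1, d2 = dshape[src], target[i + 1], target[i + 2]
            if d1 == -1:
                d1 = d0 // d2
            if d2 == -1:
                d2 = d0 // d1
            oshape += [d1, d2]
            src += 1
            i += 2
        i += 1
    if infer >= 0:         # fill in the single -1 dimension from the total size
        total, known = 1, 1
        for d in dshape:
            total *= d
        for d in oshape:
            known *= d
        oshape[infer] = total // known
    return tuple(oshape)

For example, reshape_ref((2, 3, 4), (-4, 1, 2, -2)) returns (1, 2, 3, 4), matching the documented example below.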
NNVM_REGISTER_OP(reshape)
.describe(R"code(Reshapes the input array.
Given an array and a shape, this function returns a copy of the array in the new shape.
The shape is a tuple of integers such as (2,3,4). The size of the new shape must be the same as the size of the input array.
Example::
reshape([1,2,3,4], shape=(2,2)) = [[1,2], [3,4]]
For added convenience and to avoid manual shape inference,
some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}.
The significance of each is explained below:
- ``0`` copy this dimension from the input to the output shape.
Example::
- input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2)
- input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4)
- ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions
keeping the size of the new array the same as that of the input array.
At most one dimension of shape can be -1.
Example::
- input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4)
- input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8)
- input shape = (2,3,4), shape=(-1,), output shape = (24,)
- ``-2`` copy all/remainder of the input dimensions to the output shape.
Example::
- input shape = (2,3,4), shape = (-2,), output shape = (2,3,4)
- input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4)
- input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1)
- ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension.
Example::
- input shape = (2,3,4), shape = (-3,4), output shape = (6,4)
- input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20)
- input shape = (2,3,4), shape = (0,-3), output shape = (2,12)
- input shape = (2,3,4), shape = (-3,-2), output shape = (6,4)
- ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1).
Example::
- input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4)
- input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4)
)code" NNVM_ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<ReshapeParam>)
.set_attr<FInferShape>("FInferShape", ReshapeInferShape)
.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
.add_argument("data", "Tensor", "Input data.")
.set_support_level(3);
} // namespace top
} // namespace nnvm
import json
import nnvm.symbol as sym
import nnvm.graph as graph
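# infer_shape runs the InferShape pass on a symbol and returns a dict mapping each
# node name to the list of inferred shapes of that node's output entries, using
# node_row_ptr to slice the graph's flat "shape" attribute per node.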
def infer_shape(sym):
g = graph.create(sym)
g._set_json_attr("shape_attr_key", "shape")
g = g.apply("InferShape")
jgraph = json.loads(g.apply("SaveJSON").json_attr("json"))
jnodes = jgraph["nodes"]
jnode_row_ptr = jgraph["node_row_ptr"]
sdict = {}
vshape = g.json_attr("shape")
for i, n in enumerate(jnodes):
begin, end = jnode_row_ptr[i], jnode_row_ptr[i + 1]
sdict[n["name"]] = vshape[begin:end]
return sdict
# Level 1
def test_dense():
x = sym.Variable("x", shape=(10, 20))
y = sym.dense(x, units=30, name="fc")
sdict = infer_shape(y)
assert(sdict["fc"][0] == [10, 30])
assert(sdict["fc_bias"][0] == [30])
def test_concatenate():
x1 = sym.Variable("x", shape=(10, 20))
x2 = sym.Variable("y", shape=(10, 30))
z = sym.concatenate(x1, x2, name="concat")
sdict = infer_shape(z)
assert(sdict["concat"][0] == [10, 50])
z = sym.concatenate(x1, x1, axis=0, name="concat")
sdict = infer_shape(z)
assert(sdict["concat"][0] == [20, 20])
def test_batchnorm():
x = sym.Variable("x", shape=(10, 20))
y = sym.batch_norm(1 / x, name="bn")
sdict = infer_shape(y)
assert(sdict["bn_gamma"][0] == [20])
def test_flatten():
x = sym.Variable("x", shape=(10, 20, 10))
y = sym.flatten(x) * 2
y = sym.exp(y, name="y")
sdict = infer_shape(y)
assert(sdict["y"][0] == [10, 200])
# Level 3
def test_reshape():
def check(in_shape, tshape, out_shape):
x = sym.Variable("x", shape=in_shape)
y = sym.reshape(x, shape=tshape, name="y")
sdict = infer_shape(y)
assert(tuple(sdict["y"][0]) == tuple(out_shape))
check((4,), (2, 2), (2, 2))
check((2, 3, 4), (4, 0, 2), (4, 3, 2))
check((2, 3, 4), (2, 0, 0), (2, 3, 4))
check((2, 3, 4), (6, 1, -1), (6, 1, 4))
check((2, 3, 4), (3, -1, 8), (3, 1, 8))
check((2, 3, 4), (-1,), (24,))
check((2, 3, 4), (-2,), (2, 3, 4))
check((2, 3, 4), (2, -2), (2, 3, 4))
check((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1))
check((2, 3, 4), (-3, 4), (6, 4))
check((2, 3, 4, 5), (-3, -3), (6, 20))
check((2, 3, 4), (0, -3), (2, 12))
check((2, 3, 4), (-3, -2), (6, 4))
check((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4))
check((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4))
if __name__ == "__main__":
test_dense()
test_concatenate()
test_batchnorm()
test_flatten()
test_reshape()
import nnvm.symbol as sym
def test_reshape():
x = sym.Variable("x")
y = sym.reshape(x, shape=(10, 20), name="y")
assert(y.list_input_names() == ["x"])
def test_scalar_op():
x = sym.Variable("x")
y = (1 / (x * 2) - 1) ** 2
assert(y.list_input_names() == ["x"])
def test_leaky_relu():
x = sym.Variable("x")
y = sym.leaky_relu(x, alpha=0.1)
assert(y.list_input_names() == ["x"])
if __name__ == "__main__":
test_scalar_op()
test_reshape()
test_leaky_relu()
......@@ -37,22 +37,22 @@ if [ ${TASK} == "python_test" ]; then
make clean
make -j all || exit -1
if [ ${TRAVIS_OS_NAME} == "osx" ]; then
python -m nose tests/python/ || exit -1
python3 -m nose tests/python/ || exit -1
python -m nose tests/python/unittest/ || exit -1
python3 -m nose tests/python/unittest/ || exit -1
else
nosetests tests/python/ || exit -1
nosetests3 tests/python/ || exit -1
nosetests tests/python/unittest/ || exit -1
nosetests3 tests/python/unittest/ || exit -1
fi
make cython || exit -1
make cython3 || exit -1
if [ ${TRAVIS_OS_NAME} == "osx" ]; then
python -m nose tests/python/ || exit -1
python3 -m nose tests/python/ || exit -1
python -m nose tests/python/unittest/ || exit -1
python3 -m nose tests/python/unittest/ || exit -1
else
nosetests tests/python/ || exit -1
nosetests3 tests/python/ || exit -1
nosetests tests/python/unittest/ || exit -1
nosetests3 tests/python/unittest/ || exit -1
fi
exit 0
fi