Commit 3e4d9662 by tqchen, committed by Tianqi Chen

[TOP] Init dense

parent d0bcd61f
@@ -53,9 +53,11 @@ endif

all: lib/libnnvm.a lib/libnnvm_example.$(SHARED_LIBRARY_SUFFIX)

-SRC = $(wildcard src/*.cc src/*/*.cc)
+SRC = $(wildcard src/*.cc src/c_api/*.cc src/core/*.cc src/pass/*.cc)
+SRC_TOP = $(wildcard src/top/*.cc)
ALL_OBJ = $(patsubst %.cc, build/%.o, $(SRC))
-ALL_DEP = $(ALL_OBJ) $(PLUGIN_OBJ)
+TOP_OBJ = $(patsubst %.cc, build/%.o, $(SRC_TOP))
+ALL_DEP = $(ALL_OBJ)

include tests/cpp/unittest.mk

@@ -66,18 +68,13 @@ build/src/%.o: src/%.cc
	$(CXX) $(CFLAGS) -MM -MT build/src/$*.o $< >build/src/$*.d
	$(CXX) -c $(CFLAGS) -c $< -o $@
-build/plugin/%.o: plugin/%.cc
-	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) -MM -MT build/plugin/$*.o $< >build/plugin/$*.d
-	$(CXX) -c $(CFLAGS) -c $< -o $@
lib/libnnvm.a: $(ALL_DEP)
	@mkdir -p $(@D)
	ar crv $@ $(filter %.o, $?)

-lib/libnnvm_example.$(SHARED_LIBRARY_SUFFIX): example/src/operator.cc lib/libnnvm.a
+lib/libnnvm_example.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.cc, $^) $(LDFLAGS) -Wl,${WHOLE_ARCH} lib/libnnvm.a -Wl,${NO_WHOLE_ARCH}
+	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS) -Wl,${WHOLE_ARCH} lib/libnnvm.a -Wl,${NO_WHOLE_ARCH}

cython:
	cd python; python setup.py build_ext --inplace

@@ -89,7 +86,7 @@ cyclean:
	rm -rf python/nnvm/*/*.so python/nnvm/*/*.dylib python/nnvm/*/*.cpp

lint:
-	python2 dmlc-core/scripts/lint.py nnvm cpp include src
+	python dmlc-core/scripts/lint.py nnvm cpp include src

doc:
	doxygen docs/Doxyfile

...
NNVM Core Operator Specification
================================

Each operator's attributes are stored in JSON format; tuples are stored as JSON arrays (for example, a shape attribute `(2, 3)` is stored as `[2, 3]`).
## Tier 1: Basic Operators

***Enables fully connected nets***

- **dense**
  - attributes
    - units: int. Number of hidden units of the dense transformation.
    - use_bias: bool. Whether to use a bias term.
  - inputs
    - data, 2D Tensor
    - weight, 2D Tensor
    - bias, optional, 1D Tensor
  - outputs
    - output, 2D Tensor
- **relu**
  - inputs
    - data, nD Tensor
  - outputs
    - output, nD Tensor
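An operator meeting this spec is registered through the C++ `Op` registry. Below is a minimal, hypothetical sketch for `relu`, which this tier specifies but which is not yet registered in this commit; it reuses the same helpers the `dense` registration uses.

```c++
// Hypothetical sketch only: relu is specified above but not registered here.
NNVM_REGISTER_OP(relu)
.describe("Elementwise rectified linear unit: max(data, 0)." NNVM_ADD_FILELINE)
.set_support_level(1)
.set_num_inputs(1)
.set_num_outputs(1)
.add_argument("data", "nD Tensor", "Input data.")
.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>);
```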
@@ -27,4 +27,9 @@ using dmlc::get;
}  // namespace nnvm
// describe op registration point
#define NNVM_STRINGIZE_DETAIL(x) #x
#define NNVM_STRINGIZE(x) NNVM_STRINGIZE_DETAIL(x)
#define NNVM_DESCRIBE(...) describe(__VA_ARGS__ "\n\nFrom:" __FILE__ ":" NNVM_STRINGIZE(__LINE__))
#define NNVM_ADD_FILELINE "\n\nDefined in " __FILE__ ":L" NNVM_STRINGIZE(__LINE__)
#endif  // NNVM_BASE_H_
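// Usage sketch for the two macros above: both splice the definition site into
// an operator's description string at preprocessing time. Assuming a
// hypothetical registration at src/top/nn.cc line 42,
//
//   NNVM_REGISTER_OP(dense).NNVM_DESCRIBE("Applies a linear transformation.");
//
// expands to describe("Applies a linear transformation.\n\nFrom:src/top/nn.cc:42"),
// while .describe("..." NNVM_ADD_FILELINE) appends "\n\nDefined in src/top/nn.cc:L42".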
@@ -109,6 +109,12 @@ class Op {
   */
  uint32_t num_outputs = 1;
  /*!
   * \brief support level of the operator;
   *  the lower the level, the more basic the operator is
   *  (in analogy to BLAS levels).
   */
  uint32_t support_level = 10;
  /*!
   * \brief get number of outputs given information about the node.
   * \param attrs The attribute of the node
   * \return number of outputs.
@@ -184,6 +190,12 @@ class Op {
   */
  inline Op& set_num_inputs(uint32_t n);  // NOLINT(*)
  /*!
   * \brief Set the support level of op.
   * \param level The support level.
   * \return reference to self.
   */
  inline Op& set_support_level(uint32_t level);  // NOLINT(*)
  /*!
   * \brief Set the get_num_outputs function.
   * \param fn The function to be set.
   * \return reference to self.
@@ -479,6 +491,11 @@ inline Op& Op::set_num_inputs(uint32_t n) { // NOLINT(*)
  return *this;
}
inline Op& Op::set_support_level(uint32_t level) {  // NOLINT(*)
  this->support_level = level;
  return *this;
}
inline Op& Op::set_num_inputs(std::function<uint32_t (const NodeAttrs& attr)> fn) {  // NOLINT(*)
  this->get_num_inputs = fn;
  return *this;
...
/*!
* Copyright (c) 2017 by Contributors
* \file nn.h
* \brief Auxiliary param for neural network primitives.
*/
#ifndef NNVM_TOP_NN_H_
#define NNVM_TOP_NN_H_
#include <dmlc/base.h>
#include <dmlc/parameter.h>
namespace nnvm {
namespace top {
struct DenseParam : public dmlc::Parameter<DenseParam> {
  int units;
  bool use_bias;

  DMLC_DECLARE_PARAMETER(DenseParam) {
    DMLC_DECLARE_FIELD(units).set_lower_bound(1)
    .describe("Number of hidden units of the dense transformation.");
    DMLC_DECLARE_FIELD(use_bias).set_default(true)
    .describe("Whether to use bias parameter");
  }
  // constants
  static const constexpr int kData = 0;
  static const constexpr int kWeight = 1;
  static const constexpr int kBias = 2;
};
} // namespace top
} // namespace nnvm
#endif // NNVM_TOP_NN_H_
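// A minimal sketch (hypothetical helper, not part of this commit) showing how
// DenseParam is populated via dmlc::Parameter::Init: string attributes are
// parsed, defaults applied, and constraints such as set_lower_bound(1) enforced.
#include <map>
#include <string>
#include <nnvm/top/nn.h>

inline nnvm::top::DenseParam ExampleDenseParam() {
  nnvm::top::DenseParam param;
  std::map<std::string, std::string> kwargs = {{"units", "3"}};
  param.Init(kwargs);  // throws dmlc::ParamError if units < 1
  return param;        // param.units == 3, param.use_bias == true (default)
}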
/*!
* Copyright (c) 2017 by Contributors
* \file tensor.h
* \brief Auxiliary param for tensor primitive.
*/
#ifndef NNVM_TOP_TENSOR_H_
#define NNVM_TOP_TENSOR_H_
namespace nnvm {
namespace top {
} // namespace top
} // namespace nnvm
#endif // NNVM_TOP_TENSOR_H_
/*!
* Copyright (c) 2017 by Contributors
* \file elemwise_op_common.h
* \brief Common operator utilities
*/
#ifndef NNVM_TOP_ELEMWISE_OP_COMMON_H_
#define NNVM_TOP_ELEMWISE_OP_COMMON_H_
#include <string>
#include <vector>
#include "./op_common.h"
namespace nnvm {
namespace top {
template<typename AttrType, bool (*is_none)(const AttrType&),
         bool (*assign)(AttrType*, const AttrType&), bool reverse_infer,
         std::string (*attr_string)(const AttrType&),
         int n_in = -1, int n_out = -1>
inline bool ElemwiseAttr(const nnvm::NodeAttrs& attrs,
                         std::vector<AttrType> *in_attrs,
                         std::vector<AttrType> *out_attrs,
                         const AttrType& none) {
  AttrType dattr = none;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  if (n_in != -1)
    in_size = static_cast<size_t>(n_in);
  if (n_out != -1)
    out_size = static_cast<size_t>(n_out);
  auto deduce = [&](std::vector<AttrType> *vec, size_t size, const char *name) {
    for (size_t i = 0; i < size; ++i) {
      CHECK(assign(&dattr, (*vec)[i]))
        << "Incompatible attr in node " << attrs.name << " at " << i << "-th "
        << name << ": " << "expected " << attr_string(dattr)
        << ", got " << attr_string((*vec)[i]);
    }
  };
  deduce(in_attrs, in_size, "input");
  if (reverse_infer) deduce(out_attrs, out_size, "output");
  auto write = [&](std::vector<AttrType> *vec, size_t size, const char *name) {
    for (size_t i = 0; i < size; ++i) {
      CHECK(assign(&(*vec)[i], dattr))
        << "Incompatible attr in node " << attrs.name << " at " << i << "-th "
        << name << ": " << "expected " << attr_string(dattr)
        << ", got " << attr_string((*vec)[i]);
    }
  };
  write(in_attrs, in_size, "input");
  write(out_attrs, out_size, "output");
  if (is_none(dattr)) return false;
  return true;
}
template<int n_in, int n_out>
inline bool ElemwiseShape(const nnvm::NodeAttrs& attrs,
                          std::vector<TShape> *in_attrs,
                          std::vector<TShape> *out_attrs) {
  if (n_in != -1) {
    CHECK_EQ(in_attrs->size(), static_cast<size_t>(n_in)) << " in operator " << attrs.name;
  }
  if (n_out != -1) {
    CHECK_EQ(out_attrs->size(), static_cast<size_t>(n_out)) << " in operator " << attrs.name;
  }
  return ElemwiseAttr<TShape, shape_is_none, shape_assign, true, shape_string>(
    attrs, in_attrs, out_attrs, TShape());
}
template<int n_in, int n_out>
inline bool ElemwiseType(const nnvm::NodeAttrs& attrs,
                         std::vector<int> *in_attrs,
                         std::vector<int> *out_attrs) {
  if (n_in != -1) {
    CHECK_EQ(in_attrs->size(), static_cast<size_t>(n_in)) << " in operator " << attrs.name;
  }
  if (n_out != -1) {
    CHECK_EQ(out_attrs->size(), static_cast<size_t>(n_out)) << " in operator " << attrs.name;
  }
  return ElemwiseAttr<int, type_is_none, type_assign, true, type_string>(
    attrs, in_attrs, out_attrs, -1);
}
} // namespace top
} // namespace nnvm
#endif // NNVM_TOP_ELEMWISE_OP_COMMON_H_
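// A sketch (hypothetical, for illustration) of calling ElemwiseShape<1, 1>
// directly, the way an FInferShape pass would: one known input shape is
// propagated to one unknown output shape.
#include <vector>
#include <nnvm/node.h>
#include <nnvm/tuple.h>

inline void ExampleElemwiseShape() {
  nnvm::NodeAttrs attrs;
  attrs.name = "relu0";
  std::vector<nnvm::TShape> in = {nnvm::TShape({2, 3})};
  std::vector<nnvm::TShape> out = {nnvm::TShape()};  // ndim() == 0: unknown
  bool complete = nnvm::top::ElemwiseShape<1, 1>(attrs, &in, &out);
  // complete == true and out[0] == (2, 3); because reverse_infer is true,
  // a known output shape would likewise constrain an unknown input.
  (void)complete;
}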
/*!
* Copyright (c) 2017 by Contributors
* \file nn.cc
* \brief Property def of nn operators.
*/
#include <nnvm/op.h>
#include <nnvm/node.h>
#include <nnvm/op_attr_types.h>
#include <nnvm/top/nn.h>
#include "./op_common.h"
#include "./elemwise_op_common.h"
namespace nnvm {
namespace top {
// dense
DMLC_REGISTER_PARAMETER(DenseParam);
inline std::vector<std::string> DenseListInputNames(const NodeAttrs& attrs) {
  const DenseParam& param = nnvm::get<DenseParam>(attrs.parsed);
  if (param.use_bias) {
    return {"data", "weight", "bias"};
  } else {
    return {"data", "weight"};
  }
}
inline bool DenseInferShape(const nnvm::NodeAttrs& attrs,
                            std::vector<TShape> *in_shape,
                            std::vector<TShape> *out_shape) {
  const DenseParam& param = nnvm::get<DenseParam>(attrs.parsed);
  if (param.use_bias) {
    CHECK_EQ(in_shape->size(), 3U) << "Input:[data, weight, bias]";
  } else {
    CHECK_EQ(in_shape->size(), 2U) << "Input:[data, weight]";
  }
  CHECK_EQ(out_shape->size(), 1U);
  TShape dshape = (*in_shape)[DenseParam::kData];
  TShape oshape = (*out_shape)[0];
  // require data to be known
  if (dshape.ndim() == 0) return false;
  dim_t num_input = dshape.ProdShape(1, dshape.ndim());
  SHAPE_ASSIGN_CHECK(*in_shape, DenseParam::kWeight, TShape({param.units, num_input}));
  if (param.use_bias) {
    SHAPE_ASSIGN_CHECK(*in_shape, DenseParam::kBias, TShape({param.units}));
  }
  SHAPE_ASSIGN_CHECK(*out_shape, 0, TShape({dshape[0], param.units}));
  if (oshape.ndim() != 0) {
    dshape[0] = oshape[0];
    SHAPE_ASSIGN_CHECK(*in_shape, DenseParam::kData, dshape);
  }
  return true;
}
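// Worked example of the inference above, assuming units = 3 and a known data
// shape of (4, 10): num_input = 10 (the product of all non-batch dims), so
//   weight -> (3, 10)   // (units, num_input)
//   bias   -> (3,)      // (units,)
//   output -> (4, 3)    // (batch, units)
// Conversely, a known output shape back-fills an unknown batch dim of data.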
NNVM_REGISTER_OP(dense)
.NNVM_DESCRIBE(R"code(Applies a linear transformation: :math:`Y = XW^T + b`.

- **data**: `(x1, x2, ..., xn, input_dim)`
- **weight**: `(units, input_dim)`
- **bias**: `(units,)`
- **out**: `(x1, x2, ..., xn, units)`

The learnable parameters include both ``weight`` and ``bias``.
If ``use_bias`` is set to be false, then the ``bias`` term is ignored.
)code" NNVM_ADD_FILELINE)
.set_support_level(1)
.add_argument("data", "nD Tensor", "Input data.")
.add_argument("weight", "2D Tensor", "Weight matrix.")
.add_argument("bias", "1D Tensor", "Bias parameter.")
.add_arguments(DenseParam::__FIELDS__())
.set_attr_parser(ParamParser<DenseParam>)
.set_num_outputs(1)
.set_num_inputs([](const NodeAttrs& attrs) {
    const DenseParam& param = nnvm::get<DenseParam>(attrs.parsed);
    return param.use_bias ? 3 : 2;
  })
.set_attr<FListInputNames>("FListInputNames", DenseListInputNames)
.set_attr<FInferShape>("FInferShape", DenseInferShape)
.set_attr<FInferType>("FInferType", ElemwiseType<-1, 1>);
} // namespace top
} // namespace nnvm
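// A sketch (hypothetical lookup code, not part of this commit): once the
// registration above has run, dense is discoverable via the global registry.
#include <nnvm/op.h>

inline const nnvm::Op* ExampleDenseLookup() {
  const nnvm::Op* op = nnvm::Op::Get("dense");  // fails hard if unregistered
  // op->support_level == 1 and op->num_outputs == 1; op->get_num_inputs(attrs)
  // returns 3 or 2 depending on the parsed use_bias flag.
  return op;
}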
/*!
* Copyright (c) 2017 by Contributors
* \file op_common.h
* \brief Common operator utilities
*/
#ifndef NNVM_TOP_OP_COMMON_H_
#define NNVM_TOP_OP_COMMON_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <sstream>
#include <string>
#include <vector>
namespace nnvm {
namespace top {
/*! \brief Exception thrown by an InferShape error. */
struct InferShapeError : public dmlc::Error {
  /*! \brief the error message */
  std::string msg;
  /*! \brief corresponding input index */
  int index;
  // constructor
  InferShapeError(const std::string& msg_, int index)
    : dmlc::Error(msg_), msg(msg_), index(index) {}
};
/*! \brief Exception thrown by an InferType error. */
struct InferTypeError : public dmlc::Error {
  /*! \brief the error message */
  std::string msg;
  /*! \brief corresponding input index */
  int index;
  // constructor
  InferTypeError(const std::string& msg_, int index)
    : dmlc::Error(msg_), msg(msg_), index(index) {}
};
/*!
 * \brief Parse keyword arguments as PType arguments and save to parsed.
 * \tparam PType the parameter type.
 * \param attrs The attributes.
 */
template<typename PType>
inline void ParamParser(nnvm::NodeAttrs* attrs) {
  PType param;
  try {
    param.Init(attrs->dict);
  } catch (const dmlc::ParamError& e) {
    std::ostringstream os;
    os << e.what();
    os << ", in operator " << attrs->op->name << "("
       << "name=\"" << attrs->name << "\"";
    for (const auto& k : attrs->dict) {
      os << ", " << k.first << "=\"" << k.second << "\"";
    }
    os << ")";
    throw dmlc::ParamError(os.str());
  }
  attrs->parsed = std::move(param);
}
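// For example, a failing parse of attrs->dict = {{"units", "0"}} on dense is
// rethrown with the call context appended, ending in
//   , in operator dense(name="dense0", units="0")
// where the node name "dense0" is hypothetical; the suffix follows directly
// from the stream formatting above.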
/*! \brief check if shape is empty or contains an unknown (0) dim. */
inline bool shape_is_none(const TShape& x) {
  return x.ndim() == 0 || x.Size() == 0;
}
/*! \brief check if type is none (-1) */
inline bool type_is_none(const int& x) {
  return x == -1;
}
/*! \brief check if shape is scalar({1}). */
inline bool shape_is_scalar(const TShape& x) {
  return x.ndim() == 1 && x.Size() == 1;
}
/*! \brief get string representation of a shape */
inline std::string shape_string(const TShape& x) {
  std::ostringstream os;
  os << x;
  return os.str();
}
/*! \brief get string representation of a type */
inline std::string type_string(const int& x) {
  return std::to_string(x);
}
/*!
 * \brief Assign x to y. Checks for compatibility when y is not empty.
 *  Allows missing dims in both x and y (as 0).
 * \param y target shape.
 * \param x source shape.
 * \return whether x and y are compatible.
 */
inline bool shape_assign(TShape *y, const TShape& x) {
  if (y->ndim() == 0) {
    *y = x;
    return true;
  } else if (y->ndim() != x.ndim()) {
    return x.ndim() == 0;
  } else {
    for (size_t i = 0; i < y->ndim(); ++i) {
      if ((*y)[i] == 0) {
        (*y)[i] = x[i];
      } else if ((*y)[i] != x[i] && x[i] != 0) {
        return false;
      }
    }
    return true;
  }
}
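// Examples of the assignment semantics above (0 marks an unknown dimension):
//   y = ()     , x = (2, 3) -> y becomes (2, 3), returns true
//   y = (2, 0) , x = (2, 3) -> y becomes (2, 3), returns true
//   y = (2, 3) , x = (2, 0) -> y unchanged,      returns true
//   y = (2, 3) , x = (2, 4) -> returns false (incompatible)
//   y = (2, 3) , x = ()     -> y unchanged,      returns true (x fully unknown)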
/*!
 * \brief Assign x to y. Checks for compatibility when y is not -1.
 * \param y target type.
 * \param x source type.
 * \return whether x and y are compatible.
 */
inline bool type_assign(int *y, const int& x) {
  if (*y == -1) {
    *y = x;
    return true;
  } else if (*y != x && x != -1) {
    return false;
  }
  return true;
}
/*!
 * \brief macro to assign a shape to out if out is unknown, otherwise check consistency.
 *  Use a macro so the error file is shown more clearly.
 * \param shape_array the shape array to store the result
 * \param index the index in the array
 * \param shape the inferred shape
 */
#define SHAPE_ASSIGN_CHECK(shape_array, index, shape)                      \
  {                                                                        \
    if (!shape_assign(&(shape_array)[index], TShape(shape))) {             \
      std::ostringstream os;                                               \
      os << "Shape inconsistent, Provided=" << (shape_array)[index] << ',' \
         << " inferred shape=" << shape;                                   \
      throw InferShapeError(os.str(), index);                              \
    }                                                                      \
  }
/*!
 * \brief macro to assign a type to out if out is unknown (-1), otherwise check consistency.
 *  Use a macro so the error file is shown more clearly.
 * \param type_array the type array to store the result
 * \param index the index in the array
 * \param type the inferred type
 */
#define TYPE_ASSIGN_CHECK(type_array, index, type)       \
  {                                                      \
    if (!type_assign(&(type_array)[index], type)) {      \
      std::ostringstream os;                             \
      os << "Type inconsistent, Provided="               \
         << type_string((type_array)[index]) << ','      \
         << " inferred type=" << type_string(type);      \
      throw InferTypeError(os.str(), index);             \
    }                                                    \
  }
// assign the first input's shape to every output (and every input)
inline bool SameShape(const NodeAttrs& attrs,
                      std::vector<TShape> *ishape,
                      std::vector<TShape> *oshape) {
  if (ishape->size() == 0 || (*ishape)[0].ndim() == 0) return false;
  for (TShape& pshape : *oshape) {
    pshape = (*ishape)[0];
  }
  for (TShape& pshape : *ishape) {
    pshape = (*ishape)[0];
  }
  return true;
}
} // namespace top
} // namespace nnvm
#endif // NNVM_TOP_OP_COMMON_H_
/*!
* Copyright (c) 2017 by Contributors
* \file tensor.cc
* \brief Property def of tensor operators.
*/
#include <nnvm/base.h>
#include <nnvm/top/tensor.h>
namespace nnvm {
namespace top {
} // namespace top
} // namespace nnvm
import nnvm.symbol as sym
from nnvm import NNVMError
-def test_dense():
-    x = sym.Variable('x')
-    y = sym.dense(x)
-    assert y.list_input_names() == ['x']
def test_compose():
    x = sym.Variable('x')
    z = sym.Variable('z')
...
import nnvm.symbol as sym
from nnvm import NNVMError

def test_dense():
    x = sym.Variable('x')
    y = sym.dense(x, units=3, name="dense")
    assert y.list_input_names() == ['x', 'dense_weight', 'dense_bias']

if __name__ == "__main__":
    test_dense()