Commit e6319f62 by Yuwei Hu, committed by Tianqi Chen

Keras Frontend (#273)

* vgg16 success

* remove six.PY2, use sys.version_info;
convert_activation() accepts an activation type name (str, e.g. 'relu') as input;

* add convert_merge

* fix convert_batchnorm;
improve tests

* fix lint

* add numpy-style pad operator

* deal with asymmetry padding

* resnet50 success

* fix pool_convert; xception passes test

* update tvm

* fix bias error; all tests pass

* use > >, not >>
parent 2406757d
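The new entry point added by this commit is nnvm.frontend.from_keras; the sketch below follows the test code at the end of this diff. The 'llvm' target, batch size 1, and the ResNet50 model are illustrative, and 'data' is the input name the tests assume.

import numpy as np
import keras
import nnvm
import tvm
from tvm.contrib import graph_runtime

# illustrative model; the commit's tests cover VGG16, ResNet50 and Xception
keras_model = keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet',
                                                   input_shape=(224, 224, 3), classes=1000)
sym, params = nnvm.frontend.from_keras(keras_model)
# Keras feeds NHWC input; the converted graph is built for NCHW
shape_dict = {'data': (1, 3, 224, 224)}
graph, lib, params = nnvm.compiler.build(sym, 'llvm', shape_dict, params=params)
m = graph_runtime.create(graph, lib, tvm.cpu(0))
m.set_input('data', tvm.nd.array(np.random.uniform(size=(1, 3, 224, 224)).astype('float32')))
m.set_input(**params)
m.run()
out = m.get_output(0, tvm.nd.empty((1, 1000), 'float32')).asnumpy()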
...@@ -47,6 +47,7 @@ This level enables fully connected multi-layer perceptron.
nnvm.symbol.batch_norm
nnvm.symbol.softmax
nnvm.symbol.log_softmax
nnvm.symbol.pad
**Level 2: Convolutions**
...@@ -119,7 +120,7 @@ Detailed Definitions
.. autofunction:: nnvm.symbol.batch_norm
.. autofunction:: nnvm.symbol.softmax
.. autofunction:: nnvm.symbol.log_softmax
.. autofunction:: nnvm.symbol.pad
.. autofunction:: nnvm.symbol.conv2d
.. autofunction:: nnvm.symbol.conv2d_transpose
......
...@@ -101,6 +101,21 @@ struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
}
};
struct PadParam : public dmlc::Parameter<PadParam> {
float pad_value;
Tuple<Tuple<int> > pad_width;
DMLC_DECLARE_PARAMETER(PadParam) {
DMLC_DECLARE_FIELD(pad_value).set_default(0.0)
.describe("The value to be padded.");
DMLC_DECLARE_FIELD(pad_width)
.describe("Number of values padded to the edges of each axis, "
"in the format of ((before_1, after_1), ... (before_N, after_N))");
}
};
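At the Python symbol level this parameter surfaces through nnvm.symbol.pad, as exercised by test_pad later in this diff; a minimal sketch:

import nnvm.symbol as sym
x = sym.Variable("x")
# pad the last two axes: one row after, two columns before and three after, filled with 1.0
y = sym.pad(x, pad_width=((0, 0), (0, 0), (0, 1), (2, 3)), pad_value=1.)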
struct Conv2DParam : public dmlc::Parameter<Conv2DParam> {
int channels;
TShape kernel_size;
......
...@@ -3,3 +3,4 @@ from __future__ import absolute_import
from .mxnet import from_mxnet
from .onnx import from_onnx
from .coreml import from_coreml
from .keras import from_keras
...@@ -135,3 +135,38 @@ class AttrConverter(object):
if key not in attr:
raise AttributeError("Required attribute {} not found.".format(key))
return attr[key]
class SymbolTable(object):
"""Table storing symbols by names."""
def __init__(self):
self.vars = {}
self.params = {}
self.const_ctr = 1
self.in_padding = False
self.paddings = [0, 0]
def new_const(self, value):
name = "_param_%d" % (self.const_ctr)
self.const_ctr += 1
self.params[name] = value
self.vars[name] = _sym.Variable(name=name)
return self.vars[name]
def get_var(self, name, must_contain=True):
if must_contain:
assert name in self.vars
if name not in self.vars:
self.vars[name] = _sym.Variable(name=name)
return self.vars[name]
def set_var(self, name, sym):
assert isinstance(sym, _sym.Symbol)
self.vars[name] = sym
def set_padding(self, paddings):
self.paddings = paddings
self.in_padding = True
def clear_padding(self):
self.in_padding = False
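A minimal sketch of how a frontend converter is expected to use this shared table (the import path follows this diff and assumes the frontend package imports cleanly; the names and shapes are illustrative):

import numpy as np
from nnvm.frontend.common import SymbolTable

symtab = SymbolTable()
weight = symtab.new_const(np.zeros((64, 3, 3, 3), dtype='float32'))  # registered as _param_1
data = symtab.get_var('data', must_contain=False)  # first use creates a placeholder Variable
# a converter would then build a symbol from data/weight and record the layer output, e.g.:
# symtab.set_var('conv1', conv_sym)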
...@@ -4,44 +4,11 @@ from __future__ import absolute_import as _abs
import tvm
import numpy as np
from .. import symbol as _sym
from .common import SymbolTable
__all__ = ['from_coreml']
class SymbolTable(object):
"""Table storing symbols by names."""
def __init__(self):
self.vars = {}
self.params = {}
self.const_ctr = 1
self.in_padding = False
self.paddings = [0, 0]
def new_const(self, value):
name = "_param_%d" % (self.const_ctr)
self.const_ctr += 1
self.params[name] = value
self.vars[name] = _sym.Variable(name=name)
return self.vars[name]
def get_var(self, name, must_contain=True):
if must_contain:
assert name in self.vars
if name not in self.vars:
self.vars[name] = _sym.Variable(name=name)
return self.vars[name]
def set_var(self, name, sym):
assert isinstance(sym, _sym.Symbol)
self.vars[name] = sym
def set_padding(self, paddings):
self.paddings = paddings
self.in_padding = True
def clear_padding(self):
self.in_padding = False
def NeuralNetworkImageScaler(op, insym, symtab):
# this changes the symbol
biases = np.array([op.blueBias, op.greenBias, op.redBias]).reshape([3, 1, 1])
......
...@@ -52,6 +52,22 @@ class AttrDict(object):
"""
return tuple(int(x) for x in self[key][1:-1].split(",") if x)
def get_int_pair_tuple(self, key):
"""Get tuple of integer pairs from attr dict
Parameters
----------
key : str
The attr key
Returns
-------
tuple : tuple of int pairs
The result tuple
"""
flat = [int(x.strip(' [] ')) for x in self[key][1:-1].split(",")]
return tuple((flat[i], flat[i+1]) for i in range(0, len(flat), 2))
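A standalone sketch of what get_int_pair_tuple does; the attribute string below assumes dmlc's square-bracket serialization of Tuple<Tuple<int> >, which is an assumption of this example:

# hypothetical serialized pad_width attribute
attr_str = "[[0, 0], [0, 0], [0, 1], [2, 3]]"
flat = [int(x.strip(' [] ')) for x in attr_str[1:-1].split(",")]
pairs = tuple((flat[i], flat[i + 1]) for i in range(0, len(flat), 2))
print(pairs)  # ((0, 0), (0, 0), (0, 1), (2, 3))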
def get_int(self, key):
"""Get integer from attr dict
......
...@@ -39,6 +39,22 @@ reg.register_schedule("flatten", _fschedule_broadcast)
reg.register_pattern("flatten", OpPattern.INJECTIVE)
# pad
@reg.register_compute("pad")
def compute_pad(attrs, inputs, _):
"""Compute definition of pad"""
pad_width = attrs.get_int_pair_tuple('pad_width')
assert len(pad_width) == len(inputs[0].shape) and \
len(pad_width[0]) == 2, "illegal pad_width"
pad_before = [x[0] for x in pad_width]
pad_after = [x[1] for x in pad_width]
pad_value = attrs.get_int('pad_value')
return topi.nn.pad(inputs[0], pad_before, pad_after, pad_value)
reg.register_schedule("pad", _fschedule_broadcast)
reg.register_pattern("pad", OpPattern.INJECTIVE)
# softmax
@reg.register_compute("softmax")
def compute_softmax(attrs, inputs, _):
...@@ -53,9 +69,9 @@ def schedule_softmax(_, outs, target):
with tvm.target.create(target):
return topi.generic.schedule_softmax(outs)
reg.register_pattern("softmax", OpPattern.OPAQUE)
# log softmax
@reg.register_compute("log_softmax")
def compute_log_softmax(attrs, inputs, _):
...@@ -73,6 +89,7 @@ def schedule_log_softmax(_, outs, target):
# Mark softmax as extern as we do not fuse it in call cases
reg.register_pattern("log_softmax", OpPattern.OPAQUE)
# dense
@reg.register_compute("dense")
def compute_dense(attrs, inputs, _):
......
...@@ -254,5 +254,42 @@ NNVM_REGISTER_OP(leaky_relu)
.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
.set_support_level(1);
DMLC_REGISTER_PARAMETER(PadParam);
inline bool PadInferShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape>* in_shape,
std::vector<TShape>* out_shape) {
const PadParam& param = nnvm::get<PadParam>(attrs.parsed);
CHECK_EQ(in_shape->size(), 1U);
CHECK_EQ(out_shape->size(), 1U);
TShape dshape = (*in_shape)[0];
if (dshape.ndim() == 0) return false;
CHECK_EQ(param.pad_width.ndim(), dshape.ndim());
CHECK_EQ(param.pad_width[0].ndim(), 2U);
TShape oshape = dshape;
for (uint32_t i = 0; i < dshape.ndim(); i++) {
int pad_before = param.pad_width[i][0];
int pad_after = param.pad_width[i][1];
oshape[i] = dshape[i] + pad_before + pad_after;
}
NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_shape, 0, oshape);
return true;
}
NNVM_REGISTER_OP(pad)
.describe(R"code(Pad for n-D tensor.
)code" NNVM_ADD_FILELINE)
.add_argument("data", "n-D Tensor", "Input data.")
.add_arguments(PadParam::__FIELDS__())
.set_attr_parser(ParamParser<PadParam>)
.set_attr<FGetAttrDict>("FGetAttrDict", ParamGetAttrDict<PadParam>)
.set_num_outputs(1)
.set_num_inputs(1)
.set_attr<FInferShape>("FInferShape", PadInferShape)
.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
.set_support_level(1);
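PadInferShape adds each (before, after) pair to the corresponding input dimension, so an input of shape (1, 3, 28, 28) padded with ((0, 0), (0, 0), (0, 1), (2, 3)) yields (1, 3, 29, 33), which is the oshape checked in test_pad below.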
} // namespace top
} // namespace nnvm
...@@ -244,6 +244,25 @@ def test_squeeze():
verify_squeeze((1, 3, 1), axis=0)
verify_squeeze((1, 3, 2, 5, 1), axis=-1)
def test_pad():
x = sym.Variable("x")
y = sym.pad(x, pad_width=((0, 0), (0, 0), (0, 1), (2, 3)), pad_value=1.)
dtype = "float32"
dshape = (1, 3, 28, 28)
oshape = (1, 3, 29, 33)
shape_dict = {"x": dshape}
for target, ctx in ctx_list():
graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
m = graph_runtime.create(graph, lib, ctx)
data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
m.run(x=data)
out = m.get_output(0, tvm.nd.empty(oshape, dtype))
b_np = np.pad(data.asnumpy(), pad_width=((0, 0), (0, 0), (0, 1), (2, 3)),
mode='constant', constant_values=1.)
np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
test_split()
test_concatenate()
...@@ -257,3 +276,4 @@ if __name__ == "__main__":
test_sigmoid()
test_softmax()
test_squeeze()
test_pad()
import numpy as np
import nnvm
import tvm
from tvm.contrib import graph_runtime
from nnvm.testing.config import ctx_list
import keras
# prevent keras from using up all gpu memory
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
def verify_keras_frontend(keras_model):
in_shape = [dim.value if dim.value is not None else 1 for dim in keras_model.input_layers[0].input.shape]
out_shape = [dim.value if dim.value is not None else 1 for dim in keras_model.output_layers[0].output.shape]
def get_keras_output(x, dtype='float32'):
return keras_model.predict(x)
def get_tvm_output(x, target, ctx, input_name='data', dtype='float32'):
sym, params = nnvm.frontend.from_keras(keras_model)
shape_dict = {input_name : x.shape}
with nnvm.compiler.build_config(opt_level=2):
graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
m = graph_runtime.create(graph, lib, ctx)
m.set_input(input_name, tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
x = np.random.uniform(size=in_shape)
keras_out = get_keras_output(x)
for target, ctx in ctx_list():
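# Keras models take NHWC input, while the converted NNVM graph expects NCHW, hence the transpose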
tvm_out = get_tvm_output(x.transpose([0,3,1,2]), target, ctx)
np.testing.assert_allclose(keras_out, tvm_out, rtol=1e-5, atol=1e-5)
def verify_forward_softrelu():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Activation('softplus')(data)
x = keras.layers.Concatenate()([x, x])
x = keras.layers.GlobalMaxPooling2D()(x)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def verify_forward_leaky_relu():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.LeakyReLU(alpha=0.3)(data)
x = keras.layers.Add()([x, x])
x = keras.layers.GlobalAveragePooling2D()(x)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def verify_forward_dense():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.MaxPooling2D(pool_size=(2,2))(data)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(10, activation='relu', kernel_initializer='uniform')(x)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def verify_forward_transpose_conv():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Conv2D(filters=10, kernel_size=(3,3), strides=(2,2), padding='same')(data)
x = keras.applications.mobilenet.DepthwiseConv2D(kernel_size=(3,3), padding='same')(x)
x = keras.layers.Conv2DTranspose(filters=64, kernel_size=(3,3), padding='valid')(x)
x = keras.layers.GlobalMaxPooling2D()(x)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def verify_forward_separable_conv():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.SeparableConv2D(filters=10, kernel_size=(3,3),
padding='same', activation='relu')(data)
x = keras.layers.BatchNormalization(scale=True, center=False,
beta_initializer='uniform', gamma_initializer='uniform')(x)
x = keras.layers.GlobalAveragePooling2D()(x)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def verify_forward_vgg16():
keras_model = keras.applications.vgg16.VGG16(include_top=True, weights='imagenet',
input_shape=(224,224,3), classes=1000)
verify_keras_frontend(keras_model)
def verify_forward_xception():
keras_model = keras.applications.xception.Xception(include_top=True, weights='imagenet',
input_shape=(299,299,3), classes=1000)
verify_keras_frontend(keras_model)
def verify_forward_resnet50():
keras_model = keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet',
input_shape=(224,224,3), classes=1000)
verify_keras_frontend(keras_model)
if __name__ == '__main__':
verify_forward_softrelu()
verify_forward_leaky_relu()
verify_forward_dense()
verify_forward_transpose_conv()
verify_forward_separable_conv()
verify_forward_vgg16()
verify_forward_xception()
verify_forward_resnet50()