Commit 55d26ae8 authored by Pariksheet Pinjari, committed by Yizhi Liu

[Frontend][Darknet] L2 normalization support in darknet (#1916)

* l2 normalization

* retrigger CI
parent 48ebb4d6
...@@ -267,6 +267,13 @@ def _darknet_upsampling(inputs, attrs):
    new_attrs['scale'] = attrs.get('scale', 1)
    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
def _darknet_l2normalize(inputs, attrs):
    """Process the l2 normalization operation."""
    op_name, new_attrs = 'l2_normalize', {}
    new_attrs['eps'] = attrs.get('eps', 0)
    new_attrs['axis'] = attrs.get('axis', 1)
    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
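For reference, a minimal NumPy sketch (not part of the patch; `l2_normalize_ref` is an illustrative name) of the computation the converter maps onto: nnvm's `l2_normalize` divides each element by the L2 norm taken over the given axis, with `eps` acting as a lower bound on the squared sum for numerical stability.

```python
import numpy as np

def l2_normalize_ref(x, eps=0.0, axis=1):
    # out = x / sqrt(max(sum(x * x, axis), eps)), reduced over `axis`
    sq_sum = np.sum(x * x, axis=axis, keepdims=True)
    return x / np.sqrt(np.maximum(sq_sum, eps))

# NCHW input normalized over channels, matching the converter's
# defaults of eps=0 and axis=1.
x = np.random.uniform(0.1, 1.0, size=(1, 3, 224, 224)).astype('float32')
y = l2_normalize_ref(x)
```

With the default `eps=0` the clamp is a no-op, so a nonzero `eps` is only needed to guard against slices that are all zero along the reduced axis.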
def _darknet_softmax_output(inputs, attrs):
    """Process the softmax operation."""
    temperature = attrs.get('temperature', 1)
...@@ -370,6 +377,7 @@ _DARKNET_CONVERT_MAP = {
    LAYERTYPE.REGION : _darknet_region,
    LAYERTYPE.SHORTCUT : _darknet_shortcut,
    LAYERTYPE.UPSAMPLE : _darknet_upsampling,
    LAYERTYPE.L2NORM : _darknet_l2normalize,
    LAYERTYPE.YOLO : _darknet_yolo,
    LAYERTYPE.DETECTION : _darknet_op_not_support,
    LAYERTYPE.CROP : _darknet_op_not_support,
...@@ -630,6 +638,10 @@ class GraphProto(object):
        elif LAYERTYPE.UPSAMPLE == layer.type:
            attr.update({'scale' : layer.stride})
        elif LAYERTYPE.L2NORM == layer.type:
            # no layer attributes to extract; _darknet_l2normalize
            # falls back to its defaults (eps=0, axis=1)
            pass
        else:
            err = "Darknet layer type {} is not supported in nnvm.".format(layer.type)
            raise NotImplementedError(err)
...
...@@ -512,6 +512,7 @@ layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int ou
layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
layer make_upsample_layer(int batch, int w, int h, int c, int stride);
layer make_l2norm_layer(int batch, int inputs);
void free_network(network *net);
"""
)
...@@ -107,7 +107,7 @@ def test_forward(net, build_dtype='float32'):
            out.insert(0, attributes)
            out.insert(0, _read_memory_buffer((layer.total*2, ), layer.biases))
            out.insert(0, _read_memory_buffer((layer.n, ), layer.mask, dtype='int32'))
            layer_outshape = (layer.batch, layer.out_c,
                              layer.out_h, layer.out_w)
            out.insert(0, _read_memory_buffer(layer_outshape, layer.output))
        elif i == net.n-1:
...@@ -361,6 +361,19 @@ def test_forward_upsample():
    test_forward(net)
    LIB.free_network(net)
def test_forward_l2normalize():
    '''test l2 normalization layer'''
    net = LIB.make_network(1)
    layer = LIB.make_l2norm_layer(1, 224*224*3)
    layer.c = layer.out_c = 3
    layer.h = layer.out_h = 224
    layer.w = layer.out_w = 224
    net.layers[0] = layer
    net.w = net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
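For context, a hedged sketch of the path this test exercises: the shared `test_forward` helper hands the darknet handle to the nnvm frontend, which now resolves the `LAYERTYPE.L2NORM` layer through `_darknet_l2normalize`, then compiles and runs the graph. The snippet assumes a darknet `net` handle built exactly as in `test_forward_l2normalize` above (so it is not standalone), and uses the standard nnvm/tvm 0.x APIs rather than code from this patch.

```python
import numpy as np
import nnvm
import nnvm.compiler
import tvm
from tvm.contrib import graph_runtime

# `net` is the cffi darknet network handle from the test above.
sym, params = nnvm.frontend.darknet.from_darknet(net, dtype='float32')

# Compile for CPU; the input tensor name 'data' follows the frontend's
# convention, and the shape matches the 1x3x224x224 layer in the test.
shape = {'data': (1, 3, 224, 224)}
graph, lib, params = nnvm.compiler.build(sym, 'llvm', shape, params=params)

m = graph_runtime.create(graph, lib, tvm.cpu())
m.set_input(**params)
m.set_input('data', np.random.uniform(size=shape['data']).astype('float32'))
m.run()
out = m.get_output(0).asnumpy()  # L2-normalized activations
```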
def test_forward_elu():
    '''test elu activation layer'''
    net = LIB.make_network(1)
...@@ -520,6 +533,7 @@ if __name__ == '__main__':
    test_forward_region()
    test_forward_yolo_op()
    test_forward_upsample()
    test_forward_l2normalize()
    test_forward_elu()
    test_forward_rnn()
    test_forward_crnn()
...