Unverified commit e722301a by Samuel, committed by GitHub

[PYTORCH]Activations for pytorch (#5194)

* [PYTORCH]Activations for pytorch

* Review comments updated
parent 2b6d69c6
@@ -193,6 +193,33 @@ def _relu():
         return _op.nn.relu(data)
     return _impl
 
+def _prelu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        # aten::prelu carries the learned per-channel slopes as a tensor
+        # in inputs[1]; relay's nn.prelu consumes that tensor directly.
+        alpha = inputs[1]
+        return _op.nn.prelu(data, alpha)
+    return _impl
+
+def _leaky_relu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        # negative_slope is a float scalar (e.g. 0.05); an int() cast
+        # would truncate it to 0 and reduce the op to a plain relu.
+        alpha = float(inputs[1])
+        return _op.nn.leaky_relu(data, alpha)
+    return _impl
+
+def _elu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        # ELU(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise,
+        # expressed with relu as: -alpha * relu(1 - exp(x)) + relu(x).
+        alpha = _expr.const(-float(inputs[1]), dtype='float32')
+        one = _expr.const(1.0, dtype='float32')
+        return alpha * _op.nn.relu(one - _op.exp(data)) + _op.nn.relu(data)
+    return _impl
+
+def _log_sigmoid():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        # Composed from existing ops: log(sigmoid(x)).
+        return _op.log(_op.tensor.sigmoid(data))
+    return _impl
 
 def _adaptive_avg_pool_2d():
     def _impl(inputs, input_types):
         data = inputs[0]
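The PReLU, ELU, and log-sigmoid converters above compose existing relay ops rather than calling dedicated kernels, so the identities they rely on are worth checking. A minimal NumPy sketch, illustrative only and not part of the diff (the function names here are made up):

    import numpy as np

    def relu(x):
        return np.maximum(x, 0.0)

    # ELU: the converter's relu-based rewrite against the closed form.
    # For x > 0, exp(x) > 1 so relu(1 - exp(x)) vanishes and only
    # relu(x) = x survives; for x <= 0, relu(x) vanishes and
    # -alpha * (1 - exp(x)) = alpha * (exp(x) - 1).
    def elu_reference(x, alpha):
        return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

    def elu_via_relu(x, alpha):
        return -alpha * relu(1.0 - np.exp(x)) + relu(x)

    x = np.linspace(-3.0, 3.0, 25).astype("float32")
    assert np.allclose(elu_reference(x, 1.3), elu_via_relu(x, 1.3), atol=1e-6)

    # PReLU: aten::prelu's second input is a tensor of learned slopes,
    # one per channel, broadcast along axis 1 (relay's default axis).
    def prelu_reference(x, alpha):
        a = alpha.reshape(1, -1, 1, 1)  # assumes NCHW input
        return np.maximum(x, 0.0) + a * np.minimum(x, 0.0)

One caveat: log(sigmoid(x)) underflows to -inf in float32 once x drops below roughly -88 (sigmoid rounds to 0 there), while torch.nn.LogSigmoid computes a numerically stable form; the composed mapping is fine for typical activations but can diverge on extreme inputs.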
@@ -921,6 +948,10 @@ _convert_map = {
"aten::select" : _select(),
"aten::relu" : _relu(),
"aten::relu_" : _relu(),
"aten::prelu" : _prelu(),
"aten::leaky_relu" : _leaky_relu(),
"aten::elu" : _elu(),
"aten::log_sigmoid" : _log_sigmoid(),
"aten::adaptive_avg_pool2d" : _adaptive_avg_pool_2d(),
"aten::adaptive_max_pool2d" : _adaptive_max_pool_2d(),
"aten::max_pool2d" : _maxpool_2d(),
@@ -327,29 +327,39 @@ def test_forward_concatenate():
 def test_forward_relu():
     torch.set_grad_enabled(False)
     input_shape = [10, 10]
-
-    class ReLU1(Module):
-        def forward(self, *args):
-            return torch.nn.ReLU()(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(ReLU1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.ReLU().eval(), input_data=input_data)
 
-def test_forward_adaptiveavgpool():
+def test_forward_prelu():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
+    input_data = torch.rand(input_shape).float()
+    verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=input_data)
 
-    class AdaptiveAvgPool2D1(Module):
-        def forward(self, *args):
-            return torch.nn.AdaptiveAvgPool2d([1, 1])(args[0])
+def test_forward_leakyrelu():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+    input_data = torch.rand(input_shape).float()
+    verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)
 
-    class AdaptiveAvgPool2D2(Module):
-        def forward(self, *args):
-            return torch.nn.AdaptiveAvgPool2d([10, 10])(args[0])
+def test_forward_elu():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+    input_data = torch.rand(input_shape).float()
+    verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)
 
+def test_forward_log_sigmoid():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
     input_data = torch.rand(input_shape).float()
-    verify_model(AdaptiveAvgPool2D1().float().eval(), input_data=input_data)
-    verify_model(AdaptiveAvgPool2D2().float().eval(), input_data=input_data)
+    verify_model(torch.nn.LogSigmoid().eval(), input_data=input_data)
+
+def test_forward_adaptiveavgpool():
+    torch.set_grad_enabled(False)
+    input_shape = [1, 3, 10, 10]
+    input_data = torch.rand(input_shape).float()
+    verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=input_data)
+    verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)
 
 def test_forward_maxpool2d():
     torch.set_grad_enabled(False)
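Note that torch.rand samples from [0, 1), so every test input above is nonnegative and the negative-input branch of LeakyReLU and ELU, the part where negative_slope and alpha actually matter, goes unexercised; a mis-cast alpha would still pass. A sketch of a stricter variant (illustrative, not part of the diff) using torch.randn for sign coverage:

    def test_forward_elu_signed():
        # torch.randn is zero-mean, so about half the inputs reach the
        # alpha * (exp(x) - 1) branch that torch.rand never hits.
        torch.set_grad_enabled(False)
        input_data = torch.randn([10, 10]).float()
        verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)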
@@ -406,28 +416,19 @@ def test_forward_avgpool():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
 
-    class AvgPool2D1(Module):
-        def forward(self, *args):
-            return torch.nn.AvgPool2d(kernel_size=[10, 10])(args[0])
-
     class AvgPool2D2(Module):
         def forward(self, *args):
             return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])
 
     input_data = torch.rand(input_shape).float()
-    verify_model(AvgPool2D1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)
     verify_model(AvgPool2D2().float().eval(), input_data=input_data)
 
 def test_forward_hardtanh():
     torch.set_grad_enabled(False)
     input_shape = [10]
-
-    class HardTanh1(Module):
-        def forward(self, *args):
-            return torch.nn.Hardtanh()(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(HardTanh1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.Hardtanh().eval(), input_data=input_data)
 
 def test_forward_conv():
     torch.set_grad_enabled(False)
@@ -482,13 +483,8 @@ def test_forward_conv_transpose():
 def test_forward_threshold():
     torch.set_grad_enabled(False)
     input_shape = [1, 3]
-
-    class Threshold1(Module):
-        def forward(self, *args):
-            return torch.nn.Threshold(0, 0)(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(Threshold1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=input_data)
 
 def test_forward_contiguous():
     torch.set_grad_enabled(False)
@@ -595,13 +591,8 @@ def test_forward_logsoftmax():
 def test_forward_sigmoid():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
-
-    class Sigmoid1(Module):
-        def forward(self, *args):
-            return torch.nn.Sigmoid()(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(Sigmoid1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.Sigmoid().eval(), input_data=input_data)
 
 def test_forward_dense():
     torch.set_grad_enabled(False)
@@ -1076,6 +1067,10 @@ if __name__ == "__main__":
     test_forward_unsqueeze()
     test_forward_concatenate()
     test_forward_relu()
+    test_forward_prelu()
+    test_forward_leakyrelu()
+    test_forward_elu()
+    test_forward_log_sigmoid()
     test_forward_adaptiveavgpool()
     test_forward_maxpool2d()
     test_forward_maxpool1d()