Unverified Commit 3aabbd9c by Wang Yucheng Committed by GitHub

[Torch] Add support for max_pool1d (#5142)

* [Torch] Add support for max_pool1d

* add test

* fix line-too-long

* remove wrapper class
parent 686911e2
@@ -213,12 +213,33 @@ def _maxpool_2d():
         pool_size = _infer_shape(inputs[1])
         strides = _infer_shape(inputs[2])
         padding = _infer_shape(inputs[3])
+        dilation = _infer_shape(inputs[4])
         ceil_mode = int(inputs[5])
+        if dilation != (1, 1):
+            msg = "MaxPool2d with dilation %s is not implemented" % (str(dilation), )
+            raise NotImplementedError(msg)
 
         return _op.nn.max_pool2d(data, pool_size, strides, padding, "NCHW", ceil_mode)
     return _impl
 
+def _maxpool_1d():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        pool_size = _infer_shape(inputs[1])
+        strides = _infer_shape(inputs[2])
+        padding = _infer_shape(inputs[3])
+        dilation = _infer_shape(inputs[4])
+        ceil_mode = int(inputs[5])
+
+        if dilation != (1,):
+            msg = "MaxPool1d with dilation %s is not implemented" % (str(dilation), )
+            raise NotImplementedError(msg)
+
+        return _op.nn.max_pool1d(data, pool_size, strides, padding, "NCW", ceil_mode)
+    return _impl
+
 def _hardtanh():
     def _impl(inputs, input_types):
         a = inputs[0]
@@ -868,6 +889,7 @@ _convert_map = {
     "aten::adaptive_max_pool2d"     : _adaptive_max_pool_2d(),
     "aten::max_pool2d"              : _maxpool_2d(),
     "aten::max_pool2d_with_indices" : _maxpool_2d(),
+    "aten::max_pool1d"              : _maxpool_1d(),
     "aten::hardtanh"                : _hardtanh(),
     "aten::hardtanh_"               : _hardtanh(),
     "aten::_convolution"            : _convolution(),
...
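With the aten::max_pool1d entry registered in _convert_map, a traced torch.nn.MaxPool1d module can be imported through the PyTorch frontend. Below is a minimal usage sketch, not part of this change; it assumes relay.frontend.from_pytorch accepts a list of (input_name, shape) pairs, and the name "input0" is purely illustrative.

```python
import torch
from tvm import relay

# Trace a 1-D max pooling model so its TorchScript graph contains an
# aten::max_pool1d node, which the new _maxpool_1d converter maps to
# relay.nn.max_pool1d.
model = torch.nn.MaxPool1d(kernel_size=4, stride=2, padding=2).eval()
input_data = torch.rand(1, 3, 10)
scripted = torch.jit.trace(model, input_data)

# "input0" is an arbitrary name; it only has to match the shape entry.
mod, params = relay.frontend.from_pytorch(scripted, [("input0", (1, 3, 10))])
print(mod)
```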
@@ -987,6 +987,10 @@ Array<te::Tensor> Pool1DCompute(const Attrs& attrs,
       << " or 4-D input (e.g. NCWc on for vector instructions)"
       << " or 5-D input (e.g. NCWnc for tensor accelerators)";
 
+  if (param->padding.size() == 1) {
+    padding.push_back(padding[0]);
+  }
+
   if (mode == topi::nn::kAvgPool) {
     bool count_include_pad = reinterpret_cast<const AvgPool1DAttrs*>(param)->count_include_pad;
     return Array<te::Tensor>{
...
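The Pool1DCompute change above expands a single padding value into symmetric (left, right) padding before lowering, so padding=(2,) and padding=(2, 2) should compute the same result. The following is a hypothetical check, not part of the diff, assuming a recent TVM build where relay.create_executor and NDArray.numpy() are available.

```python
import numpy as np
from tvm import relay

data = relay.var("data", shape=(1, 3, 10), dtype="float32")
x = np.random.uniform(size=(1, 3, 10)).astype("float32")

results = []
for pad in [(2,), (2, 2)]:
    out = relay.nn.max_pool1d(data, pool_size=(4,), strides=(2,), padding=pad)
    func = relay.Function([data], out)
    # Compiling and running goes through Pool1DCompute, which now expands a
    # single padding value to symmetric (left, right) padding.
    result = relay.create_executor("graph").evaluate(func)(x)
    results.append(result.numpy())

# Both padding forms should produce identical outputs.
np.testing.assert_allclose(results[0], results[1])
```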
@@ -351,21 +351,33 @@ def test_forward_adaptiveavgpool():
     verify_model(AdaptiveAvgPool2D1().float().eval(), input_data=input_data)
     verify_model(AdaptiveAvgPool2D2().float().eval(), input_data=input_data)
 
-def test_forward_maxpool():
+def test_forward_maxpool2d():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
+    input_data = torch.rand(input_shape).float()
 
-    class MaxPool2D1(Module):
-        def forward(self, *args):
-            return torch.nn.MaxPool2d(kernel_size=[1, 1])(args[0])
-
-    class MaxPool2D2(Module):
-        def forward(self, *args):
-            return torch.nn.MaxPool2d(kernel_size=[10, 10])(args[0])
-
-    input_data = torch.rand(input_shape).float()
-    verify_model(MaxPool2D1().float().eval(), input_data=input_data)
-    verify_model(MaxPool2D2().float().eval(), input_data=input_data)
+    verify_model(torch.nn.MaxPool2d(kernel_size=[1, 1]).eval(),
+                 input_data)
+    verify_model(torch.nn.MaxPool2d(kernel_size=[10, 10]).eval(),
+                 input_data)
+    verify_model(torch.nn.MaxPool2d(kernel_size=[4, 4],
+                                    padding=2,
+                                    stride=2).eval(),
+                 input_data)
+
+def test_forward_maxpool1d():
+    torch.set_grad_enabled(False)
+    input_shape = [1, 3, 10]
+    input_data = torch.rand(input_shape).float()
+
+    verify_model(torch.nn.MaxPool1d(kernel_size=1).eval(),
+                 input_data)
+    verify_model(torch.nn.MaxPool1d(kernel_size=10).eval(),
+                 input_data)
+    verify_model(torch.nn.MaxPool1d(kernel_size=4,
+                                    padding=2,
+                                    stride=2).eval(),
+                 input_data)
 
 def test_forward_avgpool():
     torch.set_grad_enabled(False)
...
@@ -1034,7 +1046,8 @@ if __name__ == "__main__":
     test_forward_concatenate()
     test_forward_relu()
     test_forward_adaptiveavgpool()
-    test_forward_maxpool()
+    test_forward_maxpool2d()
+    test_forward_maxpool1d()
     test_forward_hardtanh()
     test_forward_conv()
     test_forward_threshold()
...