Commit 50681784
Authored Jun 27, 2018 by Tatsuya Nishiyama
Committed by Tianqi Chen on Jun 26, 2018
[TOPI] Add C++ implementation of elementwise operators (#1306)
Parent: 44d8203f
Showing 6 changed files with 176 additions and 14 deletions:

  nnvm/python/nnvm/top/tensor.py              +1   -1
  topi/include/topi/elemwise.h                +73  -0
  topi/python/topi/tensor.py                  +5   -12
  topi/src/topi.cc                            +15  -0
  topi/tests/python/test_topi_tensor.py       +1   -1
  topi/tests/python_cpp/test_topi_tensor.py   +81  -0
nnvm/python/nnvm/top/tensor.py

@@ -187,7 +187,7 @@ def compute_elemwise_sum(attrs, inputs, _):
     """Compute definition of elemwise sum"""
     num_args = attrs.get_int("num_args")
     assert num_args == len(inputs), "Number of tensors does not match num_args."
-    return topi.tensor.elemwise_sum(inputs, num_args)
+    return topi.tensor.elemwise_sum(inputs)
 reg.register_pattern("elemwise_sum", OpPattern.ELEMWISE)
 reg.register_schedule("elemwise_sum", _fschedule_elemwise)
topi/include/topi/elemwise.h

@@ -10,6 +10,8 @@
 #include "topi/tags.h"
 #include "tvm/tvm.h"
+#include "tvm/ir.h"
+#include "tvm/ir_pass.h"

 namespace topi {
 using namespace tvm;

@@ -122,5 +124,76 @@ inline Tensor cast(const Tensor& x,
   },
   name, tag);
 }

+/*!
+ * \brief Creates an operation that sums the input tensors elementwise
+ *
+ * \param xs The input tensor array
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the sum operation
+ */
+inline Tensor elemwise_sum(const Array<Tensor>& xs,
+                           std::string name = "tensor",
+                           std::string tag = kElementWise) {
+  CHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor.";
+  return compute(xs[0]->shape, [&](const Array<Var>& i) {
+      auto sum_expr = xs[0](i);
+      for (size_t j = 1; j < xs.size(); j++) {
+        sum_expr = sum_expr + xs[j](i);
+      }
+      return sum_expr;
+  }, name, tag);
+}
+
+/*!
+ * \brief Creates an operation that fills a tensor with fill_value
+ *
+ * \param shape The shape of the tensor
+ * \param dtype The Type of fill_value
+ * \param fill_value The value to be filled
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the full operation
+ */
+inline Tensor full(const Array<Expr>& shape,
+                   Type dtype,
+                   const Expr fill_value,
+                   std::string name = "tensor",
+                   std::string tag = kElementWise) {
+  Expr ev = lossless_cast(dtype, fill_value);
+  if (!ev.defined()) {
+    LOG(ERROR) << "Can't cast fill_value to " << dtype;
+  }
+  return compute(shape, [&](const Array<Var>& i) {
+      return ev;
+  }, name, tag);
+}
+
+/*!
+ * \brief Creates an operation that constructs a tensor with the same shape as the
+ * input tensor, filled with fill_value
+ *
+ * \param x The input tensor
+ * \param fill_value The value to be filled
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the full_like operation
+ */
+inline Tensor full_like(const Tensor& x,
+                        const Expr fill_value,
+                        std::string name = "tensor",
+                        std::string tag = kElementWise) {
+  Expr ev = lossless_cast(x->dtype, fill_value);
+  if (!ev.defined()) {
+    LOG(ERROR) << "Can't cast fill_value to " << x->dtype;
+  }
+  return compute(x->shape, [&](const Array<Var>& i) {
+      return ev;
+  }, name, tag);
+}
 }  // namespace topi
 #endif  // TOPI_ELEMWISE_H_
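To make the new header concrete, the following is a minimal sketch (not part of the commit) of driving the C++ elemwise_sum from Python through the topi.cpp bindings that the new test below exercises; the shapes, tensor names, and the llvm target are illustrative choices, and it assumes a TVM/TOPI build of this era.

import numpy as np
import tvm
import topi

shape = (3, 5, 4)
# Three placeholder inputs; the C++ op takes an Array<Tensor> and infers arity from it.
xs = [tvm.placeholder(shape, name="x%d" % i, dtype="float32") for i in range(3)]
esum = topi.cpp.elemwise_sum(xs)
s = tvm.create_schedule([esum.op])
f = tvm.build(s, xs + [esum], "llvm", name="elemwise_sum")

ctx = tvm.context("llvm", 0)
data = [np.random.uniform(size=shape).astype("float32") for _ in xs]
out = tvm.nd.array(np.zeros(shape, dtype="float32"), ctx)
f(*([tvm.nd.array(d, ctx) for d in data] + [out]))
# The result should match the elementwise sum of the inputs.
np.testing.assert_allclose(out.asnumpy(), sum(data), rtol=1e-5)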
topi/python/topi/tensor.py

@@ -2,30 +2,24 @@
 """Elementwise operators"""
 from __future__ import absolute_import as _abs
 import tvm
+from . import cpp
 from . import tag

-@tvm.tag_scope(tag=tag.ELEMWISE)
-def elemwise_sum(xs, num_args):
+def elemwise_sum(xs):
     """Perform element-wise sum on inputs

     Parameters
     ----------
     xs : list of tvm.Tensor
         Input arguments.

-    num_args : int
-        Number of arguments
-
     Returns
     -------
     y : tvm.Tensor
         The result.
     """
-    assert len(xs) > 0, "elemwise sum must have at least one input tensor."
-    def _compute(*i): return sum([x(*i) for x in xs])
-    return tvm.compute(xs[0].shape, _compute)
+    return cpp.elemwise_sum(xs)

 @tvm.tag_scope(tag=tag.ELEMWISE)

@@ -46,7 +40,7 @@ def full(shape, dtype, fill_value):
     y : tvm.Tensor
         The result.
     """
-    return tvm.compute(shape, lambda *i: tvm.const(fill_value, dtype))
+    return cpp.full(shape, dtype, fill_value)

 @tvm.tag_scope(tag=tag.ELEMWISE)

@@ -66,5 +60,4 @@ def full_like(x, fill_value):
     y : tvm.Tensor
         The result.
     """
-    dtype = x.dtype
-    return tvm.compute(x.shape, lambda *i: tvm.const(fill_value, dtype))
+    return cpp.full_like(x, fill_value)
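For illustration only (not part of the diff), here is a minimal sketch of the rewritten Python wrappers in use; the shapes and fill values are arbitrary, and it assumes a build where the topi.cpp module is available.

import tvm
import topi

a = tvm.placeholder((3, 5, 4), name="a", dtype="float32")
b = tvm.placeholder((3, 5, 4), name="b", dtype="float32")

# elemwise_sum no longer takes num_args; the arity comes from the list itself.
c = topi.elemwise_sum([a, b])

# full / full_like now delegate to the C++ ops registered in topi.cc.
ones = topi.full((3, 5, 4), "float32", 1.0)
zeros_like_a = topi.full_like(a, 0.0)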
topi/src/topi.cc

@@ -163,6 +163,21 @@ TVM_REGISTER_GLOBAL("topi.cast")
   *rv = cast(args[0], args[1]);
   });

+TVM_REGISTER_GLOBAL("topi.elemwise_sum")
+.set_body([](TVMArgs args, TVMRetValue *rv) {
+  *rv = elemwise_sum(args[0]);
+  });
+
+TVM_REGISTER_GLOBAL("topi.full")
+.set_body([](TVMArgs args, TVMRetValue *rv) {
+  *rv = full(args[0], args[1], args[2]);
+  });
+
+TVM_REGISTER_GLOBAL("topi.full_like")
+.set_body([](TVMArgs args, TVMRetValue *rv) {
+  *rv = full_like(args[0], args[1]);
+  });
+
 /* Ops from nn.h */
 TVM_REGISTER_GLOBAL("topi.nn.relu")
 .set_body([](TVMArgs args, TVMRetValue *rv) {
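These registrations publish the C++ ops as global packed functions named "topi.elemwise_sum", "topi.full", and "topi.full_like"; the topi.cpp.* calls in the new test resolve to them through TVM's FFI. As a rough, hedged sketch of that plumbing (the direct lookup below is illustrative, not code from the commit, and assumes the TOPI library is loaded):

import tvm
import topi  # importing topi loads the TOPI library and its registered globals

# Fetch the packed function registered by TVM_REGISTER_GLOBAL("topi.full_like").
f_full_like = tvm.get_global_func("topi.full_like")
x = tvm.placeholder((3, 4), dtype="float32", name="x")
y = f_full_like(x, 0.0)  # same op that topi.cpp.full_like(x, 0.0) returns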
topi/tests/python/test_topi_tensor.py

@@ -11,7 +11,7 @@ def verify_elemwise_sum(num_args, dtype):
     for i in range(num_args):
         tvm_placeholders.append(
             tvm.placeholder(shape, name="data"+str(i), dtype=dtype))
-    esum = topi.elemwise_sum(tvm_placeholders, num_args=num_args)
+    esum = topi.elemwise_sum(tvm_placeholders)
     s = tvm.create_schedule([esum.op])

     @memoize("topi.tests.test_topi_elemwise_sum")
topi/tests/python_cpp/test_topi_tensor.py (new file, mode 100644)

"""Test code for tensor operator"""
import numpy as np
import tvm
import topi

def verify_elemwise_sum(num_args, dtype):
    shape = (3, 5, 4)
    tvm_placeholders = []
    for i in range(num_args):
        tvm_placeholders.append(
            tvm.placeholder(shape, name="data"+str(i), dtype=dtype))
    esum = topi.cpp.elemwise_sum(tvm_placeholders)
    s = tvm.create_schedule([esum.op])

    def get_ref_data():
        np_nd = [np.random.uniform(0, 10, size=shape).astype(dtype)
                 for i in range(num_args)]
        return np_nd
    np_nd = get_ref_data()

    def check_device(device):
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        ctx = tvm.context(device, 0)
        out = tvm.nd.array(np.zeros(shape, dtype=dtype), ctx)
        f = tvm.build(s, tvm_placeholders + [esum], device, name="elemwise_sum")
        tvm_nd = [tvm.nd.array(nd, ctx) for nd in np_nd] + [out]
        f(*tvm_nd)
        np_out = np.sum(np.array(np_nd), axis=0)
        np.testing.assert_allclose(out.asnumpy(), np_out, rtol=1e-5)

    for device in ["llvm"]:
        check_device(device)

def verify_full(shape, dtype, fill_value):
    A = tvm.placeholder(shape, dtype=dtype, name="A")
    B = topi.cpp.full_like(A, fill_value)
    C = topi.cpp.full(shape, dtype, fill_value)
    s1 = tvm.create_schedule([B.op])
    s2 = tvm.create_schedule([C.op])

    def get_ref_data():
        return np.full(shape, fill_value, dtype)
    np_nd = get_ref_data()

    def check_device(device):
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        target = topi.cpp.TEST_create_target(device)
        ctx = tvm.context(device, 0)
        out = tvm.nd.array(np.zeros(shape, dtype=dtype), ctx)
        f = tvm.build(s1, [A, B], device, name="full_like")
        f(tvm.nd.array(np.zeros(shape, dtype), ctx), out)
        np.testing.assert_allclose(out.asnumpy(), np_nd, rtol=1e-5)

        f = tvm.build(s2, [C], device, name="full")
        f(out)
        np.testing.assert_allclose(out.asnumpy(), np_nd, rtol=1e-5)

    for device in ["llvm"]:
        check_device(device)

def test_elemwise_sum():
    verify_elemwise_sum(1, "float32")
    verify_elemwise_sum(5, "float32")
    verify_elemwise_sum(4, "int32")

def test_full():
    verify_full((3, 4, 5), "float32", 3.14)
    verify_full((10,), "int32", 7)

if __name__ == "__main__":
    test_elemwise_sum()
    test_full()