wenyuanbo / tic · Commits

Commit 1bc5d0ad
authored Sep 20, 2017 by Yuwei HU, committed by Tianqi Chen on May 29, 2018

register softmax (#16)

parent 48038a9c

Showing 3 changed files with 66 additions and 2 deletions:

    nnvm/python/nnvm/top/nn.py                        +19  -1
    nnvm/tests/python/compiler/test_top_level1.py     +46  -0
    nnvm/tests/python/unittest/test_top_level1.py      +1  -1
nnvm/python/nnvm/top/nn.py

@@ -23,7 +23,6 @@ def compute_conv2d(attrs, inputs):
         out = topi.broadcast_add(out, bias)
     return out
 
 @reg.register_schedule("conv2d")
 def schedule_conv2d(_, outs, target):
     """Schedule definition of conv2d"""
@@ -33,3 +32,22 @@ def schedule_conv2d(_, outs, target):
     return tvm.create_schedule([x.op for x in outs])
 
 reg.register_pattern("conv2d", OpPattern.COMPLEX)
+
+# softmax
+@reg.register_compute("softmax")
+def compute_softmax(attrs, inputs):
+    """Compute definition of softmax"""
+    axis = attrs.get_int("axis")
+    assert axis == -1, "only support axis == -1 for now"
+    return topi.nn.softmax(inputs[0])
+
+@reg.register_schedule("softmax")
+def schedule_softmax(_, outs, target):
+    """Schedule definition of softmax"""
+    if target == "cuda":
+        return topi.cuda.schedule_softmax(outs)
+    # naive schedule
+    return tvm.create_schedule([x.op for x in outs])
+
+reg.register_pattern("softmax", OpPattern.COMPLEX)
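For reference, compute_softmax above simply lowers to topi.nn.softmax over the last axis. A minimal NumPy sketch of the same math (not part of the commit), assuming a 2-D (batch, class) input like the one used in the new test below, written in the usual numerically stable form:

# Sketch only, not part of the commit: what softmax over axis == -1 computes.
import numpy as np

def softmax_ref(x):
    """Row-wise softmax for a 2-D array of shape (batch, num_classes)."""
    m = np.max(x, axis=-1, keepdims=True)          # per-row max, for numerical stability
    e = np.exp(x - m)                              # shifted exponentials
    return e / np.sum(e, axis=-1, keepdims=True)   # each row sums to 1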
nnvm/tests/python/compiler/test_top_level1.py (new file, mode 100644)

import numpy as np
import tvm
import topi
import nnvm.symbol as sym
import nnvm.compiler
import nnvm.runtime

USE_GPU = True

def default_target():
    if USE_GPU:
        return 'cuda'
    else:
        return 'llvm'

def default_ctx():
    if USE_GPU:
        return tvm.gpu(0)
    else:
        return tvm.cpu(0)

def test_softmax():
    x = sym.Variable("x")
    y = sym.softmax(x)
    dtype = "float32"
    dshape = (10, 1000)
    oshape = dshape
    graph, lib = nnvm.compiler.build(y, default_target(), {"x": dshape})
    m = nnvm.runtime.create(graph, lib, default_ctx())
    # get member functions
    set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
    # set input
    data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
    set_input("x", data)
    # execute
    run()
    # get outputs
    out = tvm.nd.empty(oshape, dtype)
    get_output(0, out)
    y_np = topi.testing.softmax_python(data.asnumpy())
    np.testing.assert_allclose(out.asnumpy(), y_np, rtol=1e-5)

if __name__ == "__main__":
    test_softmax()
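The test above defaults to the CUDA path (USE_GPU = True), which goes through topi.cuda.schedule_softmax. A hedged sketch (not part of the commit) of also exercising the naive fallback schedule by building explicitly for the llvm target; it reuses only the APIs already shown above and assumes llvm codegen is enabled in the local TVM build:

# Sketch only, not part of the commit: run the same check through the
# non-CUDA ("naive schedule") path registered in nn.py above.
import numpy as np
import tvm
import topi
import nnvm.symbol as sym
import nnvm.compiler
import nnvm.runtime

def check_softmax_cpu(dshape=(10, 1000), dtype="float32"):
    x = sym.Variable("x")
    y = sym.softmax(x)
    graph, lib = nnvm.compiler.build(y, "llvm", {"x": dshape})  # llvm target -> naive schedule
    m = nnvm.runtime.create(graph, lib, tvm.cpu(0))
    data = np.random.uniform(size=dshape).astype(dtype)
    m["set_input"]("x", tvm.nd.array(data))
    m["run"]()
    out = tvm.nd.empty(dshape, dtype)
    m["get_output"](0, out)
    np.testing.assert_allclose(out.asnumpy(),
                               topi.testing.softmax_python(data),
                               rtol=1e-5)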
nnvm/tests/python/unittest/test_top_level1.py

@@ -5,7 +5,7 @@ def test_dense():
     x1 = sym.dense(x, units=3, name="dense")
     x2 = sym.flatten(x1)
     x3 = sym.softmax(x2)
-    assert x2.list_input_names() == ['x', 'dense_weight', 'dense_bias']
+    assert x3.list_input_names() == ['x', 'dense_weight', 'dense_bias']
 
 def test_concatenate_split():
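The one-line change above moves the assert from x2 to x3: softmax has no learnable parameters, so attaching it changes the graph's output but not its list of inputs. A quick sketch (not part of the commit) of that property:

# Sketch only, not part of the commit: softmax adds no new trainable inputs.
import nnvm.symbol as sym

x = sym.Variable("x")
x2 = sym.flatten(sym.dense(x, units=3, name="dense"))
x3 = sym.softmax(x2)
assert x2.list_input_names() == x3.list_input_names()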