Commit cc7cbbe7, authored Nov 19, 2017 by Lianmin Zheng; committed by Tianqi Chen on May 29, 2018
[BUILD] add target_host to compiler.build (#240)
Parent: 2b15684f
Showing 3 changed files with 32 additions and 4 deletions (+32 / -4):

nnvm/include/nnvm/graph.h                   +11 / -0
nnvm/python/nnvm/compiler/build_module.py   +16 / -3
nnvm/src/compiler/graph_fuse.cc              +5 / -1
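The net effect of the commit is a new target_host argument on nnvm.compiler.build that is threaded through to tvm.build. A minimal usage sketch, assuming an NNVM/TVM build with CUDA enabled; the symbol and shapes are illustrative, not from this commit:

import nnvm.symbol as sym
import nnvm.compiler

# A toy one-operator graph; any NNVM symbol works here.
x = sym.Variable("x")
y = sym.elemwise_add(x, x)

# Device code is generated for `target`; the host-side glue that sets up
# launch dimensions and driver calls is generated for `target_host`.
graph, lib, params = nnvm.compiler.build(
    y, target="cuda", shape={"x": (1, 16)}, target_host="llvm")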
nnvm/include/nnvm/graph.h
@@ -48,6 +48,12 @@ class Graph {
   template<typename T>
   inline const T& GetAttr(const std::string& attr_name) const;
+  /*!
+   * \brief Check whether the graph has a specific attribute.
+   * \param attr_name the name of the attribute
+   * \return a boolean result
+   */
+  inline bool HasAttr(const std::string& attr_name) const;
   /*!
    * \brief Get a move copy of the attribute, implement copy on write semantics.
    *  The content is moved if the reference counter of shared_ptr is 1.
    *  The attribute is erased from attrs after the call.
@@ -226,6 +232,11 @@ inline const T& Graph::GetAttr(const std::string& attr_name) const {
   return nnvm::get<T>(*it->second);
 }
 
+inline bool Graph::HasAttr(const std::string& attr_name) const {
+  auto it = attrs.find(attr_name);
+  return it != attrs.end();
+}
+
 template<typename T>
 inline T Graph::MoveCopyAttr(const std::string& attr_name) {
   auto it = attrs.find(attr_name);
nnvm/python/nnvm/compiler/build_module.py
@@ -112,8 +112,10 @@ def _lower(sch, inputs, func_name, graph):
 
 
 @tvm.register_func("nnvm.compiler.build_target")
-def _build(funcs, target):
-    return tvm.build(funcs, target=target)
+def _build(funcs, target, target_host):
+    if target_host == "":
+        target_host = None
+    return tvm.build(funcs, target=target, target_host=target_host)
 
 
 def _update_shape_dtype(shape, dtype, params):
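The empty-string check exists because the C++ caller in graph_fuse.cc holds target_host in a std::string, so an unset attribute crosses the FFI boundary as "" rather than None. A standalone sketch of that normalization pattern; the global function name demo.build_target is hypothetical, not part of this codebase:

import tvm

@tvm.register_func("demo.build_target")
def _demo_build(target, target_host):
    # An unset host arrives from C++ as "", not None.
    if target_host == "":
        target_host = None
    return "target=%s, target_host=%s" % (target, target_host)

fbuild = tvm.get_global_func("demo.build_target")
print(fbuild("cuda", ""))      # target=cuda, target_host=None
print(fbuild("cuda", "llvm"))  # target=cuda, target_host=llvm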
@@ -161,7 +163,7 @@ def optimize(graph, shape, dtype="float32"):
     return graph
 
 
-def build(graph, target=None, shape=None, dtype="float32", params=None):
+def build(graph, target=None, shape=None, dtype="float32", params=None, target_host=None):
     """Build graph into runtime library.
 
     The build function will optimize the graph and do the compilation.
@@ -189,6 +191,15 @@ def build(graph, target=None, shape=None, dtype="float32", params=None):
         during inference time. Used for pre-compute
         folding optimization.
 
+    target_host : str or :any:`tvm.target.Target`, optional
+        Host compilation target, if target is device.
+        When TVM compiles a device-specific program such as CUDA,
+        we also need host (CPU) side code to interact with the driver
+        to set up the dimensions and parameters correctly.
+        target_host is used to specify the host-side codegen target.
+        By default, llvm is used if it is enabled,
+        otherwise a stackvm interpreter is used.
+
     Returns
     -------
     graph : Graph
@@ -228,6 +239,8 @@ def build(graph, target=None, shape=None, dtype="float32", params=None):
     graph = graph_attr.set_shape_inputs(graph, shape)
     graph = graph_attr.set_dtype_inputs(graph, dtype)
     graph._set_json_attr("target", str(target), "str")
+    if target_host is not None:
+        graph._set_json_attr("target_host", str(target_host), "str")
     if cfg.pass_enabled("OpFusion"):
         graph._set_json_attr("opt_level", 1, "int")
     else:
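Note that the attribute is only attached when the caller supplied one, which is why the C++ pass below must probe with the new Graph::HasAttr before reading it. A small sketch of the attribute round-trip on the Python side, assuming nnvm is importable (and using the same private _set_json_attr helper the diff itself uses):

import nnvm.symbol as sym
import nnvm.graph as graph

x = sym.Variable("x")
g = graph.create(sym.elemwise_add(x, x))
g._set_json_attr("target", "cuda", "str")
# Only set when provided; otherwise the C++ side falls back via HasAttr.
g._set_json_attr("target_host", "llvm", "str")
print(g.json_attr("target_host"))  # llvm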
nnvm/src/compiler/graph_fuse.cc
@@ -219,6 +219,10 @@ nnvm::Graph GraphFuseCompile(nnvm::Graph g) {
   const std::vector<TOpPattern>& pattern_vec =
       g.GetAttr<std::vector<TOpPattern> >("pattern");
   std::string target = g.GetAttr<std::string>("target");
+  std::string target_host;
+  if (g.HasAttr("target_host"))
+    target_host = g.GetAttr<std::string>("target_host");
+
   std::vector<FuseEntry> fuse_vec(idx.num_nodes());
   // setup inputs and placeholder.
   for (uint32_t nid = 0; nid < idx.num_nodes(); ++nid) {
@@ -398,7 +402,7 @@ nnvm::Graph GraphFuseCompile(nnvm::Graph g) {
   ret.attrs["dltype"] = std::make_shared<any>(std::move(new_dltype_vec));
   // Setup module
   static const PackedFunc& fbuild = GetPackedFunc("nnvm.compiler.build_target");
-  tvm::runtime::Module module = fbuild(func_list, target);
+  tvm::runtime::Module module = fbuild(func_list, target, target_host);
   ret.attrs["module"] = std::make_shared<any>(std::move(module));
   ret = nnvm::ApplyPass(ret, "PlanMemory");
   return ret;
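To see what the extra argument buys at the TVM level, here is a minimal sketch of a direct tvm.build call in the 0.x-era API this commit targets, assuming CUDA is enabled: the device kernel is compiled for target, while the code that launches it is generated for target_host.

import tvm

n = 1024
A = tvm.placeholder((n,), name="A")
B = tvm.compute((n,), lambda i: A[i] + 1.0, name="B")
s = tvm.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=64)
s[B].bind(bx, tvm.thread_axis("blockIdx.x"))
s[B].bind(tx, tvm.thread_axis("threadIdx.x"))
# The host module (llvm) wraps and launches the imported CUDA module.
mod = tvm.build(s, [A, B], target="cuda", target_host="llvm")
print(mod.imported_modules[0].get_source())  # generated CUDA kernel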