wenyuanbo / tic · Commits · c29205da

Commit c29205da, authored Mar 30, 2018 by Tianqi Chen, committed by GitHub on Mar 30, 2018
[DOCS] Try upgrade build (#1066)
parent 1229e2d0
Showing 23 changed files with 75 additions and 83 deletions
Jenkinsfile                                          +1  -38
python/tvm/_ffi/function.py                          +2   -2
python/tvm/api.py                                    +1   -0
tests/ci_build/Dockerfile.emscripten                 +4   -0
tests/ci_build/Dockerfile.gpu                        +4   -5
tests/ci_build/install/ubuntu_install_core.sh        +1   -1
tests/ci_build/install/ubuntu_install_llvm.sh        +5   -0
tests/ci_build/install/ubuntu_install_opencl.sh      +2   -2
tests/ci_build/install/ubuntu_install_sphinx.sh      +1   -1
topi/python/topi/cuda/conv2d_transpose_nchw.py       +1   -0
topi/python/topi/cuda/dense.py                       +1   -0
topi/python/topi/cuda/depthwise_conv2d.py            +2   -0
topi/python/topi/cuda/pooling.py                     +2   -0
topi/python/topi/cuda/reduction.py                   +2   -0
topi/python/topi/mali/dense.py                       +1   -0
topi/python/topi/mali/depthwise_conv2d.py            +1   -0
topi/python/topi/nn/bnn.py                           +1   -0
topi/python/topi/opengl/conv2d_nchw.py               +1   -0
topi/python/topi/opengl/dense.py                     +1   -0
topi/python/topi/opengl/pooling.py                   +2   -0
topi/python/topi/rasp/depthwise_conv2d.py            +1   -0
topi/python/topi/x86/binary_dense.py                 +1   -0
tutorials/deployment/cross_compilation_and_rpc.py   +37  -34
Jenkinsfile

@@ -87,7 +87,6 @@ stage('Build') {
     cp make/config.mk .
     echo USE_CUDNN=1 >> config.mk
     echo USE_CUDA=1 >> config.mk
-    echo USE_OPENCL=1 >> config.mk
     echo USE_OPENGL=1 >> config.mk
     echo LLVM_CONFIG=llvm-config-4.0 >> config.mk
     echo USE_RPC=1 >> config.mk
@@ -105,6 +104,7 @@ stage('Build') {
     sh "mv lib/libtvm.so lib/libtvm_llvm60.so"
     pack_lib('gpu', tvm_multilib)
     sh """
+    echo USE_OPENCL=1 >> config.mk
     echo USE_ROCM=1 >> config.mk
     echo ROCM_PATH=/opt/rocm >> config.mk
     echo USE_VULKAN=1 >> config.mk
@@ -152,31 +152,6 @@ stage('Build') {
       pack_lib('i386', tvm_multilib)
     }
   }
 },
-'web': {
-  node('emcc') {
-    ws('workspace/tvm/build-weblib') {
-      init_git()
-      sh """
-      cp make/config.mk .
-      echo USE_CUDA=0 >> config.mk
-      echo USE_OPENCL=0 >> config.mk
-      echo LLVM_CONFIG=llvm-config >> config.mk
-      echo USE_RPC=0 >> config.mk
-      """
-      sh "${docker_run} emscripten echo testing javascript..."
-      timeout(time: max_time, unit: 'MINUTES') {
-        try {
-          sh "${docker_run} emscripten ./tests/scripts/task_web_build.sh"
-        } catch (exc) {
-          echo 'Incremental compilation failed. Fall back to build from scratch'
-          sh "${docker_run} emscripten make clean"
-          sh "${docker_run} emscripten ./tests/scripts/task_web_build.sh"
-        }
-      }
-      pack_lib('weblib', tvm_lib)
-    }
-  }
-}
 }
 }
@@ -256,18 +231,6 @@ stage('Integration Test') {
     }
   }
 },
-'web': {
-  node('emcc') {
-    ws('workspace/tvm/it-weblib') {
-      init_git()
-      unpack_lib('weblib', tvm_lib)
-      sh "${docker_run} emscripten echo testing javascript..."
-      timeout(time: max_time, unit: 'MINUTES') {
-        sh "${docker_run} emscripten ./tests/scripts/task_web_test.sh"
-      }
-    }
-  }
-},
 'docs': {
   node('GPU' && 'linux') {
     ws('workspace/tvm/docs-python-gpu') {
python/tvm/_ffi/function.py

@@ -181,9 +181,9 @@ def register_func(func_name, f=None, override=False):
         myf = convert_to_tvm_func(myf)
         check_call(_LIB.TVMFuncRegisterGlobal(
             c_str(func_name), myf.handle, ioverride))
         return myf
     if f:
-        register(f)
-    else:
+        return register(f)
     return register
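register_func supports both a decorator form and a direct-call form; with the change above, the direct-call form also returns the registered function instead of falling through. A minimal usage sketch, assuming a TVM build of this era (the global names here are made up for illustration):

import tvm

# Decorator form: registers add_one under a global name, keeps it callable.
@tvm.register_func("example.add_one")
def add_one(x):
    return x + 1

# Direct-call form: after this fix it returns the registered function too.
def add_two(x):
    return x + 2

add_two = tvm.register_func("example.add_two", add_two)

# Registered functions are retrievable as global PackedFuncs.
f = tvm.get_global_func("example.add_one")
assert f(10) == 11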
python/tvm/api.py

@@ -652,6 +652,7 @@ def comm_reducer(fcombine, fidentity, name="reduce"):
                        for i in range(size))
         return outputs[0] if size == 1 else outputs
+    # pylint: disable=keyword-arg-before-vararg
     def reducer(expr, axis, where=None, *args):
         if isinstance(axis, (_schedule.IterVar, list, tuple)):
             assert not args
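For context, comm_reducer is the factory behind built-in reductions such as tvm.sum; the suppressed warning fires because reducer places the keyword argument where=None before *args. A short sketch of defining a custom reduction with it, following the API used in this file (assuming a TVM 0.x install):

import tvm

# A commutative product reduction: combine with *, identity element 1.
product = tvm.comm_reducer(lambda x, y: x * y,
                           lambda t: tvm.const(1, dtype=t),
                           name="product")

n = tvm.var("n")
m = tvm.var("m")
A = tvm.placeholder((n, m), name="A")
k = tvm.reduce_axis((0, m), name="k")
# B[i] is the product of A[i, k] over the reduction axis k.
B = tvm.compute((n,), lambda i: product(A[i, k], axis=k), name="B")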
tests/ci_build/Dockerfile.emscripten

@@ -15,4 +15,8 @@ RUN bash /install/ubuntu_install_emscripten.sh
 COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
 RUN bash /install/ubuntu_install_python_package.sh
 RUN chmod a+rwx -R /emsdk-portable
+RUN cp -r /emsdk-portable /emsdk-portable-backup
+RUN mv /emsdk-portable /emsdk-portable-x
+RUN mv /emsdk-portable-backup /emsdk-portable
+RUN cp /root/.emscripten /emsdk-portable/
tests/ci_build/Dockerfile.gpu

 FROM nvidia/cuda:8.0-cudnn7-devel
 # Base scripts
 RUN apt-get update --fix-missing
 COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
 RUN bash /install/ubuntu_install_core.sh
@@ -12,9 +11,6 @@ RUN bash /install/ubuntu_install_python.sh
 COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
 RUN bash /install/ubuntu_install_llvm.sh
-COPY install/ubuntu_install_opencl.sh /install/ubuntu_install_opencl.sh
-RUN bash /install/ubuntu_install_opencl.sh
 COPY install/ubuntu_install_iverilog.sh /install/ubuntu_install_iverilog.sh
 RUN bash /install/ubuntu_install_iverilog.sh
@@ -40,8 +36,11 @@ RUN bash /install/ubuntu_install_rocm.sh
 COPY install/ubuntu_install_opengl.sh /install/ubuntu_install_opengl.sh
 RUN bash /install/ubuntu_install_opengl.sh
+COPY install/ubuntu_install_opencl.sh /install/ubuntu_install_opencl.sh
+RUN bash /install/ubuntu_install_opencl.sh
 # Enable doxygen for c++ doc build
-RUN apt-get install -y doxygen graphviz
+RUN apt-get update && apt-get install -y doxygen graphviz
 # Install vulkan
 COPY install/ubuntu_install_vulkan.sh /install/ubuntu_install_vulkan.sh
tests/ci_build/install/ubuntu_install_core.sh

 # install libraries for building c++ core on ubuntu
-apt-get install -y --no-install-recommends --force-yes \
+apt-get update && apt-get install -y --no-install-recommends --force-yes \
         git make libgtest-dev cmake wget unzip libtinfo-dev libz-dev \
         libcurl4-openssl-dev libopenblas-dev g++ sudo
tests/ci_build/install/ubuntu_install_llvm.sh

@@ -8,6 +8,11 @@ echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
 echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
      >> /etc/apt/sources.list.d/llvm.list
+echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
+     >> /etc/apt/sources.list.d/llvm.list
+echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
+     >> /etc/apt/sources.list.d/llvm.list
+
 echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
      >> /etc/apt/sources.list.d/llvm.list
 echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
tests/ci_build/install/ubuntu_install_opencl.sh

 # Install OpenCL runtime in nvidia docker.
-apt-get install -y --no-install-recommends --force-yes \
-        ocl-icd-libopencl1 \
+apt-get update && apt-get install -y --no-install-recommends --force-yes \
+        ocl-icd-opencl-dev \
         clinfo && \
     rm -rf /var/lib/apt/lists/*
tests/ci_build/install/ubuntu_install_sphinx.sh

-pip install sphinx==1.6.2 sphinx-gallery sphinx_rtd_theme matplotlib Image commonmark>=0.7.3 docutils>=0.11
+pip install sphinx sphinx-gallery sphinx_rtd_theme matplotlib Image commonmark>=0.7.3 docutils>=0.11
topi/python/topi/cuda/conv2d_transpose_nchw.py

@@ -74,6 +74,7 @@ def schedule_conv2d_transpose_small_batch(outs):
         conv2d_56_64_64(s, Filter, temp_S, Filter_S, Out, Out_L)

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_injective(OP.tag):
             if OP not in s.outputs:
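The same one-line change (a docstring on the internal traverse helper, presumably to satisfy the upgraded linter) repeats across the topi schedules below. For readers unfamiliar with the pattern these helpers share, a simplified, self-contained sketch of the traversal logic (the helper names are hypothetical, not the topi implementation):

def make_traverse(s, tag, schedule_root):
    """Build a traversal that inlines injective ops and schedules the root."""
    visited = set()

    def traverse(op):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_injective(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            # walk up to the producers of this op's inputs
            for tensor in op.input_tensors:
                if tensor.op not in visited:
                    visited.add(tensor.op)
                    traverse(tensor.op)
        else:
            # the compute-intensive stage: apply the real schedule here
            schedule_root(op)

    return traverse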
topi/python/topi/cuda/dense.py

@@ -87,6 +87,7 @@ def schedule_dense(outs):
         s[Out].set_store_predicate(thread_x.var.equal(0))

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
topi/python/topi/cuda/depthwise_conv2d.py

@@ -102,6 +102,7 @@ def schedule_depthwise_conv2d_nchw(outs):
         s[FS].bind(tx, thread_x)

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -178,6 +179,7 @@ def schedule_depthwise_conv2d_nhwc(outs):
         s[FS].bind(fused, thread_x)

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
topi/python/topi/cuda/pooling.py

@@ -47,6 +47,7 @@ def schedule_global_pool(outs):
         s[Pool].compute_at(s[Out], tx)

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -101,6 +102,7 @@ def schedule_pool(outs):
         s[Pool].compute_at(s[Out], tx)

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
topi/python/topi/cuda/reduction.py

@@ -87,6 +87,7 @@ def schedule_reduce(outs):
     sch = tvm.create_schedule([x.op for x in outs])

     def traverse_before_reduce(operator):
+        """Internal travserse function"""
         if isinstance(operator, tvm.tensor.PlaceholderOp):
             return
         elif tag.is_injective(operator.tag):
@@ -97,6 +98,7 @@ def schedule_reduce(outs):
             raise RuntimeError("Unsupported operator: %s" % operator.tag)

     def traverse_after_reduce(operator):
+        """Internal travserse function"""
         if tag.is_broadcast(operator.tag):
             raise RuntimeError("Not yet support ewise after reduce")
         elif operator.tag == 'comm_reduce':
topi/python/topi/mali/dense.py

@@ -82,6 +82,7 @@ def schedule_dense(outs):
         # print(tvm.lower(s, [data, weight, bias, outs[0]], simple_mode=True))

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
topi/python/topi/mali/depthwise_conv2d.py

@@ -87,6 +87,7 @@ def schedule_depthwise_conv2d_nchw(outs):
         s[conv].compute_at(s[output], ji)

     def traverse(op):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(op.tag):
             if op not in s.outputs:
topi/python/topi/nn/bnn.py

@@ -43,6 +43,7 @@ def binarize_pack(data, axis=None, name="PackedInput"):
             if j == 31:
                 return packed
             packed = packed << 1
+        raise RuntimeError("not resach")
     return tvm.compute(oshape, _binarize_pack, name=name,
                        tag='binarize_pack')
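binarize_pack packs 32 sign bits of the input into each uint32 output element; the added raise makes the inner function's return paths explicit, since the loop always returns at j == 31. A rough NumPy sketch of the packing semantics (a paraphrase for illustration, not the topi code):

import numpy as np

def binarize_pack_np(data, axis=-1):
    """Pack sign bits of `data` into uint32 words along `axis` (32x smaller)."""
    data = np.moveaxis(data, axis, -1)
    assert data.shape[-1] % 32 == 0, "packed axis must be a multiple of 32"
    bits = (data >= 0).astype(np.uint32)                      # 1 if non-negative
    bits = bits.reshape(data.shape[:-1] + (-1, 32))
    weights = (1 << np.arange(31, -1, -1)).astype(np.uint32)  # first bit -> MSB
    packed = (bits * weights).sum(axis=-1).astype(np.uint32)
    return np.moveaxis(packed, -1, axis)

x = np.random.randn(4, 64).astype("float32")
y = binarize_pack_np(x, axis=1)   # shape (4, 2), dtype uint32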
topi/python/topi/opengl/conv2d_nchw.py

@@ -31,6 +31,7 @@ def schedule_conv2d_nchw(outs):
         s[data].opengl()

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
topi/python/topi/opengl/dense.py

@@ -31,6 +31,7 @@ def schedule_dense(outs):
         s[Out].opengl()

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
topi/python/topi/opengl/pooling.py

@@ -30,6 +30,7 @@ def schedule_global_pool(outs):
         s[Out].opengl()

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -75,6 +76,7 @@ def schedule_pool(outs):
         s[Out].opengl()

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
topi/python/topi/rasp/depthwise_conv2d.py

@@ -164,6 +164,7 @@ def schedule_depthwise_conv2d(outs):
     s = tvm.create_schedule([x.op for x in outs])

     def traverse(op):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(op.tag):
             if op not in s.outputs:
topi/python/topi/x86/binary_dense.py

@@ -35,6 +35,7 @@ def schedule_binary_dense(outs):
         s[Out].vectorize(xi)

     def traverse(OP):
+        """Internal travserse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
tutorials/deployment/cross_compilation_and_rpc.py

@@ -108,8 +108,6 @@ import tvm
 import numpy as np
 from tvm.contrib import rpc, util

-server = rpc.Server(host='0.0.0.0', port=9090, use_popen=True)
-
 ######################################################################
 # Declare and Cross Compile Kernel on Local Machine
 # -------------------------------------------------
@@ -241,47 +239,52 @@ print('%g secs/op' % cost)
 # But here we set 'llvm' to enable this tutorial to run locally.
 #
 # Also we need to build the runtime with the flag `USE_OPENCL=1`.
-# build kernel (different from cpu, we need bind axis for OpenCL)
-s = tvm.create_schedule(B.op)
-xo, xi = s[B].split(B.op.axis[0], factor=32)
-s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
-s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
-f = tvm.build(s, [A, B], "opencl", target_host="llvm", name="myadd")
+#
+# The following functions shows how we can deploy CL
+def deploy_cl():
+    s = tvm.create_schedule(B.op)
+    xo, xi = s[B].split(B.op.axis[0], factor=32)
+    s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
+    s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
+    f = tvm.build(s, [A, B], "opencl", target_host="llvm", name="myadd")

-# save files
-path_o = temp.relpath("myadd.o")
-path_cl = temp.relpath("myadd.cl")
-path_json = temp.relpath("myadd.tvm_meta.json")
-f.save(path_o)
-f.imported_modules[0].save(path_cl)
+    # save files
+    path_o = temp.relpath("myadd.o")
+    path_cl = temp.relpath("myadd.cl")
+    path_json = temp.relpath("myadd.tvm_meta.json")
+    f.save(path_o)
+    f.imported_modules[0].save(path_cl)

-# upload files
-remote.upload(path_o)
-remote.upload(path_cl)
-remote.upload(path_json)
+    # upload files
+    remote.upload(path_o)
+    remote.upload(path_cl)
+    remote.upload(path_json)

-# load files on remote device
-fhost = remote.load_module("myadd.o")
-fdev = remote.load_module("myadd.cl")
-fhost.import_module(fdev)
+    # load files on remote device
+    fhost = remote.load_module("myadd.o")
+    fdev = remote.load_module("myadd.cl")
+    fhost.import_module(fdev)

-# run
-ctx = remote.cl(0)
-a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
-b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
-fhost(a, b)
-np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
+    # run
+    ctx = remote.cl(0)
+    a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
+    b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
+    fhost(a, b)
+    np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)

 #####################################################################
 # Instead of uploading files separately, there is a more convinient way.
 # You can export libraray as a tar ball.
-path_tar = temp.relpath("myadd.tar")
-f.export_library(path_tar)
-remote.upload(path_tar)
-fhost = remote.load_module("myadd.tar")
-fhost(a, b)
-np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
+# The following functions shows how we can deploy by tar ball
+def deploy_cl_by_tar():
+    path_tar = temp.relpath("myadd.tar")
+    f.export_library(path_tar)
+    remote.upload(path_tar)
+    fhost = remote.load_module("myadd.tar")
+    fhost(a, b)
+    np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)

 # terminate the server after experiment
 server.terminate()
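With the OpenCL section wrapped in functions, the docs build no longer executes it; a reader with a real device would call the functions explicitly. A hedged sketch of such a driver (run_opencl is a made-up flag; it assumes an OpenCL runtime and a reachable RPC server):

run_opencl = False  # flip to True on a machine with an OpenCL runtime

if run_opencl:
    deploy_cl()         # build, save, upload, and run the kernel step by step
    deploy_cl_by_tar()  # same flow, shipping one exported .tar library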