Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
T
tic
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
wenyuanbo
tic
Commits
4d314833
Unverified
Commit
4d314833
authored
Jul 21, 2019
by
Tianqi Chen
Committed by
GitHub
Jul 21, 2019
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
[CI] Upgrade LLVM envs (#3590)
parent
5b5ae980
Show whitespace changes
Inline
Side-by-side
Showing
22 changed files
with
74 additions
and
71 deletions
+74
-71
Jenkinsfile
+7
-7
docker/Dockerfile.ci_gpu
+0
-2
docker/Dockerfile.demo_android
+2
-2
docker/install/ubuntu_install_keras.sh
+0
-24
docker/install/ubuntu_install_llvm.sh
+5
-5
docker/install/ubuntu_install_mxnet.sh
+1
-1
docker/install/ubuntu_install_onnx.sh
+1
-1
docker/install/ubuntu_install_tensorflow.sh
+1
-1
python/tvm/contrib/clang.py
+3
-1
python/tvm/relay/grammar/py3/RelayLexer.py
+1
-1
python/tvm/relay/grammar/py3/RelayParser.py
+1
-1
python/tvm/relay/grammar/py3/RelayVisitor.py
+1
-1
src/arithmetic/int_set.cc
+1
-1
src/codegen/llvm/codegen_cpu.cc
+30
-6
src/codegen/llvm/codegen_cpu.h
+4
-1
src/codegen/llvm/codegen_llvm.cc
+2
-4
src/codegen/llvm/codegen_llvm.h
+5
-5
src/lang/expr_operator.cc
+0
-1
src/relay/ir/expr_functor.cc
+1
-1
src/relay/pass/eta_expand.cc
+1
-3
tests/python/frontend/mxnet/test_forward.py
+3
-2
tests/python/unittest/test_codegen_llvm.py
+4
-0
No files found.
Jenkinsfile
View file @
4d314833
...
...
@@ -39,9 +39,9 @@
// - Periodically cleanup the old versions on local workers
//
ci_lint
=
"tvmai/ci-lint:v0.51"
ci_gpu
=
"tvmai/ci-gpu:v0.5
2
"
ci_cpu
=
"tvmai/ci-cpu:v0.5
0
"
ci_i386
=
"tvmai/ci-i386:v0.5
0
"
ci_gpu
=
"tvmai/ci-gpu:v0.5
3
"
ci_cpu
=
"tvmai/ci-cpu:v0.5
1
"
ci_i386
=
"tvmai/ci-i386:v0.5
1
"
// tvm libraries
tvm_runtime
=
"build/libtvm_runtime.so, build/config.cmake"
...
...
@@ -136,7 +136,7 @@ stage('Build') {
echo set\\(USE_CUDA ON\\) >> config.cmake
echo set\\(USE_OPENGL ON\\) >> config.cmake
echo set\\(USE_MICRO ON\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-
6.0
\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-
7
\\) >> config.cmake
echo set\\(USE_NNPACK ON\\) >> config.cmake
echo set\\(NNPACK_PATH /NNPACK/build/\\) >> config.cmake
echo set\\(USE_RPC ON\\) >> config.cmake
...
...
@@ -161,7 +161,7 @@ stage('Build') {
echo set\\(USE_VULKAN ON\\) >> config.cmake
echo set\\(USE_MICRO ON\\) >> config.cmake
echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
echo set\\(CMAKE_CXX_COMPILER clang-
6.0
\\) >> config.cmake
echo set\\(CMAKE_CXX_COMPILER clang-
7
\\) >> config.cmake
echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
"""
make
(
ci_gpu
,
'build2'
,
'-j2'
)
...
...
@@ -179,7 +179,7 @@ stage('Build') {
echo set\\(USE_SORT ON\\) >> config.cmake
echo set\\(USE_MICRO ON\\) >> config.cmake
echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-
4.0
\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-
8
\\) >> config.cmake
echo set\\(USE_NNPACK ON\\) >> config.cmake
echo set\\(NNPACK_PATH /NNPACK/build/\\) >> config.cmake
echo set\\(USE_ANTLR ON\\) >> config.cmake
...
...
@@ -210,7 +210,7 @@ stage('Build') {
echo set\\(USE_SORT ON\\) >> config.cmake
echo set\\(USE_RPC ON\\) >> config.cmake
echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-
5
.0\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-
4
.0\\) >> config.cmake
echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake
echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
"""
...
...
docker/Dockerfile.ci_gpu
View file @
4d314833
...
...
@@ -72,8 +72,6 @@ RUN bash /install/ubuntu_install_coreml.sh
COPY install/ubuntu_install_tensorflow.sh /install/ubuntu_install_tensorflow.sh
RUN bash /install/ubuntu_install_tensorflow.sh
COPY install/ubuntu_install_keras.sh /install/ubuntu_install_keras.sh
RUN bash /install/ubuntu_install_keras.sh
COPY install/ubuntu_install_darknet.sh /install/ubuntu_install_darknet.sh
RUN bash /install/ubuntu_install_darknet.sh
...
...
docker/Dockerfile.demo_android
View file @
4d314833
...
...
@@ -29,8 +29,8 @@ RUN bash /install/ubuntu_install_python.sh
COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh
COPY install/ubuntu_install_
keras.sh /install/ubuntu_install_keras
.sh
RUN bash /install/ubuntu_install_
keras
.sh
COPY install/ubuntu_install_
tensorflow.sh /install/ubuntu_install_tensorflow
.sh
RUN bash /install/ubuntu_install_
tensorflow
.sh
COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
RUN bash /install/ubuntu_install_java.sh
...
...
docker/install/ubuntu_install_keras.sh
deleted
100755 → 0
View file @
5b5ae980
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set
-e
set
-u
set
-o
pipefail
pip2 install keras tensorflow h5py
pip3 install keras tensorflow h5py
docker/install/ubuntu_install_llvm.sh
View file @
4d314833
...
...
@@ -25,14 +25,14 @@ echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main\
echo
deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main
\
>>
/etc/apt/sources.list.d/llvm.list
echo
deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
5.0
main
\
echo
deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
7
main
\
>>
/etc/apt/sources.list.d/llvm.list
echo
deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
5.0
main
\
echo
deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
7
main
\
>>
/etc/apt/sources.list.d/llvm.list
echo
deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
6.0
main
\
echo
deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
8
main
\
>>
/etc/apt/sources.list.d/llvm.list
echo
deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
6.0
main
\
echo
deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-
8
main
\
>>
/etc/apt/sources.list.d/llvm.list
echo
deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main
\
...
...
@@ -41,4 +41,4 @@ echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
>>
/etc/apt/sources.list.d/llvm.list
wget
-q
-O
- http://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
apt-get update
&&
apt-get install
-y
llvm-4.0 llvm-
5.0 llvm-6.0 clang-6.0
apt-get update
&&
apt-get install
-y
llvm-4.0 llvm-
9 llvm-8 llvm-7 clang-9 clang-8 clang-7
docker/install/ubuntu_install_mxnet.sh
View file @
4d314833
...
...
@@ -20,4 +20,4 @@ set -e
set
-u
set
-o
pipefail
pip3 install mxnet
pip3 install
mxnet
==
1.5.0
docker/install/ubuntu_install_onnx.sh
View file @
4d314833
...
...
@@ -21,7 +21,7 @@ set -u
set
-o
pipefail
# fix to certain version for now
pip3 install onnx
>
=
1.4.1
pip3 install
onnx
==
1.5.0
pip3 install https://download.pytorch.org/whl/cu80/torch-1.0.1.post2-cp36-cp36m-linux_x86_64.whl
pip3 install torchvision
docker/install/ubuntu_install_tensorflow.sh
View file @
4d314833
...
...
@@ -20,4 +20,4 @@ set -e
set
-u
set
-o
pipefail
pip3 install tensorflow
pip3 install
tensorflow
==
1.13.1 keras h5py
python/tvm/contrib/clang.py
View file @
4d314833
...
...
@@ -45,7 +45,9 @@ def find_clang(required=True):
"""
cc_list
=
[]
if
hasattr
(
codegen
,
"llvm_version_major"
):
cc_list
+=
[
"clang-
%
d.0"
%
codegen
.
llvm_version_major
()]
major
=
codegen
.
llvm_version_major
()
cc_list
+=
[
"clang-
%
d.0"
%
major
]
cc_list
+=
[
"clang-
%
d"
%
major
]
cc_list
+=
[
"clang"
]
cc_list
+=
[
"clang.exe"
]
valid_list
=
[
util
.
which
(
x
)
for
x
in
cc_list
]
...
...
python/tvm/relay/grammar/py3/RelayLexer.py
View file @
4d314833
# Generated from /
home/marisa/Work/tvm
/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# Generated from /
workspace
/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
from
antlr4
import
*
from
io
import
StringIO
from
typing.io
import
TextIO
...
...
python/tvm/relay/grammar/py3/RelayParser.py
View file @
4d314833
# Generated from /
home/marisa/Work/tvm
/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# Generated from /
workspace
/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# encoding: utf-8
from
antlr4
import
*
from
io
import
StringIO
...
...
python/tvm/relay/grammar/py3/RelayVisitor.py
View file @
4d314833
# Generated from /
home/marisa/Work/tvm
/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# Generated from /
workspace
/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
from
antlr4
import
*
if
__name__
is
not
None
and
"."
in
__name__
:
from
.RelayParser
import
RelayParser
...
...
src/arithmetic/int_set.cc
View file @
4d314833
...
...
@@ -755,7 +755,7 @@ IntSet EvalSet(Range r,
// Simplifying first can give tighter bounds if r->min and r->extent share variables
Expr
sum
=
r
->
min
+
r
->
extent
-
1
;
auto
res
=
m
.
Eval
(
IntervalSet
(
r
->
min
,
Simplify
(
sum
)));
return
res
;
return
std
::
move
(
res
)
;
}
IntSet
EvalSet
(
Range
r
,
...
...
src/codegen/llvm/codegen_cpu.cc
View file @
4d314833
...
...
@@ -18,13 +18,13 @@
*/
/*!
* Copyright (c) 2017 by Contributors
* \file codegen_cpu.cc
*/
#ifdef TVM_LLVM_VERSION
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/ir_pass.h>
#include <memory>
#include <unordered_map>
#include "codegen_cpu.h"
#include "../../pass/ir_util.h"
...
...
@@ -38,6 +38,7 @@ void CodeGenCPU::Init(const std::string& module_name,
bool
system_lib
,
bool
dynamic_lookup
)
{
CodeGenLLVM
::
Init
(
module_name
,
tm
,
ctx
,
system_lib
,
dynamic_lookup
);
dbg_info_
=
CreateDebugInfo
(
module_
.
get
());
static_assert
(
sizeof
(
TVMValue
)
==
sizeof
(
double
),
"invariant"
);
func_handle_map_
.
clear
();
export_system_symbols_
.
clear
();
...
...
@@ -131,9 +132,9 @@ void CodeGenCPU::AddFunction(const LoweredFunc& f) {
AddDebugInformation
(
function_
);
}
// Following Glow |DebugInfo::generateFunctionDebugInfo|, https://git.io/fjadv
// Following Glow |DebugInfo::generateFunctionDebugInfo|, https://git.io/fjadv
void
CodeGenCPU
::
AddDebugInformation
(
llvm
::
Function
*
function
)
{
#if TVM_LLVM_VERSION >= 50
#if TVM_LLVM_VERSION >= 50
&& TVM_LLVM_VERSION < 70
CHECK
(
!
function
->
getSubprogram
());
llvm
::
SmallVector
<
llvm
::
Metadata
*
,
4
>
paramTys
;
llvm
::
DIType
*
returnTy
=
...
...
@@ -145,10 +146,26 @@ void CodeGenCPU::AddDebugInformation(llvm::Function* function) {
}
auto
*
DIFunctionTy
=
dbg_info_
->
di_builder_
->
createSubroutineType
(
dbg_info_
->
di_builder_
->
getOrCreateTypeArray
(
paramTys
));
#if TVM_LLVM_VERSION >= 80
auto
*
DIFunction
=
dbg_info_
->
di_builder_
->
createFunction
(
dbg_info_
->
file_
,
function
->
getName
(),
""
,
dbg_info_
->
file_
,
0
/* line number */
,
DIFunctionTy
,
false
/* internal linkage */
);
#else
auto
*
DIFunction
=
dbg_info_
->
di_builder_
->
createFunction
(
dbg_info_
->
file_
,
function
->
getName
(),
""
,
dbg_info_
->
file_
,
0
/* line number */
,
DIFunctionTy
,
false
/* internal linkage */
,
true
/* definition */
,
0
/* line number */
,
llvm
::
DINode
::
FlagPrototyped
,
true
/* isOptimized */
);
dbg_info_
->
file_
,
function
->
getName
(),
""
,
dbg_info_
->
file_
,
0
/* line number */
,
DIFunctionTy
,
false
,
/* internal linkage */
true
,
0
/* line number */
,
llvm
::
DINode
::
FlagPrototyped
,
true
/* isOptimized */
);
#endif
CHECK
(
DIFunction
);
function
->
setSubprogram
(
DIFunction
);
...
...
@@ -223,6 +240,13 @@ void CodeGenCPU::AddMainFunction(const std::string& entry_func_name) {
global
->
setInitializer
(
llvm
::
ConstantDataArray
::
getString
(
*
ctx_
,
entry_func_name
));
}
std
::
unique_ptr
<
llvm
::
Module
>
CodeGenCPU
::
Finish
()
{
// link modules
if
(
dbg_info_
!=
nullptr
)
{
dbg_info_
->
di_builder_
->
finalize
();
}
return
CodeGenLLVM
::
Finish
();
}
llvm
::
Value
*
CodeGenCPU
::
CreateStructRefPtr
(
Type
t
,
llvm
::
Value
*
buf
,
llvm
::
Value
*
index
,
int
kind
)
{
if
(
kind
<
intrinsic
::
kArrKindBound_
)
{
...
...
src/codegen/llvm/codegen_cpu.h
View file @
4d314833
...
...
@@ -18,7 +18,6 @@
*/
/*!
* Copyright (c) 2017 by Contributors
* \file codegen_llvm_cpu.h
* \brief Common base class for generating into LLVM IR on CPU host.
*/
...
...
@@ -27,6 +26,7 @@
#include <utility>
#include <vector>
#include <memory>
#include <string>
#include <unordered_map>
#include "codegen_llvm.h"
...
...
@@ -44,6 +44,7 @@ class CodeGenCPU : public CodeGenLLVM {
bool
dynamic_lookup
)
override
;
void
AddFunction
(
const
LoweredFunc
&
f
)
override
;
void
AddMainFunction
(
const
std
::
string
&
entry_func_name
)
override
;
std
::
unique_ptr
<
llvm
::
Module
>
Finish
()
override
;
void
VisitStmt_
(
const
AssertStmt
*
op
)
override
;
void
VisitStmt_
(
const
AttrStmt
*
op
)
override
;
void
VisitStmt_
(
const
For
*
op
)
override
;
...
...
@@ -139,6 +140,8 @@ class CodeGenCPU : public CodeGenLLVM {
std
::
unordered_map
<
std
::
string
,
llvm
::
GlobalVariable
*>
func_handle_map_
;
// List of symbols to be exported to TVM system lib.
std
::
vector
<
std
::
pair
<
std
::
string
,
llvm
::
Value
*>
>
export_system_symbols_
;
// internal debug information, to be populated by
std
::
unique_ptr
<
DebugInfo
>
dbg_info_
;
// Get the DWARF type corresponding to the LLVM type |ty|. The current API in practice only
// generates |int32|, and |int8*|.
...
...
src/codegen/llvm/codegen_llvm.cc
View file @
4d314833
...
...
@@ -73,7 +73,6 @@ void CodeGenLLVM::Init(const std::string& module_name,
md_tbaa_root_
=
md_builder_
->
createTBAARoot
(
"tvm-tbaa"
);
md_tbaa_alias_set_
=
md_builder_
->
createTBAANode
(
"tvm-alias"
,
md_tbaa_root_
);
this
->
InitTarget
(
tm
);
dbg_info_
=
CreateDebugInfo
(
module_
.
get
());
}
void
CodeGenLLVM
::
InitTarget
(
llvm
::
TargetMachine
*
tm
)
{
...
...
@@ -171,8 +170,6 @@ void CodeGenLLVM::AddFunctionInternal(const LoweredFunc& f, bool ret_void) {
std
::
unique_ptr
<
llvm
::
Module
>
CodeGenLLVM
::
Finish
()
{
this
->
AddStartupFunction
();
// link modules
dbg_info_
->
di_builder_
->
finalize
();
for
(
size_t
i
=
0
;
i
<
link_modules_
.
size
();
++
i
)
{
CHECK
(
!
llvm
::
Linker
::
linkModules
(
*
module_
,
std
::
move
(
link_modules_
[
i
])))
<<
"Failed to link modules"
;
...
...
@@ -423,7 +420,8 @@ void CodeGenLLVM::GetAlignment(Type t,
*
p_alignment
=
align_bits
/
8
;
}
std
::
unique_ptr
<
CodeGenLLVM
::
DebugInfo
>
CodeGenLLVM
::
CreateDebugInfo
(
llvm
::
Module
*
module
)
{
std
::
unique_ptr
<
CodeGenLLVM
::
DebugInfo
>
CodeGenLLVM
::
CreateDebugInfo
(
llvm
::
Module
*
module
)
{
auto
debug_info
=
llvm
::
make_unique
<
CodeGenLLVM
::
DebugInfo
>
();
debug_info
->
di_builder_
=
llvm
::
make_unique
<
llvm
::
DIBuilder
>
(
*
module
);
// TODO(tulloch): pass this information through relay::Span classes to the LoweredFunc instance?
...
...
src/codegen/llvm/codegen_llvm.h
View file @
4d314833
...
...
@@ -293,16 +293,16 @@ class CodeGenLLVM :
std
::
unordered_set
<
const
Variable
*>
alias_var_set_
;
// set of volatile buffer.
std
::
unordered_set
<
const
Variable
*>
volatile_buf_
;
/*! \brief Helper struct for debug infos. */
struct
DebugInfo
{
std
::
unique_ptr
<
llvm
::
DIBuilder
>
di_builder_
;
llvm
::
DICompileUnit
*
compilation_unit_
{
nullptr
};
llvm
::
DIFile
*
file_
{
nullptr
};
};
std
::
unique_ptr
<
DebugInfo
>
dbg_info_
;
// Create a new DebugInfo struct from the given Module that initializes the |file_| and
// |compilation_unit_| to TVM defaults.
/*!
* \brief Create a new DebugInfo struct from the given Module that
* initializes file and compilation_unit_ to TVM defaults.
*/
static
std
::
unique_ptr
<
DebugInfo
>
CreateDebugInfo
(
llvm
::
Module
*
module
);
};
}
// namespace codegen
...
...
src/lang/expr_operator.cc
View file @
4d314833
...
...
@@ -18,7 +18,6 @@
*/
/*!
* Copyright (c) 2017 by Contributors
* \file expr_operator.cc
*/
#include <tvm/base.h>
...
...
src/relay/ir/expr_functor.cc
View file @
4d314833
...
...
@@ -435,7 +435,7 @@ Expr Bind(const Expr& expr, const tvm::Map<Var, Expr>& args_map) {
func
->
type_params
,
func
->
attrs
);
CHECK_EQ
(
FreeVars
(
expr
).
size
(),
FreeVars
(
ret
).
size
());
return
ret
;
return
std
::
move
(
ret
)
;
}
else
{
return
ExprBinder
(
args_map
).
VisitExpr
(
expr
);
}
...
...
src/relay/pass/eta_expand.cc
View file @
4d314833
...
...
@@ -18,8 +18,6 @@
*/
/*!
* Copyright (c) 2019 by Contributors
*
* \file eta_expand.cc
*
* \brief Add abstraction over a function. For example, abs will become (fun x -> abs x).
...
...
@@ -61,7 +59,7 @@ Expr EtaExpand(const Expr& e, const Module& mod) {
auto
new_func
=
FunctionNode
::
make
(
args
,
CallNode
::
make
(
e
,
params
),
ret_type
,
original_type_params
);
return
new_func
;
return
std
::
move
(
new_func
)
;
}
namespace
transform
{
...
...
tests/python/frontend/mxnet/test_forward.py
View file @
4d314833
...
...
@@ -598,9 +598,10 @@ def test_forward_rnn_layer():
verify
(
mode
,
10
,
64
,
64
,
2
)
verify
(
mode
,
10
,
64
,
32
,
2
)
verify
(
mode
,
10
,
64
,
32
,
2
,
batch
=
2
)
verify
(
mode
,
10
,
64
,
64
,
3
,
init_states
=
False
)
verify
(
mode
,
10
,
32
,
64
,
1
,
bidirectional
=
True
)
verify
(
mode
,
10
,
64
,
64
,
3
,
batch
=
2
,
bidirectional
=
True
,
init_states
=
False
)
# The following two codeblocks need to be fixed for mxnet 1.5
# verify(mode, 10, 64, 64, 3, init_states=False)
# verify(mode, 10, 64, 64, 3, batch=2, bidirectional=True, init_states=False)
def
test_forward_Crop
():
def
verify
(
xshape
,
yshape
,
offset
=
None
):
...
...
tests/python/unittest/test_codegen_llvm.py
View file @
4d314833
...
...
@@ -487,6 +487,8 @@ def test_dwarf_debug_information():
return
if
tvm
.
codegen
.
llvm_version_major
()
<
5
:
return
if
tvm
.
codegen
.
llvm_version_major
()
>
6
:
return
# build two functions
f2
=
tvm
.
lower
(
s
,
[
A
,
B
,
C
],
name
=
"fadd1"
)
f1
=
tvm
.
lower
(
s
,
[
A
,
B
,
C
],
name
=
"fadd2"
)
...
...
@@ -522,6 +524,8 @@ def test_dwarf_debug_information():
return
if
tvm
.
codegen
.
llvm_version_major
()
<
5
:
return
if
tvm
.
codegen
.
llvm_version_major
()
>
6
:
return
# build two functions
f2
=
tvm
.
lower
(
s
,
[
A
,
B
,
C
],
name
=
"fadd1"
)
f1
=
tvm
.
lower
(
s
,
[
A
,
B
,
C
],
name
=
"fadd2"
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment