Unverified Commit 4d314833 by Tianqi Chen Committed by GitHub

[CI] Upgrade LLVM envs (#3590)

parent 5b5ae980
......@@ -39,9 +39,9 @@
// - Periodically cleanup the old versions on local workers
//
ci_lint = "tvmai/ci-lint:v0.51"
ci_gpu = "tvmai/ci-gpu:v0.52"
ci_cpu = "tvmai/ci-cpu:v0.50"
ci_i386 = "tvmai/ci-i386:v0.50"
ci_gpu = "tvmai/ci-gpu:v0.53"
ci_cpu = "tvmai/ci-cpu:v0.51"
ci_i386 = "tvmai/ci-i386:v0.51"
// tvm libraries
tvm_runtime = "build/libtvm_runtime.so, build/config.cmake"
......@@ -136,7 +136,7 @@ stage('Build') {
echo set\\(USE_CUDA ON\\) >> config.cmake
echo set\\(USE_OPENGL ON\\) >> config.cmake
echo set\\(USE_MICRO ON\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-6.0\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-7\\) >> config.cmake
echo set\\(USE_NNPACK ON\\) >> config.cmake
echo set\\(NNPACK_PATH /NNPACK/build/\\) >> config.cmake
echo set\\(USE_RPC ON\\) >> config.cmake
......@@ -161,7 +161,7 @@ stage('Build') {
echo set\\(USE_VULKAN ON\\) >> config.cmake
echo set\\(USE_MICRO ON\\) >> config.cmake
echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
echo set\\(CMAKE_CXX_COMPILER clang-6.0\\) >> config.cmake
echo set\\(CMAKE_CXX_COMPILER clang-7\\) >> config.cmake
echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
"""
make(ci_gpu, 'build2', '-j2')
......@@ -179,7 +179,7 @@ stage('Build') {
echo set\\(USE_SORT ON\\) >> config.cmake
echo set\\(USE_MICRO ON\\) >> config.cmake
echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-4.0\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-8\\) >> config.cmake
echo set\\(USE_NNPACK ON\\) >> config.cmake
echo set\\(NNPACK_PATH /NNPACK/build/\\) >> config.cmake
echo set\\(USE_ANTLR ON\\) >> config.cmake
......@@ -210,7 +210,7 @@ stage('Build') {
echo set\\(USE_SORT ON\\) >> config.cmake
echo set\\(USE_RPC ON\\) >> config.cmake
echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-5.0\\) >> config.cmake
echo set\\(USE_LLVM llvm-config-4.0\\) >> config.cmake
echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake
echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
"""
......
......@@ -72,8 +72,6 @@ RUN bash /install/ubuntu_install_coreml.sh
COPY install/ubuntu_install_tensorflow.sh /install/ubuntu_install_tensorflow.sh
RUN bash /install/ubuntu_install_tensorflow.sh
COPY install/ubuntu_install_keras.sh /install/ubuntu_install_keras.sh
RUN bash /install/ubuntu_install_keras.sh
COPY install/ubuntu_install_darknet.sh /install/ubuntu_install_darknet.sh
RUN bash /install/ubuntu_install_darknet.sh
......
......@@ -29,8 +29,8 @@ RUN bash /install/ubuntu_install_python.sh
COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh
COPY install/ubuntu_install_keras.sh /install/ubuntu_install_keras.sh
RUN bash /install/ubuntu_install_keras.sh
COPY install/ubuntu_install_tensorflow.sh /install/ubuntu_install_tensorflow.sh
RUN bash /install/ubuntu_install_tensorflow.sh
COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
RUN bash /install/ubuntu_install_java.sh
......
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# Fail fast: abort on any command error (-e), on use of an unset
# variable (-u), and propagate failures through pipelines.
set -e
set -u
set -o pipefail
# Install Keras with the TensorFlow backend plus h5py (HDF5 file
# support) for both the Python 2 and Python 3 interpreters, so CI
# images can run model-import tests under either version.
# NOTE(review): versions are unpinned here — presumably pinning is
# handled by the image tag; confirm against the Dockerfiles.
pip2 install keras tensorflow h5py
pip3 install keras tensorflow h5py
......@@ -25,14 +25,14 @@ echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main\
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-8 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-8 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
......@@ -41,4 +41,4 @@ echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
>> /etc/apt/sources.list.d/llvm.list
wget -q -O - http://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
apt-get update && apt-get install -y llvm-4.0 llvm-5.0 llvm-6.0 clang-6.0
apt-get update && apt-get install -y llvm-4.0 llvm-9 llvm-8 llvm-7 clang-9 clang-8 clang-7
......@@ -20,4 +20,4 @@ set -e
set -u
set -o pipefail
pip3 install mxnet
pip3 install mxnet==1.5.0
......@@ -21,7 +21,7 @@ set -u
set -o pipefail
# fix to certain version for now
pip3 install onnx>=1.4.1
pip3 install onnx==1.5.0
pip3 install https://download.pytorch.org/whl/cu80/torch-1.0.1.post2-cp36-cp36m-linux_x86_64.whl
pip3 install torchvision
......@@ -20,4 +20,4 @@ set -e
set -u
set -o pipefail
pip3 install tensorflow
pip3 install tensorflow==1.13.1 keras h5py
......@@ -45,7 +45,9 @@ def find_clang(required=True):
"""
cc_list = []
if hasattr(codegen, "llvm_version_major"):
cc_list += ["clang-%d.0" % codegen.llvm_version_major()]
major = codegen.llvm_version_major()
cc_list += ["clang-%d.0" % major]
cc_list += ["clang-%d" % major]
cc_list += ["clang"]
cc_list += ["clang.exe"]
valid_list = [util.which(x) for x in cc_list]
......
# Generated from /home/marisa/Work/tvm/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# Generated from /workspace/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
from antlr4 import *
from io import StringIO
from typing.io import TextIO
......
# Generated from /home/marisa/Work/tvm/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# Generated from /workspace/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
......
# Generated from /home/marisa/Work/tvm/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# Generated from /workspace/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .RelayParser import RelayParser
......
......@@ -755,7 +755,7 @@ IntSet EvalSet(Range r,
// Simplifying first can give tighter bounds if r->min and r->extent share variables
Expr sum = r->min + r->extent - 1;
auto res = m.Eval(IntervalSet(r->min, Simplify(sum)));
return res;
return std::move(res);
}
IntSet EvalSet(Range r,
......
......@@ -18,13 +18,13 @@
*/
/*!
* Copyright (c) 2017 by Contributors
* \file codegen_cpu.cc
*/
#ifdef TVM_LLVM_VERSION
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/ir_pass.h>
#include <memory>
#include <unordered_map>
#include "codegen_cpu.h"
#include "../../pass/ir_util.h"
......@@ -38,6 +38,7 @@ void CodeGenCPU::Init(const std::string& module_name,
bool system_lib,
bool dynamic_lookup) {
CodeGenLLVM::Init(module_name, tm, ctx, system_lib, dynamic_lookup);
dbg_info_ = CreateDebugInfo(module_.get());
static_assert(sizeof(TVMValue) == sizeof(double), "invariant");
func_handle_map_.clear();
export_system_symbols_.clear();
......@@ -131,9 +132,9 @@ void CodeGenCPU::AddFunction(const LoweredFunc& f) {
AddDebugInformation(function_);
}
// Following Glow |DebugInfo::generateFunctionDebugInfo|, https://git.io/fjadv
// Following Glow |DebugInfo::generateFunctionDebugInfo|, https://git.io/fjadv
void CodeGenCPU::AddDebugInformation(llvm::Function* function) {
#if TVM_LLVM_VERSION >= 50
#if TVM_LLVM_VERSION >= 50 && TVM_LLVM_VERSION < 70
CHECK(!function->getSubprogram());
llvm::SmallVector<llvm::Metadata*, 4> paramTys;
llvm::DIType* returnTy =
......@@ -145,10 +146,26 @@ void CodeGenCPU::AddDebugInformation(llvm::Function* function) {
}
auto* DIFunctionTy = dbg_info_->di_builder_->createSubroutineType(
dbg_info_->di_builder_->getOrCreateTypeArray(paramTys));
#if TVM_LLVM_VERSION >= 80
auto* DIFunction = dbg_info_->di_builder_->createFunction(
dbg_info_->file_, function->getName(), "",
dbg_info_->file_,
0 /* line number */,
DIFunctionTy,
false /* internal linkage */);
#else
auto* DIFunction = dbg_info_->di_builder_->createFunction(
dbg_info_->file_, function->getName(), "", dbg_info_->file_, 0 /* line number */,
DIFunctionTy, false /* internal linkage */, true /* definition */, 0 /* line number */,
llvm::DINode::FlagPrototyped, true /* isOptimized */);
dbg_info_->file_, function->getName(), "",
dbg_info_->file_,
0 /* line number */,
DIFunctionTy,
false, /* internal linkage */
true,
0 /* line number */,
llvm::DINode::FlagPrototyped,
true /* isOptimized */);
#endif
CHECK(DIFunction);
function->setSubprogram(DIFunction);
......@@ -223,6 +240,13 @@ void CodeGenCPU::AddMainFunction(const std::string& entry_func_name) {
global->setInitializer(llvm::ConstantDataArray::getString(*ctx_, entry_func_name));
}
std::unique_ptr<llvm::Module> CodeGenCPU::Finish() {
  // Flush any accumulated debug information into the module before
  // handing finalization (and module linking) off to the base class.
  if (dbg_info_) dbg_info_->di_builder_->finalize();
  return CodeGenLLVM::Finish();
}
llvm::Value* CodeGenCPU::CreateStructRefPtr(
Type t, llvm::Value* buf, llvm::Value* index, int kind) {
if (kind < intrinsic::kArrKindBound_) {
......
......@@ -18,7 +18,6 @@
*/
/*!
* Copyright (c) 2017 by Contributors
* \file codegen_llvm_cpu.h
* \brief Common base class for generating into LLVM IR on CPU host.
*/
......@@ -27,6 +26,7 @@
#include <utility>
#include <vector>
#include <memory>
#include <string>
#include <unordered_map>
#include "codegen_llvm.h"
......@@ -44,6 +44,7 @@ class CodeGenCPU : public CodeGenLLVM {
bool dynamic_lookup) override;
void AddFunction(const LoweredFunc& f) override;
void AddMainFunction(const std::string& entry_func_name) override;
std::unique_ptr<llvm::Module> Finish() override;
void VisitStmt_(const AssertStmt* op) override;
void VisitStmt_(const AttrStmt* op) override;
void VisitStmt_(const For* op) override;
......@@ -139,6 +140,8 @@ class CodeGenCPU : public CodeGenLLVM {
std::unordered_map<std::string, llvm::GlobalVariable*> func_handle_map_;
// List of symbols to be exported to TVM system lib.
std::vector<std::pair<std::string, llvm::Value*> > export_system_symbols_;
// internal debug information, to be populated by
std::unique_ptr<DebugInfo> dbg_info_;
// Get the DWARF type corresponding to the LLVM type |ty|. The current API in practice only
// generates |int32|, and |int8*|.
......
......@@ -73,7 +73,6 @@ void CodeGenLLVM::Init(const std::string& module_name,
md_tbaa_root_ = md_builder_->createTBAARoot("tvm-tbaa");
md_tbaa_alias_set_ = md_builder_->createTBAANode("tvm-alias", md_tbaa_root_);
this->InitTarget(tm);
dbg_info_ = CreateDebugInfo(module_.get());
}
void CodeGenLLVM::InitTarget(llvm::TargetMachine* tm) {
......@@ -171,8 +170,6 @@ void CodeGenLLVM::AddFunctionInternal(const LoweredFunc& f, bool ret_void) {
std::unique_ptr<llvm::Module> CodeGenLLVM::Finish() {
this->AddStartupFunction();
// link modules
dbg_info_->di_builder_->finalize();
for (size_t i = 0; i < link_modules_.size(); ++i) {
CHECK(!llvm::Linker::linkModules(*module_, std::move(link_modules_[i])))
<< "Failed to link modules";
......@@ -423,7 +420,8 @@ void CodeGenLLVM::GetAlignment(Type t,
*p_alignment = align_bits / 8;
}
std::unique_ptr<CodeGenLLVM::DebugInfo> CodeGenLLVM::CreateDebugInfo(llvm::Module* module) {
std::unique_ptr<CodeGenLLVM::DebugInfo>
CodeGenLLVM::CreateDebugInfo(llvm::Module* module) {
auto debug_info = llvm::make_unique<CodeGenLLVM::DebugInfo>();
debug_info->di_builder_ = llvm::make_unique<llvm::DIBuilder>(*module);
// TODO(tulloch): pass this information through relay::Span classes to the LoweredFunc instance?
......
......@@ -293,16 +293,16 @@ class CodeGenLLVM :
std::unordered_set<const Variable*> alias_var_set_;
// set of volatile buffer.
std::unordered_set<const Variable*> volatile_buf_;
/*! \brief Helper struct bundling the per-module LLVM debug-info state. */
struct DebugInfo {
// Builder used to emit DWARF debug metadata into the owning module.
std::unique_ptr<llvm::DIBuilder> di_builder_;
// Compile unit for the generated module; initialized to TVM defaults
// by CreateDebugInfo.
llvm::DICompileUnit* compilation_unit_{nullptr};
// Source-file entry that emitted debug entries are attributed to;
// also initialized to TVM defaults by CreateDebugInfo.
llvm::DIFile* file_{nullptr};
};
std::unique_ptr<DebugInfo> dbg_info_;
// Create a new DebugInfo struct from the given Module that initializes the |file_| and
// |compilation_unit_| to TVM defaults.
/*!
* \brief Create a new DebugInfo struct from the given Module that
* initializes file and compilation_unit_ to TVM defaults.
*/
static std::unique_ptr<DebugInfo> CreateDebugInfo(llvm::Module* module);
};
} // namespace codegen
......
......@@ -18,7 +18,6 @@
*/
/*!
* Copyright (c) 2017 by Contributors
* \file expr_operator.cc
*/
#include <tvm/base.h>
......
......@@ -435,7 +435,7 @@ Expr Bind(const Expr& expr, const tvm::Map<Var, Expr>& args_map) {
func->type_params,
func->attrs);
CHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size());
return ret;
return std::move(ret);
} else {
return ExprBinder(args_map).VisitExpr(expr);
}
......
......@@ -18,8 +18,6 @@
*/
/*!
* Copyright (c) 2019 by Contributors
*
* \file eta_expand.cc
*
* \brief Add abstraction over a function. For example, abs will become (fun x -> abs x).
......@@ -61,7 +59,7 @@ Expr EtaExpand(const Expr& e, const Module& mod) {
auto new_func =
FunctionNode::make(args, CallNode::make(e, params), ret_type, original_type_params);
return new_func;
return std::move(new_func);
}
namespace transform {
......
......@@ -598,9 +598,10 @@ def test_forward_rnn_layer():
verify(mode, 10, 64, 64, 2)
verify(mode, 10, 64, 32, 2)
verify(mode, 10, 64, 32, 2, batch=2)
verify(mode, 10, 64, 64, 3, init_states=False)
verify(mode, 10, 32, 64, 1, bidirectional=True)
verify(mode, 10, 64, 64, 3, batch=2, bidirectional=True, init_states=False)
# The following two codeblocks need to be fixed for mxnet 1.5
# verify(mode, 10, 64, 64, 3, init_states=False)
# verify(mode, 10, 64, 64, 3, batch=2, bidirectional=True, init_states=False)
def test_forward_Crop():
def verify(xshape, yshape, offset=None):
......
......@@ -487,6 +487,8 @@ def test_dwarf_debug_information():
return
if tvm.codegen.llvm_version_major() < 5:
return
if tvm.codegen.llvm_version_major() > 6:
return
# build two functions
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
......@@ -522,6 +524,8 @@ def test_dwarf_debug_information():
return
if tvm.codegen.llvm_version_major() < 5:
return
if tvm.codegen.llvm_version_major() > 6:
return
# build two functions
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment