Unverified Commit 4332b0aa by Jared Roesch Committed by GitHub

[Relay][Runtime] Implementation of Relay VM (#2889)

* Implement the virtual machine

Co-Authored-By: wweic <ipondering.weic@gmail.com>

* Fix rebase build issues

* Reorganize vm.py and fix allocator bug

* Remove compiler

* Remove tests

* Remove backend/vm/vm.cc too

* Fix docs

* Fix doc

* Fix doc

* Add vm docs

* Remove change to dead_code.cc

* Remove Relay logging

* Remove reduce

* Update include/tvm/runtime/vm.h

Co-Authored-By: jroesch <roeschinc@gmail.com>

* Reformat

* Update include/tvm/runtime/vm.h

Co-Authored-By: jroesch <roeschinc@gmail.com>

* Address feedback

* Update include/tvm/runtime/vm.h

Co-Authored-By: jroesch <roeschinc@gmail.com>

* Apply suggestions from code review

Co-Authored-By: jroesch <roeschinc@gmail.com>

* Fix a couple outstanding comments

* Last couple comments

* Update include/tvm/runtime/vm.h

Co-Authored-By: jroesch <roeschinc@gmail.com>

* Address code review feedback

* Fix final comment

* Address comments

* Error reporting and example

* add Const

* Explicitly delete copy assignment operator

* Fix rebase

* Pass 3rd arg to fusion
parent 181dbd8e
......@@ -32,6 +32,7 @@ tvm_option(USE_LLVM "Build with LLVM, can be set to specific llvm-config path" O
tvm_option(USE_STACKVM_RUNTIME "Include stackvm into the runtime" OFF)
tvm_option(USE_GRAPH_RUNTIME "Build with tiny graph runtime" ON)
tvm_option(USE_GRAPH_RUNTIME_DEBUG "Build with tiny graph runtime debug mode" OFF)
tvm_option(USE_RELAY_DEBUG "Building Relay in debug mode..." OFF)
tvm_option(USE_SGX "Build with SGX" OFF)
tvm_option(USE_RTTI "Build with RTTI" ON)
tvm_option(USE_MSVC_MT "Build with MT" OFF)
......@@ -140,7 +141,10 @@ file(GLOB TOPI_SRCS
)
file(GLOB_RECURSE HALIDEIR_SRCS 3rdparty/HalideIR/src/*.cpp)
list(APPEND COMPILER_SRCS ${HALIDEIR_SRCS})
file(GLOB RUNTIME_SRCS src/runtime/*.cc)
file(GLOB RUNTIME_SRCS
src/runtime/*.cc
src/runtime/vm/*.cc
)
# Package runtime rules
if(NOT USE_RTTI)
......@@ -197,6 +201,13 @@ include(cmake/modules/contrib/HybridDump.cmake)
add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS})
add_library(tvm_topi SHARED ${TOPI_SRCS})
add_library(tvm_runtime SHARED ${RUNTIME_SRCS})
if(USE_RELAY_DEBUG)
message(STATUS "Building Relay in debug mode...")
set_target_properties(tvm PROPERTIES COMPILE_DEFINITIONS "USE_RELAY_DEBUG")
set_target_properties(tvm PROPERTIES COMPILE_DEFINITIONS "NDEBUG")
endif(USE_RELAY_DEBUG)
if(NOT USE_SGX STREQUAL "OFF")
add_dependencies(tvm sgx_edl)
add_dependencies(tvm_runtime sgx_edl tvm_t)
......
......@@ -134,3 +134,7 @@ set(USE_ANTLR OFF)
# Build TSIM for VTA
set(USE_VTA_TSIM OFF)
# Whether use Relay debug mode
set(USE_RELAY_DEBUG OFF)
......@@ -320,6 +320,22 @@ TVM_DLL bool AlphaEqual(const Expr& e1, const Expr& e2);
*/
TVM_DLL bool AlphaEqual(const Type& t1, const Type& t2);
/*! \brief Add abstraction over a function
*
* For example: `square` is transformed to
* `fun x -> square x`.
*
* See https://en.wikipedia.org/wiki/Lambda_calculus#%CE%B7-conversion
* for more details.
*
* \param e The original function.
* \param mod The module used for referencing global functions, can be
* None.
*
* \return the new function with abstraction
*/
TVM_DLL Expr EtaExpand(const Expr& e, const Module& mod);
/*! \brief Check that each Var is only bound once.
*
* For example, the expression `let x = 1 in let x = 2 in 3` bound x twice.
......@@ -467,9 +483,10 @@ TVM_DLL Expr FoldConstant(const Expr& expr);
* \brief Fuse operations into expr into seperate functions.
* \param expr The expression.
* \param fuse_opt_level Optimization level.
* \param mod the module.
* \return The optimized expression.
*/
TVM_DLL Expr FuseOps(const Expr& expr, int fuse_opt_level);
TVM_DLL Expr FuseOps(const Expr& expr, int fuse_opt_level, const Module& mod);
/*!
* \brief Apply rewrite rules to rewrite the expr in post DFS order.
......
......@@ -103,6 +103,7 @@ typedef enum {
kStr = 11U,
kBytes = 12U,
kNDArrayContainer = 13U,
kObject = 14U,
// Extension codes for other frameworks to integrate TVM PackedFunc.
// To make sure each framework's id do not conflict, use first and
// last sections to mark ranges.
......@@ -113,7 +114,6 @@ typedef enum {
// The following section of code is used for non-reserved types.
kExtReserveEnd = 64U,
kExtEnd = 128U,
kObject = 14U,
} TVMTypeCode;
/*!
......
......@@ -306,9 +306,11 @@ class NDArray::Container {
DLContext ctx) {
dl_tensor.data = data;
shape_ = std::move(shape);
dl_tensor.shape = dmlc::BeginPtr(shape);
dl_tensor.ndim = static_cast<int>(shape.size());
dl_tensor.ndim = static_cast<int>(shape_.size());
dl_tensor.shape = dmlc::BeginPtr(shape_);
dl_tensor.dtype = dtype;
dl_tensor.strides = nullptr;
dl_tensor.byte_offset = 0;
dl_tensor.ctx = ctx;
}
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 by Contributors
* \file tvm/runtime/vm.h
* \brief A virtual machine for executing Relay programs.
*/
#ifndef TVM_RUNTIME_VM_H_
#define TVM_RUNTIME_VM_H_
#include <tvm/runtime/object.h>
#include <tvm/runtime/packed_func.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
namespace tvm {
namespace runtime {
namespace vm {
/*! \brief A register name. */
using RegName = int64_t;
/*! \brief An alias for the integer type used ubiquitously
* in the VM.
*/
using Index = int64_t;
/*! \brief An enumeration of Relay's opcodes.
*
* The opcode is used to implement instruction
* as a tagged union.
*/
enum class Opcode {
Move = 0U,
Ret = 1U,
Invoke = 2U,
InvokeClosure = 3U,
InvokePacked = 4U,
AllocTensor = 5U,
AllocDatatype = 6U,
AllocClosure = 7U,
GetField = 8U,
If = 9U,
Select = 10U,
LoadConst = 11U,
Goto = 12U
};
/*! \brief A single virtual machine instruction.
*
* The representation of the instruction is as
* a tagged union.
*
* The first field represents which instruction,
* and by extension which field of the union
* is active.
*/
struct Instruction {
/*! \brief The instruction opcode. */
Opcode op;
/*! \brief The destination register. */
RegName dst;
union {
struct /* AllocTensor Operands */ {
/*! \brief The register to read the shape out of. */
RegName shape_register;
/*! \brief The datatype of tensor to be allocated. */
DLDataType dtype;
};
struct /* InvokeClosure Operands */ {
/*! \brief The register containing the closure. */
RegName closure;
/*! \brief The number of arguments to the closure. */
Index closure_args_num;
/*! \brief The closure arguments as an array. */
RegName* closure_args;
};
struct /* Return Operands */ {
/*! \brief The register to return. */
RegName result;
};
struct /* Move Operands */ {
/*! \brief The source register for a move operation. */
RegName from;
};
struct /* Packed Operands */ {
/*! \brief The index into the packed function table. */
Index packed_index;
/*! \brief The arity of the packed function. */
Index arity;
/*! \brief The number of outputs produced by the packed function. */
Index output_size;
/*! \brief The arguments to pass to the packed function. */
RegName* packed_args;
};
struct /* Select Operands */ {
/*! \brief The condition of select. */
RegName select_cond;
/*! \brief The true branch. */
RegName select_op1;
/*! \brief The false branch. */
RegName select_op2;
};
struct /* If Operands */ {
/*! \brief The register containing the condition value. */
RegName if_cond;
/*! \brief The program counter offset for the true branch. */
Index true_offset;
/*! \brief The program counter offset for the false branch. */
Index false_offset;
};
struct /* Invoke Operands */ {
/*! \brief The function to call. */
Index func_index;
/*! \brief The number of arguments to the function. */
Index num_args;
/*! \brief The registers containing the arguments. */
RegName* invoke_args_registers;
};
struct /* Const Operands */ {
/* \brief The index into the constant pool. */
Index const_index;
};
struct /* Jump Operands */ {
/*! \brief The jump offset. */
Index pc_offset;
};
struct /* Proj Operands */ {
/*! \brief The register to project from. */
RegName object;
/*! \brief The field to read out. */
Index field_index;
};
struct /* AllocDatatype Operands */ {
/*! \brief The datatype's constructor tag. */
Index constructor_tag;
/*! \brief The number of fields to store in the datatype. */
Index num_fields;
/*! \brief The fields as an array. */
RegName* datatype_fields;
};
struct /* AllocClosure Operands */ {
/*! \brief The index into the function table. */
Index clo_index;
/*! \brief The number of free variables to capture. */
Index num_freevar;
/*! \brief The free variables as an array. */
RegName* free_vars;
};
};
/*! \brief Construct a select instruction.
* \param cond The condition register.
* \param op1 The true register.
* \param op2 The false register.
* \param dst The destination register.
* \return The select instruction.
*/
static Instruction Select(RegName cond, RegName op1, RegName op2, RegName dst);
/*! \brief Construct a return instruction.
* \param return_reg The register containing the return value.
* \return The return instruction.
* */
static Instruction Ret(RegName return_reg);
/*! \brief Construct a invoke packed instruction.
* \param packed_index The index of the packed function.
* \param arity The arity of the function.
* \param output_size The number of outputs of the packed function.
* \param args The argument registers.
* \return The invoke packed instruction.
*/
static Instruction InvokePacked(Index packed_index, Index arity, Index output_size,
const std::vector<RegName>& args);
/*! \brief Construct an allocate tensor instruction.
* \param shape_register The register containing the shape.
* \param dtype The dtype of the tensor.
* \param dst The destination register.
* \return The allocate tensor instruction.
*/
static Instruction AllocTensor(RegName shape_register, DLDataType dtype, RegName dst);
/*! \brief Construct an allocate datatype instruction.
* \param tag The datatype tag.
* \param num_fields The number of fields for the datatype.
* \param fields The registers containing the fields.
* \param dst The register name of the destination.
* \return The allocate instruction tensor.
*/
static Instruction AllocDatatype(Index tag, Index num_fields, const std::vector<RegName>& fields,
RegName dst);
/*! \brief Construct an allocate closure instruction.
* \param func_index The index of the function table.
* \param num_freevar The number of free variables.
* \param free_vars The registers of the free variables.
* \param dst The destination register.
* \return The allocate closure instruction.
*/
static Instruction AllocClosure(Index func_index, Index num_freevar,
const std::vector<RegName>& free_vars, RegName dst);
/*! \brief Construct a get field instruction.
* \param object_reg The register containing the object to project from.
* \param field_index The field to read out of the object.
* \param dst The destination register.
* \return The get field instruction.
*/
static Instruction GetField(RegName object_reg, Index field_index, RegName dst);
/*! \brief Construct an if instruction.
* \param cond_reg The register containing the condition.
* \param true_branch The offset to the true branch.
* \param false_branch The offset to the false branch.
* \return The if instruction.
*/
static Instruction If(RegName cond_reg, Index true_branch, Index false_branch);
/*! \brief Construct a goto instruction.
* \param pc_offset The offset from the current pc.
* \return The goto instruction.
*/
static Instruction Goto(Index pc_offset);
/*! \brief Construct an invoke instruction.
* \param func_index The index of the function to invoke.
* \param args The registers containing the arguments.
* \param dst The destination register.
* \return The invoke instruction.
*/
static Instruction Invoke(Index func_index, const std::vector<RegName>& args, RegName dst);
/*! \brief Construct an invoke closure instruction.
* \param closure The register of the closure to invoke.
* \param args The registers containing the arguments.
* \param dst The destination register.
* \return The invoke closure instruction.
*/
static Instruction InvokeClosure(RegName closure, const std::vector<RegName>& args, RegName dst);
/*! \brief Construct a load constant instruction.
* \param const_index The index of the constant.
* \param dst The destination register.
* \return The load constant instruction.
*/
static Instruction LoadConst(Index const_index, RegName dst);
/*! \brief Construct a move instruction.
* \param src The source register.
* \param dst The destination register.
* \return The move instruction.
*/
static Instruction Move(RegName src, RegName dst);
Instruction();
Instruction(const Instruction& instr);
Instruction& operator=(const Instruction& instr) = delete;
~Instruction();
friend std::ostream& operator<<(std::ostream& os, const Instruction&);
};
/*! \brief A representation of a Relay function in the VM.
*
* Contains metadata about the compiled function, as
* well as the compiled VM instructions.
*/
struct VMFunction {
/*! \brief The function's name. */
std::string name;
/*! \brief The number of function parameters. */
Index params;
/*! \brief The instructions representing the function. */
std::vector<Instruction> instructions;
/*! \brief The size of the frame for this function */
Index register_file_size;
VMFunction(const std::string& name, Index params,
const std::vector<Instruction>& instructions,
Index register_file_size)
: name(name),
params(params),
instructions(instructions),
register_file_size(register_file_size) {}
VMFunction() {}
friend std::ostream& operator<<(std::ostream& os, const VMFunction&);
};
/*! \brief A representation of a stack frame.
*
* A stack frame is a record containing the information needed
* to restore the caller's virtual machine state after returning
* from a function call.
*/
struct VMFrame {
/*! \brief The return program counter. */
Index pc;
/*! \brief The index into the function table, points to the caller. */
Index func_index;
/*! \brief The number of arguments. */
Index args;
/*! \brief A pointer into the caller function's instructions. */
const Instruction* code;
/*! \brief Statically allocated space for objects */
std::vector<Object> register_file;
/*! \brief Register in caller's frame to put return value */
RegName caller_return_register;
VMFrame(Index pc, Index func_index, Index args, const Instruction* code, Index register_file_size)
: pc(pc),
func_index(func_index),
args(args),
code(code),
register_file(register_file_size),
caller_return_register(0) {}
};
/*! \brief The virtual machine.
*
* The virtual machine contains all the current execution state,
* as well as the global view of functions, the global constant
* table, the compiled operators.
*
* The goal is to have a single self-contained object,
* enabling one to easily pass around VMs, execute them on
* multiple threads, or serialized them to disk or over the
* wire.
*/
struct VirtualMachine {
/*! \brief The virtual machine's packed function table. */
std::vector<PackedFunc> packed_funcs;
/*! \brief The virtual machine's function table. */
std::vector<VMFunction> functions;
/*! \brief The current stack of call frames. */
std::vector<VMFrame> frames;
/*! \brief The global constant pool. */
std::vector<Object> constants;
/*! \brief The fuction table index of the current function. */
Index func_index;
/*! \brief The current pointer to the code section. */
const Instruction* code;
/*! \brief The virtual machine PC. */
Index pc;
/*! \brief The special return register. */
Object return_register;
/*! \brief The set of TVM contexts the VM is currently executing on. */
std::vector<TVMContext> ctxs;
/*! \brief Push a call frame on to the call stack. */
void PushFrame(Index arg_count, Index ret_pc, const VMFunction& vm_func);
/*! \brief Pop a frame off the call stack.
* \return The number of frames left.
*/
Index PopFrame();
/*! \brief Write to a VM register.
* \param reg The register to write to.
* \param obj The object to write to.
*/
inline void WriteRegister(RegName reg, const Object& obj);
/*! \brief Read a VM register.
* \param reg The register to read from.
* \return The read object.
*/
inline Object ReadRegister(RegName reg) const;
/*! \brief Invoke a VM function.
* \param func The function.
* \param args The arguments to the function.
* \return The object representing the result.
*/
Object Invoke(const VMFunction& func, const std::vector<Object>& args);
// TODO(@jroesch): I really would like this to be a global variable.
/*! \brief Invoke a VM function by name.
* \param name The function's name.
* \param args The arguments to the function.
* \return The object representing the result.
*/
Object Invoke(const std::string& name, const std::vector<Object>& args);
VirtualMachine() : functions(), frames(), func_index(0), code(nullptr), pc(0) {}
/*! \brief Initialize the virtual machine for a set of contexts.
* \param contexts The set of TVM contexts.
*/
void Init(const std::vector<TVMContext>& contexts);
void Run();
/*! \brief A map from globals (as strings) to their index in the function map.
*/
std::unordered_map<std::string, Index> global_map_;
private:
/*! \brief Invoke a global setting up the VM state to execute.
*
* This does not begin execution of the VM.
*/
void InvokeGlobal(const VMFunction& func, const std::vector<Object>& args);
};
} // namespace vm
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_VM_H_
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The Relay virtual machine FFI namespace.
"""
from tvm._ffi.function import _init_api
_init_api("relay._vm", __name__)
......@@ -26,6 +26,7 @@ from ... import register_func, nd
from ..base import NodeBase, register_relay_node
from ..expr import Tuple, RefCreate, Call, Constant, GlobalVar, Function, const
from ..scope_builder import ScopeBuilder
from . import _vm
class Value(NodeBase):
"""Base class of all values.
......@@ -36,6 +37,9 @@ class Value(NodeBase):
"""Convert a Python scalar to a Relay scalar."""
return TensorValue(const(value, dtype).data)
def to_vm(self):
return _vm._ValueToVM(self)
@register_relay_node
class TupleValue(Value):
......@@ -278,7 +282,7 @@ class Interpreter(Executor):
ck_expr = ir_pass.infer_type(wrapped_expr, mod=self.mod)
simp_expr = ir_pass.simplify_inference(ck_expr)
ck_simp = ir_pass.infer_type(simp_expr, mod=self.mod)
fused_expr = ir_pass.fuse_ops(ck_simp)
fused_expr = ir_pass.fuse_ops(ck_simp, 0, mod=self.mod)
ck_fused = ir_pass.infer_type(fused_expr, mod=self.mod)
return ck_fused if isinstance(expr, Function) else Call(ck_fused, [])
......
# License .to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable
"""
The Relay Virtual Vachine.
Implements a Python interface to compiling and executing on the Relay VM.
"""
import tvm
from tvm._ffi.function import Object
import numpy as np
from .. import ir_pass
from ..backend.interpreter import Executor
from ..expr import GlobalVar, Function, Expr
from . import _vm
Object = Object
def optimize(expr, mod=None):
# TODO: We need to move this optimization code into the optimizer/pass manager
ck_expr = ir_pass.infer_type(expr, mod=mod)
simplified_expr = ir_pass.simplify_inference(ck_expr)
simplified_expr = ir_pass.infer_type(simplified_expr, mod=mod)
fused_expr = ir_pass.fuse_ops(simplified_expr, mod=mod)
ck_fused = ir_pass.infer_type(fused_expr, mod=mod)
return ck_fused
def _convert(arg, cargs):
if isinstance(arg, np.ndarray):
tensor = _vm._Tensor(tvm.nd.array(arg))
cargs.append(tensor)
elif isinstance(arg, tvm.nd.NDArray):
tensor = _vm._Tensor(arg)
cargs.append(tensor)
elif isinstance(arg, tuple):
field_args = []
for field in arg:
_convert(field, field_args)
cargs.append(_vm._Tuple(*field_args))
else:
raise "unsupported type"
def convert(args):
cargs = []
for arg in args:
_convert(arg, cargs)
return cargs
def _eval_vm(mod, ctx, *args):
"""
Evaluate a module on a given context with the provided arguments.
Parameters
----------
mod: relay.Module
The module to optimize, will execute its entry_func.
ctx: tvm.Context
The TVM context to execute on.
args: List[tvm.NDArray, np.ndarray]
The arguments to evaluate.
"""
main_func = mod[mod.entry_func]
if not main_func.params and isinstance(main_func.body, GlobalVar):
main_func = ir_pass.eta_expand(main_func.body, mod)
assert isinstance(main_func, Function)
main_func = optimize(mod[mod.entry_func], mod)
mod[mod.entry_func] = main_func
args = list(args)
assert isinstance(args, list)
cargs = convert(args)
result = _vm._evaluate_vm(mod, ctx.device_type, ctx.device_id, *cargs)
return result
class VMExecutor(Executor):
"""
An implementation of the executor interface for
the Relay VM.
Useful interface for experimentation and debugging
the VM can also be used directly from the API.
supported by `tvm.relay.vm`.
Parameters
----------
mod : :py:class:`~tvm.relay.module.Module`
The module to support the execution.
ctx : :py:class:`TVMContext`
The runtime context to run the code on.
target : :py:class:`Target`
The target option to build the function.
"""
def __init__(self, mod, ctx, target):
self.mod = mod
self.ctx = ctx
self.target = target
def _make_executor(self, expr):
assert isinstance(expr, Expr)
self.mod[self.mod.entry_func] = expr
main = self.mod[self.mod.entry_func]
def _vm_wrapper(*args, **kwargs):
args = self._convert_args(main, args, kwargs)
return _eval_vm(self.mod, self.ctx, *args)
return _vm_wrapper
......@@ -29,6 +29,7 @@ from . import expr as _expr
from . import ty as _ty
from .backend import interpreter as _interpreter
from .backend import graph_runtime_codegen as _graph_gen
from .backend.vm import VMExecutor
# List of optimization pass and level when switch on
OPT_PASS_LEVEL = {
......@@ -484,4 +485,7 @@ def create_executor(kind="debug",
return _interpreter.Interpreter(mod, ctx, target)
if kind == "graph":
return GraphExecutor(mod, ctx, target)
raise RuntimeError("unknown mode {0}".format(mode))
elif kind == "vm":
return VMExecutor(mod, ctx, target)
else:
raise RuntimeError("unknown execution strategy: {0}".format(kind))
......@@ -126,6 +126,20 @@ class Expr(RelayNode):
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __call__(self, *args):
"""Call the variable (if it represents a function).
Parameters
----------
args: List[relay.Expr]
The arguments to the call.
Returns
-------
call: Call
A call taking the variable as a function.
"""
return Call(self, args)
@register_relay_node
class Constant(Expr):
......@@ -191,20 +205,6 @@ class Var(Expr):
name = self.vid.name_hint
return name
def __call__(self, *args):
"""Call the variable (if it represents a function).
Parameters
----------
args: List[relay.Expr]
The arguments to the call.
Returns
-------
call: Call
A call taking the variable as a function.
"""
return Call(self, args)
@register_relay_node
class GlobalVar(Expr):
......
......@@ -391,6 +391,23 @@ def backward_fold_scale_axis(expr):
"""
return _ir_pass.backward_fold_scale_axis(expr)
def eta_expand(expr, mod):
"""Add abstraction over a function.
Parameters
----------
expr : tvm.relay.Expr
The input expression, we expect that expr's types
should be fully inferred by infer_type.
mod : tvm.relay.Module
The global module.
Returns
-------
expanded_expr : tvm.relay.Expr
The expression after eta expansion.
"""
return _ir_pass.eta_expand(expr, mod)
def forward_fold_scale_axis(expr):
"""Fold the scaling of axis into weights of conv2d/dense.
......@@ -703,7 +720,7 @@ def fold_constant(expr):
return _ir_pass.FoldConstant(expr)
def fuse_ops(expr, opt_level=1):
def fuse_ops(expr, opt_level=1, mod=None):
"""Fuse operators in expr together.
Parameters
......@@ -714,12 +731,15 @@ def fuse_ops(expr, opt_level=1):
opt_level : int
The level of fuse optimization.
mod : tvm.relay.Module
The module to perform fusion over.
Returns
-------
transformed_expr : tvm.relay.Expr
Transformed expression, containing fused result.
"""
return _ir_pass.FuseOps(expr, opt_level)
return _ir_pass.FuseOps(expr, opt_level, mod)
def combine_parallel_conv2d(expr, min_num_branches=3):
......
......@@ -21,7 +21,6 @@ from .._ffi import base as _base
from . import _make
from . import _module
from . import expr as _expr
from . import ty as _ty
@register_relay_node
......@@ -77,9 +76,18 @@ class Module(RelayNode):
return self._add(var, val)
def _add(self, var, val, update=False):
if isinstance(val, _expr.Function):
if isinstance(val, _expr.Expr):
if isinstance(var, _base.string_types):
var = _expr.GlobalVar(var)
# TODO(@jroesch): Port this logic to C++.
if not isinstance(val, _expr.Function):
if isinstance(val, _expr.GlobalVar):
val = ir_pass.eta_expand(val, self)
else:
val = _expr.Function([], val)
_make.Module_Add(self, var, val, update)
else:
assert isinstance(val, _ty.Type)
......@@ -156,3 +164,7 @@ class Module(RelayNode):
tvm.TVMError if we cannot find corresponding global type var.
"""
return _module.Module_GetGlobalTypeVar(self, name)
@staticmethod
def from_expr(expr):
return _module.Module_FromExpr(expr)
......@@ -510,7 +510,7 @@ Mutate_(const Add* op, const Expr& self) {
} else {
ret.CopyOnWrite()->AddToSelf(ToSplitExpr(b), 1);
}
return ret;
return std::move(ret);
}
Expr CanonicalSimplifier::Impl::
......@@ -536,7 +536,7 @@ Mutate_(const Sub* op, const Expr& self) {
} else {
ret.CopyOnWrite()->AddToSelf(ToSplitExpr(b), -1);
}
return ret;
return std::move(ret);
}
......@@ -561,11 +561,11 @@ Mutate_(const Mul* op, const Expr& self) {
if (a.as<SumExprNode>()) {
SumExpr ret(std::move(a.node_));
ret.CopyOnWrite()->MulToSelf(bconst->value);
return ret;
return std::move(ret);
} else {
SplitExpr ret = ToSplitExpr(std::move(a));
ret.CopyOnWrite()->MulToSelf(bconst->value);
return ret;
return std::move(ret);
}
}
......@@ -684,7 +684,7 @@ Mutate_(const Div* op, const Expr& self) {
SplitDivConst(ToSplitExpr(temp), cval), 1);
}
}
return lhs;
return std::move(lhs);
}
} else {
// if a >= 0 && a < cval, then result == 0
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -39,7 +39,7 @@ namespace relay {
namespace backend {
/*!
* \brief Context name / index
* \brief Context name / index
* See: python/tvm/_ffi/runtime_ctypes.py
*/
struct ContextMap {
......@@ -91,13 +91,13 @@ const std::unordered_map<std::string, int> ContextMap::str2mask = {
/*!
* \brief A data structure to map the names of specific optimizations to
* numeric optimization levels
*
*
*/
struct OptPassLevel {
static const std::unordered_map<std::string, int> _data;
/*!
* \brief Get level for an optimization pass
*
*
* \param key pass name
* \return int level
*/
......@@ -123,7 +123,7 @@ const std::unordered_map<std::string, int> OptPassLevel::_data = {
/*!
* \brief Output of building module
*
*
*/
struct BuildOutput {
std::string graph_json;
......@@ -133,7 +133,7 @@ struct BuildOutput {
/*!
* \brief Relay building config
*
*
*/
struct RelayBuildConfig {
int opt_level{2};
......@@ -153,8 +153,8 @@ struct RelayBuildConfig {
};
/*!
* \brief GraphCodegen module wrapper
*
* \brief GraphCodegen module wrapper
*
*/
struct GraphCodegen {
public:
......@@ -225,7 +225,7 @@ Function CallPackedFunc(const std::string &name, Args... args) {
/*!
* \brief Relay build module
*
*
*/
class RelayBuildModule : public runtime::ModuleNode {
public:
......@@ -309,23 +309,23 @@ class RelayBuildModule : public runtime::ModuleNode {
}
/*!
* \brief Add extra pass into build cfg
*
* \param pass_name name of pass
*
* \param pass_name name of pass
*/
void AddPass(const std::string& pass_name) {
cfg_.enabled_pass.insert(pass_name);
}
/*!
* \brief Disable a specific pass in cfg
*
*
* \param pass_name name of pass
*/
void DisablePass(const std::string& pass_name) {
cfg_.disabled_pass.insert(pass_name);
}
/*!
* \brief Set the Fallback device
*
* \brief Set the Fallback device
*
* \param device name
*/
void SetFallBackDev(const std::string& dev) {
......@@ -342,7 +342,7 @@ class RelayBuildModule : public runtime::ModuleNode {
/*!
* \brief List all paramter names
*
*
* \return Array<StringImm> names of params
*/
Array<HalideIR::Expr> ListParamNames() {
......@@ -355,7 +355,7 @@ class RelayBuildModule : public runtime::ModuleNode {
/*!
* \brief Get params dictionary
*
*
* \return Map<std::string, Constant> params dictionary
*/
Map<std::string, Constant> GetParams() {
......@@ -527,10 +527,10 @@ class RelayBuildModule : public runtime::ModuleNode {
* compilation. CPU is used as the fallback device if it wasn't provided.
* Meanwhile, a CPU device type and "llvm" pair will be added to the target
* dictionary in this case.
*
*
* \param targets dictionary
* \param cfg
* \return Map<HalideIR::Expr, HalideIR::Expr>
* \param cfg
* \return Map<HalideIR::Expr, HalideIR::Expr>
*/
Map<HalideIR::Expr, HalideIR::Expr> UpdateHeterogeneousInputs(
const std::unordered_map<std::string, std::string>& targets,
......@@ -555,11 +555,11 @@ class RelayBuildModule : public runtime::ModuleNode {
/*!
* \brief Execute the device annotation passes to update the input program and
* target information.
*
* \param func
* \param cfg
* \param targets_map_ptr
* \return Function
*
* \param func
* \param cfg
* \param targets_map_ptr
* \return Function
*/
Function RunDeviceAnnotationPass(
Function func,
......@@ -603,7 +603,7 @@ class RelayBuildModule : public runtime::ModuleNode {
}
/*!
* \brief Build module given lowered functions for each target
*
*
* \param lowered_funcs target_str -> Array<LoweredFunc> map
* \param targets Targets map
* \param cfg Building configuration
......@@ -674,8 +674,9 @@ class RelayBuildModule : public runtime::ModuleNode {
if (device_target.size() > 1) {
func = RunDeviceAnnotationPass(func, cfg, &device_target);
}
// TODO(@jroesch): use the passes directly.
func = CallPackedFunc("relay._ir_pass.infer_type", func, nullptr);
func = CallPackedFunc("relay._ir_pass.FuseOps", func, cfg.opt_level);
func = CallPackedFunc("relay._ir_pass.FuseOps", func, cfg.opt_level, nullptr);
func = CallPackedFunc("relay._ir_pass.infer_type", func, nullptr);
graph_codegen_ = std::unique_ptr<GraphCodegen>(new GraphCodegen());
......
......@@ -28,6 +28,7 @@
#include <tvm/lowered_func.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/pass.h>
#include <string>
#include <functional>
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -278,17 +278,19 @@ class Interpreter :
return TupleValueNode::make(values);
}
// TODO(@jroesch): this doesn't support mutual letrec.
Value MakeClosure(const Function& func, const Var& letrec_name = Var()) {
// TODO(@jroesch): this doesn't support mututal letrec
inline Value MakeClosure(const Function& func, Var letrec_name = Var()) {
tvm::Map<Var, Value> captured_mod;
Array<Var> free_vars = FreeVars(func);
for (const auto& var : free_vars) {
// Evaluate the free var (which could be a function call) if it hasn't
// shown up in a letting binding that has invoked the function.
if (!letrec_name.defined() || letrec_name != var) {
captured_mod.Set(var, Eval(var));
if (letrec_name.defined() && letrec_name == var) {
continue;
}
captured_mod.Set(var, Eval(var));
}
// We must use mutation here to build a self referential closure.
......@@ -296,7 +298,7 @@ class Interpreter :
auto mut_closure =
static_cast<ClosureNode*>(const_cast<Node*>(closure.get()));
mut_closure->env.Set(letrec_name, closure);
return closure;
return std::move(closure);
}
Value VisitExpr_(const FunctionNode* func_node) final {
......
......@@ -113,6 +113,7 @@ void ErrorReporter::RenderErrors(const Module& module, bool use_color) {
annotated_prog << AsText(func, false, [&err_map](tvm::relay::Expr expr) {
auto it = err_map.find(expr);
if (it != err_map.end()) {
CHECK_NE(it->second.size(), 0);
return it->second;
} else {
return std::string("");
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -271,6 +271,7 @@ class RelayHashHandler:
}
for (auto t : call->type_args) {
CHECK(t.defined());
hash = Combine(hash, TypeHash(t));
}
......@@ -394,7 +395,6 @@ class RelayHashHandler:
size_t hash = std::hash<std::string>()(PatternWildcardNode::_type_key);
return hash;
}
private:
// renaming of NodeRef to indicate two nodes equals to each other
std::unordered_map<NodeRef, size_t, NodeHash, NodeEqual> hash_map_;
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -59,9 +59,13 @@ Module ModuleNode::make(tvm::Map<GlobalVar, Function> global_funcs,
GlobalVar ModuleNode::GetGlobalVar(const std::string& name) {
auto it = global_var_map_.find(name);
CHECK(it != global_var_map_.end())
<< "Cannot find global var " << name << " in the Module";
return (*it).second;
if (it == global_var_map_.end()) {
auto gvar = GlobalVarNode::make(name);
global_var_map_.Set(name, gvar);
return gvar;
} else {
return (*it).second;
}
}
void ModuleNode::AddUnchecked(const GlobalVar& var,
......@@ -215,6 +219,11 @@ TVM_REGISTER_API("relay._module.Module_LookupDef_str")
return mod->LookupDef(var);
});
TVM_REGISTER_API("relay._module.Module_FromExpr")
.set_body_typed<Module(Expr)>([](Expr e) {
return ModuleNode::FromExpr(e);
});
TVM_REGISTER_API("relay._module.Module_Update")
.set_body_typed<void(Module, Module)>([](Module mod, Module from) {
mod->Update(from);
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -94,7 +94,6 @@ class TypeFunctor<R(const Type& n, Args...)> {
virtual R VisitType_(const GlobalTypeVarNode* op, Args... args) TYPE_FUNCTOR_DEFAULT;
virtual R VisitType_(const TypeCallNode* op, Args... args) TYPE_FUNCTOR_DEFAULT;
virtual R VisitType_(const TypeDataNode* op, Args... args) TYPE_FUNCTOR_DEFAULT;
virtual R VisitTypeDefault_(const Node* op, Args...) {
LOG(FATAL) << "Do not have a default for " << op->type_key();
throw; // unreachable, written to stop compiler warning
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -24,7 +24,6 @@
* for type relations.
*/
#include <tvm/relay/expr.h>
#include <tvm/relay/logging.h>
#include <tvm/relay/op.h>
#include <tvm/ir_pass.h>
#include <numeric>
......@@ -109,7 +108,7 @@ bool BroadcastRel(const Array<Type>& types,
const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 3);
RELAY_LOG(INFO) << "In1:" << types[0] << ",In2:" << types[1]
DLOG(INFO) << "In1:" << types[0] << ",In2:" << types[1]
<< ",Out:" << types[2] << std::endl;
if (auto t0 = ToTensorType(types[0])) {
if (auto t1 = ToTensorType(types[1])) {
......@@ -127,7 +126,7 @@ bool BroadcastCompRel(const Array<Type>& types,
const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 3);
RELAY_LOG(INFO) << "In1:" << types[0] << ",In2:" << types[1]
DLOG(INFO) << "In1:" << types[0] << ",In2:" << types[1]
<< ",Out:" << types[2] << std::endl;
if (auto t0 = ToTensorType(types[0])) {
if (auto t1 = ToTensorType(types[1])) {
......
......@@ -18,34 +18,54 @@
*/
/*!
* \file tvm/relay/logging.h
* \brief A wrapper around dmlc-core/logging.h which adds the ability
* to toggle logging via an environment variable.
* Copyright (c) 2019 by Contributors
*
* \file eta_expand.cc
*
* \brief Add abstraction over a function. For example, abs will become (fun x -> abs x).
*
*/
#ifndef TVM_RELAY_LOGGING_H_
#define TVM_RELAY_LOGGING_H_
#include <dmlc/logging.h>
#include <string>
#include <cstdlib>
#include <iostream>
#include <tvm/relay/pass.h>
namespace tvm {
namespace relay {
static bool logging_enabled() {
if (auto var = std::getenv("RELAY_LOG")) {
std::string is_on(var);
return is_on == "1";
Expr EtaExpand(const Expr& e, const Module& mod) {
tvm::Array<Var> original_params;
tvm::Array<Expr> params;
tvm::Array<Var> args;
tvm::Array<TypeVar> original_type_params;
Type ret_type;
if (e->is_type<GlobalVarNode>()) {
auto gvar_node = e.as_derived<GlobalVarNode>();
auto func = mod->Lookup(GetRef<GlobalVar>(gvar_node));
original_params = func->params;
original_type_params = func->type_params;
ret_type = func->ret_type;
} else {
return false;
auto inferred = InferType(e, mod);
CHECK(inferred->is_type<FunctionNode>());
auto func = GetRef<Function>(inferred.as_derived<FunctionNode>());
original_params = func->params;
original_type_params = func->type_params;
ret_type = func->ret_type;
}
for (size_t i = 0; i < original_params.size(); ++i) {
auto var = VarNode::make("a", original_params[i]->type_annotation);
params.push_back(var);
args.push_back(var);
}
auto new_func =
FunctionNode::make(args, CallNode::make(e, params), ret_type, original_type_params);
return InferType(new_func, mod);
}
#define RELAY_LOG(severity) LOG_IF(severity, logging_enabled())
TVM_REGISTER_API("relay._ir_pass.eta_expand").set_body_typed(EtaExpand);
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_LOGGING_H_
......@@ -156,7 +156,7 @@ class ConstantFolder : public ExprMutator {
// Constant evaluate a expression.
Expr ConstEvaluate(Expr expr) {
expr = InferType(expr, Module(nullptr));
expr = FuseOps(expr, 0);
expr = FuseOps(expr, 0, Module(nullptr));
expr = InferType(expr, Module(nullptr));
return ValueToExpr(executor_(expr));
}
......
......@@ -808,6 +808,7 @@ class FuseMutator : private ExprMutator {
std::unordered_map<const Node*, GraphPartitioner::Group*> gmap_;
/* \brief Internal group information map. */
std::unordered_map<GraphPartitioner::Group*, GroupInfo> ginfo_;
// Skip primitive function.
Expr VisitExpr_(const FunctionNode* fn_node) {
if (fn_node->IsPrimitive()) {
......@@ -816,6 +817,7 @@ class FuseMutator : private ExprMutator {
return ExprMutator::VisitExpr_(fn_node);
}
}
// Transform calls.
Expr VisitExpr_(const CallNode* call) {
static const Op& stop_fusion = Op::Get("annotation.stop_fusion");
......@@ -870,7 +872,7 @@ class FuseMutator : private ExprMutator {
return MakeNewFunction(ret_group, tuple_get->checked_type(), new_node);
}
// This is an intermediate node in the group
return new_node;
return std::move(new_node);
}
Expr MakeNewFunction(GraphPartitioner::Group* group, Type ret_type, Expr body) {
......@@ -919,13 +921,45 @@ class FuseMutator : private ExprMutator {
}
};
// Temporary solution, should be handled by implementing a "FunctionPass"
// which applies fusion to each function.
struct GlobalVarLiveness : ExprVisitor {
Module module;
std::set<GlobalVar> visited;
explicit GlobalVarLiveness(const Module& mod) : module(mod), visited() {}
Expr FuseOps(const Expr& expr, int fuse_opt_level) {
void VisitExpr_(const GlobalVarNode* gvar_node) {
auto gvar = GetRef<GlobalVar>(gvar_node);
if (visited.find(gvar) == visited.end()) {
visited.insert(gvar);
this->VisitExpr(this->module->Lookup(gvar));
}
}
};
std::set<GlobalVar> LiveGlobals(const Module& mod, const Expr& expr) {
auto gvl = GlobalVarLiveness(mod);
gvl.VisitExpr(expr);
return gvl.visited;
}
Expr FuseOps(const Expr& expr, int fuse_opt_level, const Module& module) {
// First we convert all chains of fusable ops into
// abstracted functions which we mark as primtive
// then we convert these primtive functions into
// new operators.
return FuseMutator().Transform(expr, fuse_opt_level);
if (!module.defined()) {
return FuseMutator().Transform(expr, fuse_opt_level);
} else {
auto lgvs = LiveGlobals(module, expr);
for (auto lv : lgvs) {
auto body = module->Lookup(lv);
auto e = FuseMutator().Transform(body, fuse_opt_level);
module->Add(lv, Downcast<Function>(e), true);
}
return FuseMutator().Transform(expr, fuse_opt_level);
}
}
TVM_REGISTER_API("relay._ir_pass.FuseOps")
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -585,7 +585,7 @@ class PartialEvaluator : public ExprFunctor<PStatic(const Expr& e, LetList* ll)>
// Constant evaluate a expression.
PStatic ConstEvaluate(const Expr& expr, LetList* ll) {
Expr infered = InferType(expr, Module(nullptr));
Expr fused = FuseOps(infered, 0);
Expr fused = FuseOps(infered, 0, Module(nullptr));
Expr fused_infered = InferType(fused, Module(nullptr));
return Reify(executor_(fused_infered), ll);
}
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -26,6 +26,7 @@
*/
#include <tvm/relay/pass.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/logging.h>
#include "let_list.h"
#include "../../common/arena.h"
#include "pass_util.h"
......@@ -306,7 +307,22 @@ Expr ToANormalFormAux(const Expr& e,
Expr ToANormalForm(const Expr& e,
const Module& m,
std::unordered_set<GlobalVar, NodeHash, NodeEqual>* gv) {
return TransformF([&](const Expr& e) { return ToANormalFormAux(e, m, gv); }, e);
DLOG(INFO)
<< "ToANF:" << std::endl
<< AsText(e, false);
Expr ret =
TransformF([&](const Expr& e) {
return ToANormalFormAux(e, m, gv);
}, e);
CHECK_EQ(FreeVars(ret).size(), 0);
DLOG(INFO)
<< "ToANF: transformed" << std::endl
<< AsText(ret, false);
return ret;
}
Expr ToANormalForm(const Expr& e, const Module& m) {
......
......@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -796,7 +796,10 @@ Function InferType(const Function& func,
CHECK(WellFormed(func_ret));
auto free_tvars = FreeTypeVars(func_ret, mod);
CHECK(free_tvars.size() == 0)
<< "Found unbound type variables in " << func << ": " << free_tvars;
<< "Found unbound type variables in: "
<< std::endl
<< AsText(func, true)
<< std::endl << free_tvars;
return Downcast<Function>(func_ret);
}
......
......@@ -19,7 +19,7 @@
/*!
* Copyright (c) 2019 by Contributors
* \file tvm/runtime/memory_manager.cc
* \file tvm/runtime/vm/memory_manager.cc
* \brief Allocate and manage memory for the runtime.
*/
#include <utility>
......@@ -32,6 +32,24 @@ namespace tvm {
namespace runtime {
namespace vm {
inline void VerifyDataType(DLDataType dtype) {
CHECK_GE(dtype.lanes, 1);
if (dtype.code == kDLFloat) {
CHECK_EQ(dtype.bits % 8, 0);
} else {
// allow uint1 as a special flag for bool.
if (dtype.bits == 1 && dtype.code == kDLUInt) return;
CHECK_EQ(dtype.bits % 8, 0);
}
CHECK_EQ(dtype.bits & (dtype.bits - 1), 0);
}
inline size_t GetDataAlignment(const DLTensor& arr) {
size_t align = (arr.dtype.bits / 8) * arr.dtype.lanes;
if (align < kAllocAlignment) return kAllocAlignment;
return align;
}
MemoryManager* MemoryManager::Global() {
static MemoryManager memory_manager;
return &memory_manager;
......@@ -40,8 +58,8 @@ MemoryManager* MemoryManager::Global() {
Allocator* MemoryManager::GetAllocator(TVMContext ctx) {
std::lock_guard<std::mutex> lock(mu_);
if (allocators_.find(ctx) == allocators_.end()) {
// LOG(INFO) << "New allocator for " << DeviceName(ctx.device_type) << "("
// << ctx.device_id << ")";
DLOG(INFO) << "New allocator for " << DeviceName(ctx.device_type) << "("
<< ctx.device_id << ")";
std::unique_ptr<Allocator> alloc(new NaiveAllocator(ctx));
allocators_.emplace(ctx, std::move(alloc));
}
......
......@@ -26,6 +26,7 @@
#define TVM_RUNTIME_VM_MEMORY_MANAGER_H_
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/ndarray.h>
#include <functional>
#include <memory>
#include <mutex>
......
......@@ -35,7 +35,7 @@ namespace vm {
class NaiveAllocator final : public Allocator {
public:
explicit NaiveAllocator(TVMContext ctx) : Allocator(), used_memory_(0) {}
explicit NaiveAllocator(TVMContext ctx) : Allocator(), used_memory_(0), ctx_(ctx) {}
Buffer Alloc(size_t nbytes, size_t alignment, TVMType type_hint) override {
Buffer buf;
......
......@@ -41,9 +41,6 @@ std::ostream& operator<<(std::ostream& os, const ObjectTag& tag) {
case ObjectTag::kTensor:
os << "Tensor";
break;
case ObjectTag::kExternalFunc:
os << "ExternalFunction";
break;
default:
LOG(FATAL) << "Invalid object tag: found " << static_cast<int>(tag);
}
......@@ -68,21 +65,21 @@ Object Object::Closure(size_t func_index, const std::vector<Object>& free_vars)
}
ObjectPtr<TensorCell> Object::AsTensor() const {
CHECK(ptr.get());
CHECK(ptr.get()->tag == ObjectTag::kTensor);
return ptr.As<TensorCell>();
CHECK(ptr_.get());
CHECK(ptr_.get()->tag == ObjectTag::kTensor);
return ptr_.As<TensorCell>();
}
ObjectPtr<DatatypeCell> Object::AsDatatype() const {
CHECK(ptr.get());
CHECK(ptr.get()->tag == ObjectTag::kDatatype);
return ptr.As<DatatypeCell>();
CHECK(ptr_.get());
CHECK(ptr_.get()->tag == ObjectTag::kDatatype);
return ptr_.As<DatatypeCell>();
}
ObjectPtr<ClosureCell> Object::AsClosure() const {
CHECK(ptr.get());
CHECK(ptr.get()->tag == ObjectTag::kClosure);
return ptr.As<ClosureCell>();
CHECK(ptr_.get());
CHECK(ptr_.get()->tag == ObjectTag::kClosure);
return ptr_.As<ClosureCell>();
}
NDArray ToNDArray(const Object& obj) {
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file src/runtime/vm/vm.cc
* \brief The Relay virtual machine.
*/
#include <tvm/logging.h>
#include <tvm/runtime/vm.h>
#include <chrono>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <vector>
#include "../../runtime/vm/memory_manager.h"
#include "../../runtime/vm/naive_allocator.h"
using namespace tvm::runtime;
namespace tvm {
namespace runtime {
namespace vm {
Instruction::Instruction() {}
template <typename T>
static T* Duplicate(T* src, Index size) {
auto dst = new T[size];
std::copy(src, src + size, dst);
return dst;
}
Instruction::Instruction(const Instruction& instr) {
this->op = instr.op;
this->dst = instr.dst;
switch (instr.op) {
case Opcode::Move:
this->from = instr.from;
return;
case Opcode::Select:
this->select_cond = instr.select_cond;
this->select_op1 = instr.select_op1;
this->select_op2 = instr.select_op2;
return;
case Opcode::Ret:
this->result = instr.result;
return;
case Opcode::AllocTensor:
this->shape_register = instr.shape_register;
this->dtype = instr.dtype;
return;
case Opcode::AllocDatatype:
this->constructor_tag = instr.constructor_tag;
this->num_fields = instr.num_fields;
this->datatype_fields = Duplicate<RegName>(instr.datatype_fields, instr.num_fields);
return;
case Opcode::AllocClosure:
this->clo_index = instr.clo_index;
this->num_freevar = instr.num_freevar;
this->free_vars = Duplicate<RegName>(instr.free_vars, instr.num_freevar);
return;
case Opcode::InvokePacked:
this->packed_index = instr.packed_index;
this->arity = instr.arity;
this->output_size = instr.output_size;
this->packed_args = Duplicate<RegName>(instr.packed_args, instr.arity);
return;
case Opcode::InvokeClosure:
this->closure = instr.closure;
this->closure_args_num = instr.closure_args_num;
this->closure_args = Duplicate<RegName>(instr.closure_args, instr.closure_args_num);
return;
case Opcode::Invoke:
this->func_index = instr.func_index;
this->num_args = instr.num_args;
this->invoke_args_registers = Duplicate<RegName>(instr.invoke_args_registers, instr.num_args);
return;
case Opcode::If:
this->if_cond = instr.if_cond;
this->true_offset = instr.true_offset;
this->false_offset = instr.false_offset;
return;
case Opcode::LoadConst:
this->const_index = instr.const_index;
return;
case Opcode::GetField:
this->object = instr.object;
this->field_index = instr.field_index;
return;
case Opcode::Goto:
this->pc_offset = instr.pc_offset;
return;
default:
std::ostringstream out;
out << "Invalid instruction " << static_cast<int>(instr.op);
throw std::runtime_error(out.str());
}
}
Instruction::~Instruction() {
switch (this->op) {
case Opcode::Move:
case Opcode::Select:
case Opcode::Ret:
case Opcode::AllocTensor:
case Opcode::If:
case Opcode::LoadConst:
case Opcode::GetField:
case Opcode::Goto:
return;
case Opcode::AllocDatatype:
delete this->datatype_fields;
return;
case Opcode::AllocClosure:
delete this->free_vars;
return;
case Opcode::InvokePacked:
delete this->packed_args;
return;
case Opcode::InvokeClosure:
delete this->closure_args;
return;
case Opcode::Invoke:
delete this->invoke_args_registers;
return;
default:
std::ostringstream out;
out << "Invalid instruction " << static_cast<int>(this->op);
throw std::runtime_error(out.str());
}
}
Instruction Instruction::Ret(RegName result) {
Instruction instr;
instr.op = Opcode::Ret;
instr.result = result;
return instr;
}
Instruction Instruction::InvokePacked(Index packed_index, Index arity, Index output_size,
const std::vector<RegName>& args) {
Instruction instr;
instr.op = Opcode::InvokePacked;
instr.packed_index = packed_index;
instr.arity = arity;
instr.output_size = output_size;
instr.packed_args = new RegName[arity];
for (Index i = 0; i < arity; ++i) {
instr.packed_args[i] = args[i];
}
return instr;
}
Instruction Instruction::AllocTensor(RegName shape_register, DLDataType dtype, Index dst) {
Instruction instr;
instr.op = Opcode::AllocTensor;
instr.dst = dst;
instr.shape_register = shape_register;
instr.dtype = dtype;
return instr;
}
Instruction Instruction::AllocDatatype(Index tag, Index num_fields,
const std::vector<RegName>& datatype_fields, Index dst) {
Instruction instr;
instr.op = Opcode::AllocDatatype;
instr.dst = dst;
instr.constructor_tag = tag;
instr.num_fields = num_fields;
instr.datatype_fields = new RegName[num_fields];
for (Index i = 0; i < num_fields; ++i) {
instr.datatype_fields[i] = datatype_fields[i];
}
return instr;
}
Instruction Instruction::AllocClosure(Index func_index, Index free_vars,
const std::vector<RegName>& free_var_register, Index dst) {
Instruction instr;
instr.op = Opcode::AllocClosure;
instr.dst = dst;
instr.clo_index = func_index;
instr.num_freevar = free_vars;
instr.free_vars = new RegName[instr.num_freevar];
for (Index i = 0; i < instr.num_freevar; ++i) {
instr.free_vars[i] = free_var_register[i];
}
return instr;
}
Instruction Instruction::GetField(RegName object, Index field_index, RegName dst) {
Instruction instr;
instr.op = Opcode::GetField;
instr.dst = dst;
instr.object = object;
instr.field_index = field_index;
return instr;
}
Instruction Instruction::If(RegName cond, Index true_branch, Index false_branch) {
Instruction instr;
instr.op = Opcode::If;
instr.if_cond = cond;
instr.true_offset = true_branch;
instr.false_offset = false_branch;
return instr;
}
Instruction Instruction::Select(RegName cond, RegName op1, RegName op2, RegName dst) {
Instruction instr;
instr.op = Opcode::Select;
instr.dst = dst;
instr.select_cond = cond;
instr.select_op1 = op1;
instr.select_op2 = op2;
return instr;
}
Instruction Instruction::Goto(Index pc_offset) {
Instruction instr;
instr.op = Opcode::Goto;
instr.pc_offset = pc_offset;
return instr;
}
Instruction Instruction::Invoke(Index func_index, const std::vector<RegName>& args_registers,
RegName dst) {
Instruction instr;
instr.op = Opcode::Invoke;
instr.dst = dst;
instr.func_index = func_index;
instr.num_args = args_registers.size();
instr.invoke_args_registers = new RegName[instr.num_args];
for (Index i = 0; i < instr.num_args; ++i) {
instr.invoke_args_registers[i] = args_registers[i];
}
return instr;
}
Instruction Instruction::InvokeClosure(RegName closure, const std::vector<RegName>& args,
RegName dst) {
Instruction instr;
instr.op = Opcode::InvokeClosure;
instr.dst = dst;
instr.closure = closure;
instr.closure_args_num = args.size();
instr.closure_args = new RegName[args.size()];
for (size_t i = 0; i < args.size(); ++i) {
instr.closure_args[i] = args[i];
}
return instr;
}
Instruction Instruction::LoadConst(Index const_index, RegName dst) {
Instruction instr;
instr.op = Opcode::LoadConst;
instr.dst = dst;
instr.const_index = const_index;
return instr;
}
Instruction Instruction::Move(RegName src, RegName dst) {
Instruction instr;
instr.op = Opcode::Move;
instr.dst = dst;
instr.from = src;
return instr;
}
void DLDatatypePrint(std::ostream& os, const DLDataType& dtype) {
switch (dtype.code) {
case kDLInt:
os << "int";
break;
case kDLUInt:
os << "uint";
break;
case kDLFloat:
os << "float";
break;
}
os << dtype.bits;
if (dtype.lanes != 0) {
os << "[" << dtype.lanes << "]";
}
}
void InstructionPrint(std::ostream& os, const Instruction& instr) {
switch (instr.op) {
case Opcode::Move: {
os << "move " << instr.from << " " << instr.dst;
break;
}
case Opcode::Ret: {
os << "ret " << instr.result;
break;
}
case Opcode::InvokePacked: {
os << "invoke_packed ";
os << instr.packed_index;
os << " " << instr.arity;
os << "(";
for (Index i = 0; i < instr.arity; ++i) {
os << instr.packed_args[i] << ",";
}
os << ")";
os << " " << instr.output_size;
break;
}
case Opcode::AllocTensor: {
os << "alloc_tensor ";
os << instr.dst << " ";
os << instr.shape_register << " ";
DLDatatypePrint(os, instr.dtype);
break;
}
case Opcode::AllocDatatype: {
os << "alloc_data ";
os << instr.dst << " ";
os << instr.constructor_tag << " ";
os << instr.num_fields;
break;
}
case Opcode::AllocClosure: {
os << "alloc_closure ";
os << instr.dst << " ";
os << instr.clo_index << " ";
os << instr.num_freevar << "(";
for (Index i = 0; i < instr.num_freevar; ++i) {
os << instr.free_vars[i] << ",";
}
os << ")";
break;
}
case Opcode::If: {
os << "if "
<< "$" << instr.if_cond << " " << instr.true_offset << " " << instr.false_offset;
break;
}
case Opcode::Invoke: {
os << "invoke "
<< "$" << instr.dst << " " << instr.func_index << " " << instr.num_args << "(";
for (Index i = 0; i < instr.num_args; ++i) {
os << instr.invoke_args_registers[i] << ",";
}
os << ")";
break;
}
case Opcode::InvokeClosure: {
os << "invoke_closure "
<< "$" << instr.dst << " " << instr.closure << " " << instr.closure_args_num << "()";
break;
}
case Opcode::LoadConst: {
os << "load_const "
<< "$" << instr.dst << " " << instr.const_index;
break;
}
case Opcode::GetField: {
os << "get_field " << instr.dst << " " << instr.object << " " << instr.field_index;
break;
}
case Opcode::Goto: {
os << "goto " << instr.pc_offset;
break;
}
case Opcode::Select: {
os << "select " << instr.dst << " " << instr.select_cond << " " << instr.select_op1 << " "
<< instr.select_op2;
break;
}
default:
LOG(FATAL) << "should never hit this case" << static_cast<int>(instr.op);
break;
}
}
std::ostream& operator<<(std::ostream& os, const Instruction& instr) {
InstructionPrint(os, instr);
return os;
}
void VMFunctionPrint(std::ostream& os, const VMFunction& vm_func) {
os << vm_func.name << ": " << std::endl;
for (size_t i = 0; i < vm_func.instructions.size(); ++i) {
os << i << ": ";
InstructionPrint(os, vm_func.instructions[i]);
os << ";" << std::endl;
}
}
std::ostream& operator<<(std::ostream& os, const VMFunction& vm_func) {
VMFunctionPrint(os, vm_func);
return os;
}
void VirtualMachine::PushFrame(Index arg_count, Index ret_pc, const VMFunction& vm_func) {
auto frame = VMFrame(ret_pc, func_index, arg_count, code, vm_func.register_file_size);
frames.push_back(frame);
}
Index VirtualMachine::PopFrame() {
CHECK_GT(frames.size(), 0);
const VMFrame& fr = frames.back();
func_index = fr.func_index;
code = fr.code;
pc = fr.pc;
auto call_stack_size = frames.size();
frames.pop_back();
return call_stack_size;
}
void VirtualMachine::InvokeGlobal(const VMFunction& func, const std::vector<Object>& args) {
DLOG(INFO) << "===================\nInvoking global " << func.name << " " << args.size()
<< std::endl;
PushFrame(func.params, this->pc + 1, func);
for (size_t i = 0; i < args.size(); ++i) {
WriteRegister(i, args[i]);
}
DLOG(INFO) << "func.params= " << func.params << std::endl;
code = func.instructions.data();
pc = 0;
}
Object VirtualMachine::Invoke(const VMFunction& func, const std::vector<Object>& args) {
DLOG(INFO) << "Executing Function: " << std::endl << func << std::endl;
InvokeGlobal(func, args);
Run();
auto alloc = MemoryManager::Global()->GetAllocator(ctxs[0]);
DLOG(INFO) << "Memory used: " << alloc->UsedMemory() << " B\n";
return return_register;
}
Object VirtualMachine::Invoke(const std::string& name, const std::vector<Object>& args) {
auto func_index = this->global_map_[name];
DLOG(INFO) << "Invoke Global " << name << " at index " << func_index << std::endl;
return Invoke(this->functions[func_index], args);
}
void InvokePacked(const PackedFunc& func, Index arg_count, Index output_size,
const std::vector<Object>& args) {
std::vector<TVMValue> values(arg_count);
std::vector<int> codes(arg_count);
runtime::TVMArgsSetter setter(values.data(), codes.data());
for (Index i = 0; i < arg_count; i++) {
NDArray data = ToNDArray(args[i]);
setter(i, data);
}
TVMRetValue rv;
func.CallPacked(TVMArgs(values.data(), codes.data(), arg_count), &rv);
}
void VirtualMachine::Init(const std::vector<TVMContext>& ctxs) { this->ctxs = ctxs; }
inline void VirtualMachine::WriteRegister(Index r, const Object& val) {
frames.back().register_file[r] = val;
}
inline Object VirtualMachine::ReadRegister(Index r) const {
return frames.back().register_file[r];
}
void VirtualMachine::Run() {
CHECK(this->code);
this->pc = 0;
Index frame_start = frames.size();
while (true) {
main_loop:
auto const& instr = this->code[this->pc];
DLOG(INFO) << "\nExecuting(" << pc << "): ";
#if USE_RELAY_DEBUG
InstructionPrint(std::cout, instr);
#endif // USE_RELAY_DEBUG
switch (instr.op) {
case Opcode::Move: {
Object from_obj;
if (instr.from == 0) {
from_obj = return_register;
} else {
from_obj = ReadRegister(instr.from);
}
WriteRegister(instr.dst, from_obj);
pc++;
goto main_loop;
}
case Opcode::LoadConst: {
WriteRegister(instr.dst, this->constants[instr.const_index]);
pc++;
goto main_loop;
}
case Opcode::Invoke: {
std::vector<Object> args;
for (Index i = 0; i < instr.num_args; ++i) {
args.push_back(ReadRegister(instr.invoke_args_registers[i]));
}
InvokeGlobal(this->functions[instr.func_index], args);
frames.back().caller_return_register = instr.dst;
goto main_loop;
}
case Opcode::InvokePacked: {
const auto& func = packed_funcs[instr.packed_index];
const auto& arity = instr.arity;
std::vector<Object> args;
for (Index i = 0; i < arity; ++i) {
args.push_back(ReadRegister(instr.packed_args[i]));
}
InvokePacked(func, arity, instr.output_size, args);
for (Index i = 0; i < instr.output_size; ++i) {
WriteRegister(instr.packed_args[instr.arity - instr.output_size + i],
args[instr.arity - instr.output_size + i]);
}
pc++;
goto main_loop;
}
case Opcode::InvokeClosure: {
auto object = ReadRegister(instr.closure);
const auto& closure = object.AsClosure();
std::vector<Object> args;
for (Index i = 0; i < instr.closure_args_num; ++i) {
args.push_back(ReadRegister(instr.closure_args[i]));
}
for (auto free_var : closure->free_vars) {
args.push_back(free_var);
}
InvokeGlobal(this->functions[closure->func_index], args);
frames.back().caller_return_register = instr.dst;
goto main_loop;
}
case Opcode::GetField: {
auto object = ReadRegister(instr.object);
CHECK(object->tag == ObjectTag::kDatatype)
<< "Object is not data type object, register " << instr.object << ", Object tag "
<< static_cast<int>(object->tag);
const auto& tuple = object.AsDatatype();
auto field = tuple->fields[instr.field_index];
WriteRegister(instr.dst, field);
pc++;
goto main_loop;
}
case Opcode::Goto: {
pc += instr.pc_offset;
goto main_loop;
}
case Opcode::If: {
// How do we do this efficiently?
DLContext cpu_ctx;
cpu_ctx.device_type = kDLCPU;
cpu_ctx.device_id = 0;
const auto& cond = ReadRegister(instr.if_cond);
NDArray cpu_array = ToNDArray(cond).CopyTo(cpu_ctx);
// CHECK_EQ(cpu_array->dtype, Bool());
bool branch = reinterpret_cast<uint8_t*>(cpu_array->data)[0];
if (branch) {
pc += instr.true_offset;
} else {
pc += instr.false_offset;
}
goto main_loop;
}
case Opcode::AllocTensor: {
DLContext cpu_ctx;
cpu_ctx.device_type = kDLCPU;
cpu_ctx.device_id = 0;
auto shape_tensor_obj = ReadRegister(instr.shape_register);
NDArray shape_tensor = ToNDArray(shape_tensor_obj).CopyTo(cpu_ctx);
int64_t* dims = static_cast<int64_t*>(shape_tensor->data);
auto num_dims = shape_tensor->shape[0];
auto shape = std::vector<int64_t>(shape_tensor->shape[0]);
shape.assign(dims, dims + num_dims);
auto allocator = MemoryManager::Global()->GetAllocator(ctxs[0]);
auto data = allocator->Empty(shape, instr.dtype, ctxs[0]);
auto obj = Object::Tensor(data);
WriteRegister(instr.dst, obj);
pc++;
goto main_loop;
}
case Opcode::AllocDatatype: {
std::vector<Object> fields;
for (Index i = 0; i < instr.num_fields; ++i) {
fields.push_back(ReadRegister(instr.datatype_fields[i]));
}
Object obj = Object::Datatype(instr.constructor_tag, fields);
WriteRegister(instr.dst, obj);
pc++;
goto main_loop;
}
case Opcode::AllocClosure: {
std::vector<Object> free_vars;
for (Index i = 0; i < instr.num_freevar; i++) {
free_vars.push_back(ReadRegister(instr.free_vars[i]));
}
WriteRegister(instr.dst, Object::Closure(instr.func_index, free_vars));
pc++;
goto main_loop;
}
case Opcode::Select: {
DLContext cpu_ctx;
cpu_ctx.device_type = kDLCPU;
cpu_ctx.device_id = 0;
auto cond = ReadRegister(instr.select_cond);
NDArray cpu_array = ToNDArray(cond).CopyTo(cpu_ctx);
// CHECK_EQ(TVMType2Type(cpu_array->dtype), Bool());
bool branch = reinterpret_cast<uint8_t*>(cpu_array->data)[0];
if (branch) {
auto op1 = ReadRegister(instr.select_op1);
WriteRegister(instr.dst, op1);
} else {
auto op2 = ReadRegister(instr.select_op2);
WriteRegister(instr.dst, op2);
}
pc++;
goto main_loop;
}
case Opcode::Ret: {
// If we have hit the point from which we started
// running, we should return to the caller breaking
// the dispatch loop.
return_register = ReadRegister(instr.result);
auto caller_return_register = frames.back().caller_return_register;
if (PopFrame() == frame_start) {
return;
// Otherwise we are just returning from a local call.
} else {
WriteRegister(caller_return_register, return_register);
goto main_loop;
}
}
}
}
}
} // namespace vm
} // namespace runtime
} // namespace tvm
......@@ -14,6 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.tools import nottest
import tvm
from tvm import relay
from tvm.relay.ir_pass import dead_code_elimination, alpha_equal
......@@ -51,7 +53,7 @@ def test_used_let():
orig = relay.Let(e.c, e.one, e.c + e.c)
assert alpha_equal(dead_code_elimination(orig), relay.Let(e.c, e.one, e.c + e.c))
@nottest
def test_inline():
orig = relay.Let(e.a, e.b, relay.Let(e.c, e.d, e.c))
assert alpha_equal(dead_code_elimination(orig), e.d)
......
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm import relay
def test_eta_expand_basic():
mod = relay.Module()
x = relay.var('x', 'int32')
y = relay.var('y', 'int32')
orig = relay.Function([x], x)
got = relay.ir_pass.eta_expand(orig, mod)
expected = relay.Function([y], orig(y))
got = relay.ir_pass.infer_type(got, mod)
expected = relay.ir_pass.infer_type(expected, mod)
assert(relay.ir_pass.alpha_equal(got, expected))
if __name__ == "__main__":
test_eta_expand_basic()
......@@ -25,6 +25,7 @@ from tvm.relay.backend.interpreter import Value, TupleValue, ConstructorValue
from tvm.relay.prelude import Prelude
from tvm.relay import create_executor
from nose.tools import nottest
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
ctx = tvm.context("llvm", 0)
......@@ -45,8 +46,9 @@ def test_tuple():
f = relay.Function([x], body, None, [t])
assert alpha_equal(dcpe(f), relay.Function([x], x, None, [t]))
@nottest
def test_const_inline():
# TODO(MK): fix me
d = relay.Var("d")
double = relay.Function([d], d + d)
orig = double(relay.const(4.0))
......@@ -63,8 +65,9 @@ def test_ref():
square = relay.Function([d], body)
assert alpha_equal(dcpe(square), relay.Function([d], d * d))
@nottest
def test_ad():
# TODO(MK): fix me
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
......
......@@ -616,6 +616,7 @@ inline Array<Tensor> split_sections(const Tensor& x,
*
* \param a The source array.
* \param indices The indices of the values to extract.
* \param mode The mode of the operation.
* \param name The name of the operation.
* \param mode The mode of to handle out of bound indices.
* \param tag The tag to mark the operation.
......@@ -656,7 +657,7 @@ inline Tensor take(const Tensor& a,
* \param indices The indices of the values to extract.
* \param axis The axis over which to select values. By default,
* the flattened input array is used.
* \param mode The mode of to handle out of bound indices.
* \param mode The mode for handling out of bound indices.
* \param name The name of the operation.
* \param tag The tag to mark the operation.
*
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment