Commit ffcb2a5e by Yida Wang Committed by Tianqi Chen

explain the lowering process in nnvm.compiler.build (#339)

parent 6c198621
......@@ -207,7 +207,7 @@ def build(graph, target=None, shape=None, dtype="float32", params=None, target_h
The final execution graph.
libmod : tvm.Module
The modue that comes with the execution graph
The module that comes with the execution graph
params : dict of str to NDArray
The updated parameters of graph if params is passed.
......@@ -236,7 +236,7 @@ def build(graph, target=None, shape=None, dtype="float32", params=None, target_h
if params and cfg.pass_enabled("PrecomputePrune"):
graph, params = precompute_prune(graph, params)
shape, dtype = _update_shape_dtype(shape, dtype, params)
# Operator Fusion and generatiom
# Operator Fusion and generation
graph = graph_attr.set_shape_inputs(graph, shape)
graph = graph_attr.set_dtype_inputs(graph, dtype)
graph._set_json_attr("target", str(target), "str")
......
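The hunk above is the core of nnvm.compiler.build: optionally prune precomputable subgraphs, re-infer shapes and dtypes, then fuse operators and generate code for the target. Below is a minimal sketch (not part of this commit) of how that path is driven from user code; the tiny dense network and the "llvm" target are illustrative assumptions only.

# Hedged sketch of exercising the build pipeline shown above.
import nnvm.symbol as sym
import nnvm.compiler

x = sym.Variable("x")
net = sym.dense(data=x, units=16, name="fc")

# opt_level controls which graph passes (e.g. PrecomputePrune, OpFusion)
# are enabled via cfg.pass_enabled(...) inside build().
with nnvm.compiler.build_config(opt_level=2):
    graph, libmod, params = nnvm.compiler.build(
        net, target="llvm", shape={"x": (1, 8)})

print(graph.json())   # the final execution graph, serialized as JSON
print(libmod)         # the tvm.Module holding the fused, compiled operators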
......@@ -67,10 +67,13 @@ print(net.debug_str())
# optimization while TVM does the tensor-level optimization, resulting
# in an optimized runtime module for model serving.
#
# We'll first compile for Nvidia GPU.
# To generate the module library, TVM will first transfer graph IR into lower
# intrinsic IR for the specified target backend, which is CUDA in this example.
# Then target backend will generate module library.
# We'll first compile for an Nvidia GPU. Behind the scenes, `nnvm.compiler.build`
# first performs a number of graph-level optimizations, e.g. pruning, fusion, etc.,
# then maps the operators (i.e. the nodes of the optimized graph) to
# TVM implementations to generate a `tvm.module`.
# To generate the module library, TVM first lowers the high-level graph IR into the
# intrinsic IR of the specified target backend, which is CUDA in this example.
# Then machine code is generated as the module library.
opt_level = 0
target = tvm.target.cuda()
......
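The diff is truncated before the actual build call. The following is a hedged sketch (not part of this commit) of how the tutorial's compile-and-run flow typically continues for the CUDA target; `net`, `params`, `data_shape`, and `out_shape` are assumed to be defined earlier in the tutorial, and the random input is purely illustrative.

# Hedged sketch: compile with nnvm.compiler.build, then run the module
# with the TVM graph runtime. Assumes net, params, data_shape, out_shape.
import numpy as np
import nnvm.compiler
import tvm
from tvm.contrib import graph_runtime

with nnvm.compiler.build_config(opt_level=opt_level):
    graph, libmod, params = nnvm.compiler.build(
        net, target=target, shape={"data": data_shape}, params=params)

ctx = tvm.gpu(0)
module = graph_runtime.create(graph, libmod, ctx)
module.set_input(**params)
module.set_input("data", tvm.nd.array(
    np.random.uniform(size=data_shape).astype("float32")))
module.run()
out = module.get_output(0, out=tvm.nd.empty(out_shape, ctx=ctx))
print(out.asnumpy())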