/*!
 *  Copyright (c) 2016 by Contributors
 * \file tvm/ir_pass.h
 * \brief Collection of IR pass functions
 *
 *  When a pass function in this file takes a Stmt,
 *  we can use PassFunction(Evaluate(expr)) to apply it to an Expr.
 */
#ifndef TVM_IR_PASS_H_
#define TVM_IR_PASS_H_

#include <tvm/ir_functor.h>
#include <arithmetic/Simplify.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <string>
#include "./expr.h"
#include "./buffer.h"
#include "./schedule.h"
#include "./lowered_func.h"

namespace tvm {
namespace ir {

/*!
 * \brief Simplify the expression.
 * \param expr The expression to be simplified.
 * \param vrange The range information about the variables.
 * \return The simplified expression.
 */
EXPORT Expr Simplify(Expr expr, Map<Var, Range> vrange = Map<Var, Range>());
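
// A hypothetical usage sketch (not part of the original header). The names
// below are illustrative; Range::make_by_min_extent is assumed available
// from expr.h.
//
//   Var x("x");
//   Map<Var, Range> vrange;
//   vrange.Set(x, Range::make_by_min_extent(0, 8));  // x is in [0, 8)
//   Expr e = Simplify(x + 0);                        // -> x
//   Expr c = Simplify(x < 8, vrange);                // -> constant true under vrange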

/*!
 * \brief Simplify the statement.
 * \param stmt The statement to be simplified.
 * \param vrange The range information about the variables.
 * \return The simplified statement.
 */
Stmt Simplify(Stmt stmt, Map<Var, Range> vrange = Map<Var, Range>());

/*!
 * \brief Simplify by applying canonical form.
 * \param stmt The statement to be canonically simplified.
 * \param vrange The range information about the variables.
 * \return Canonicalized statement.
 */
Stmt CanonicalSimplify(Stmt stmt,
                       Map<Var, Range> vrange = Map<Var, Range>());

/*!
 * \brief Simplify by applying canonical form.
 * \param expr The expression to be canonically simplified.
 * \param vrange The range information about the variables.
 * \return Canonicalized expression.
 */
EXPORT Expr CanonicalSimplify(Expr expr,
                              Map<Var, Range> vrange = Map<Var, Range>());
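
// A hypothetical sketch (not part of the original header): canonical
// simplification normalizes equivalent expressions to one linear form.
//
//   Var x("x"), y("y");
//   Expr a = CanonicalSimplify(x + y + x);  // canonical form of 2*x + y
//   Expr b = CanonicalSimplify(y + x * 2);
//   // Equal(a, b) is expected to hold after canonicalization.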

/*!
 * \brief Deep compare lhs and rhs.
 * \param lhs The left operand.
 * \param rhs The right operand.
 * \return The comparison result.
 */
EXPORT bool Equal(const Expr& lhs, const Expr& rhs);

/*!
 * \brief Deep compare lhs and rhs.
 * \param lhs The left operand.
 * \param rhs The right operand.
 * \return The comparison result.
 */
bool Equal(const Stmt& lhs, const Stmt& rhs);

/*!
 * \brief Deep compare lhs and rhs.
 *
 *  If you only want an equality comparison, use Equal,
 *  which also ties variable definitions. Compare gives
 *  a total order over expressions.
 *
 * \param lhs The left operand.
 * \param rhs The right operand.
 * \return The comparison result.
 */
int Compare(const Expr& lhs, const Expr& rhs);
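
// A hypothetical sketch (not part of the original header):
//
//   Var x("x"), y("y");
//   bool same = Equal(x + 1, x + 1);    // true: structurally identical
//   int order = Compare(x + 1, y * 2);  // negative, zero, or positive: a total order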

/*!
 * \brief Verify whether the IR stmt or Expr is in SSA form.
 *  That is, each VarExpr is defined and assigned once (in Let/For).
 *
 * \param ir The root of the IR DAG.
 * \return Whether the IR is in SSA form.
 * \note All the passes in this file consume SSA form and output SSA form.
 */
bool VerifySSA(const Stmt& ir);

/*!
 * \brief Whether the expression has side effects.
 * \param e The expression to be checked.
 * \return Whether the expression has side effects.
 */
bool HasSideEffect(const Expr& e);

/*!
 * \brief Whether expression e uses the variable v.
 * \param e The expression to be checked.
 * \param v The variable.
 * \return Whether e uses v.
 */
bool ExprUseVar(const Expr& e, const Var& v);

/*!
 * \brief Whether expression e uses any variable in the variable set.
 * \param e The expression to be checked.
 * \param vset The variable set.
 * \return Whether e uses any variable in vset.
 */
bool ExprUseVar(const Expr& e, const std::unordered_set<const Variable*>& vset);
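
// A hypothetical sketch (not part of the original header):
//
//   Var x("x"), y("y");
//   Expr e = x + 1;
//   bool uses_x = ExprUseVar(e, x);                         // true
//   bool uses_y = ExprUseVar(e, y);                         // false
//   std::unordered_set<const Variable*> vset = {x.get()};
//   bool uses_any = ExprUseVar(e, vset);                    // true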

/*!
 * \brief Convert an IR node to SSA form.
 * \param stmt The source statement to be converted.
 * \return The converted form.
 */
Stmt ConvertSSA(Stmt stmt);

/*!
 * \brief Substitute each var in the keys of value_map with the corresponding value.
 * \param stmt The source statement to be substituted.
 * \param value_map The map of new values.
 * \return The converted form.
 */
Stmt Substitute(Stmt stmt,
                const std::unordered_map<const Variable*, Expr>& value_map);

/*!
 * \brief Substitute each var in the keys of value_map with the corresponding value.
 * \param expr The source expression to be substituted.
 * \param value_map The map of new values.
 * \return The converted expression.
 */
Expr Substitute(Expr expr,
                const std::unordered_map<const Variable*, Expr>& value_map);

/*!
 * \brief Substitute each var in the keys of value_map with the corresponding value.
 * \param stmt The source statement to be substituted.
 * \param value_map The map of new values.
 * \return The converted form.
 */
Stmt Substitute(Stmt stmt, const Map<Var, Expr>& value_map);

/*!
 * \brief Substitute each var in the keys of value_map with the corresponding value.
 * \param expr The source expression to be substituted.
 * \param value_map The map of new values.
 * \return The converted expression.
 */
Expr Substitute(Expr expr, const Map<Var, Expr>& value_map);
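
// A hypothetical sketch (not part of the original header):
//
//   Var x("x"), y("y");
//   Map<Var, Expr> vmap;
//   vmap.Set(x, y + 1);                  // replace x with y + 1
//   Expr out = Substitute(x * 2, vmap);  // -> (y + 1) * 2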

/*!
 * \brief Inline all calls of f in stmt.
 *
 * \param stmt The statement to apply inline optimization.
 * \param f The function reference to be inlined.
 * \param args The argument variables of the function.
 * \param body The definition body of the function.
 * \return The result stmt.
 *
 * \note All the passes in this file consume SSA form and output SSA form.
 */
Stmt Inline(Stmt stmt,
            FunctionRef f,
            Array<Var> args,
            Expr body);

/*!
 * \brief Flatten multi-dimensional reads/writes
 *  into single-dimensional Load/Store.
 *
 * \param stmt The stmt to be transformed.
 * \param extern_buffer Map specifying the external
 *    buffer assignment of inputs and outputs.
 * \param cache_line_size The size of a CPU cache line.
 * \return Transformed stmt.
 */
Stmt StorageFlatten(Stmt stmt,
                    Map<Tensor, Buffer> extern_buffer,
                    int cache_line_size);
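
// A hypothetical sketch (not part of the original header): out_tensor and
// body are assumed to come from an earlier scheduling step; decl_buffer is
// assumed available from buffer.h.
//
//   Map<Tensor, Buffer> binds;
//   binds.Set(out_tensor,
//             decl_buffer(out_tensor->shape, out_tensor->dtype, "C"));
//   Stmt flat = StorageFlatten(body, binds, 64);  // 64-byte cache lines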

/*!
 * \brief Remove no-ops from the Stmt.
 * \param stmt The stmt to be transformed.
 * \return Transformed stmt.
 */
Stmt RemoveNoOp(Stmt stmt);

/*!
 * \brief Split the statement into pipeline stages.
 * \param stmt The stmt to be split.
 * \param split_load Whether to split loads into their own stages.
 * \return Transformed stmt.
 */
Stmt SplitPipeline(Stmt stmt, bool split_load);

/*!
 * \brief Narrow channel access to a smaller range.
 * \param stmt The stmt to do access rewriting on.
 * \return Transformed stmt.
 */
Stmt NarrowChannelAccess(Stmt stmt);

/*!
 * \brief Unroll the constant loops marked by unroll.
 * This pass also automatically attaches a pragma unroll tag to loops that meet the criteria.
 *
 * \param stmt The statement to be unrolled.
 * \param auto_max_step The maximum number of steps before automatic unrolling stops being attached.
 * \param auto_max_depth The maximum depth before automatic unrolling stops being attached.
 * \param auto_max_extent The maximum extent of a loop we can unroll;
 *                        this is a legacy option that does not take the loop's total steps into account.
 * \param explicit_unroll Whether to explicitly unroll the loop, or leave the unroll annotation to codegen.
 * \return Transformed stmt.
 */
Stmt UnrollLoop(Stmt stmt,
                int auto_max_step,
                int auto_max_depth,
                int auto_max_extent,
                bool explicit_unroll);
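
// A hypothetical sketch (illustrative thresholds, not part of the original
// header): auto-unroll loops with at most 16 total steps and nesting depth
// at most 8, expanding them in this pass rather than leaving the annotation
// to codegen.
//
//   Stmt unrolled = UnrollLoop(stmt, 16, 8, 0, true);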

/*!
 * \brief Vectorize the constant loops.
 * \param stmt The statement to be vectorized.
 * \return Transformed stmt.
 */
Stmt VectorizeLoop(Stmt stmt);

/*!
 * \brief Inject virtual thread loops into stmt.
 * \param stmt The statement to be transformed.
 * \return Transformed stmt.
 */
Stmt InjectVirtualThread(Stmt stmt);

/*!
 * \brief Inject prefetch instructions into stmt.
 * \param stmt The statement to be transformed.
 * \return Transformed stmt.
 */
Stmt InjectPrefetch(Stmt stmt);

/*!
 * \brief Inject double buffering into stmt.
 * \param stmt The statement to be transformed.
 * \param split_loop The loop splitting factor.
 * \return Transformed stmt.
 */
Stmt InjectDoubleBuffer(Stmt stmt, int split_loop);

/*!
 * \brief Inject copy intrinsics with optional padding.
 *
 * \param stmt The statement to be transformed.
 * \param pragma_key The pragma key that hints the copy.
 * \param fintrin The function with signature
 *
 *   Stmt fintrin(Buffer src,
 *                Buffer dst,
 *                Array<Expr> pad_before,
 *                Array<Expr> pad_after,
 *                Expr pad_value)
 * \return Transformed stmt.
 */
Stmt InjectCopyIntrin(Stmt stmt,
                      const std::string& pragma_key,
                      const runtime::PackedFunc& fintrin);
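
// A hypothetical sketch (not part of the original header): wrap a copy
// lowering callback as a PackedFunc. MakeMyCopy is an assumed user helper;
// the Buffer/Array conversions assume tvm/packed_func_ext.h is included.
//
//   runtime::PackedFunc fintrin(
//       [](runtime::TVMArgs args, runtime::TVMRetValue* rv) {
//         Buffer src = args[0];
//         Buffer dst = args[1];
//         Array<Expr> pad_before = args[2];
//         Array<Expr> pad_after = args[3];
//         Expr pad_value = args[4];
//         *rv = MakeMyCopy(src, dst, pad_before, pad_after, pad_value);
//       });
//   Stmt out = InjectCopyIntrin(stmt, "dma_copy", fintrin);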

/*!
 * \brief Rewrite the storage allocation pattern.
 *  Moves allocations to the outermost possible scope,
 *  trying to share space between allocations to make
 *  a static allocation plan when possible.
 *
 * \param stmt The stmt to be transformed.
 * \return Transformed stmt.
 */
Stmt StorageRewrite(Stmt stmt);

/*!
 * \brief Partition loops in the stmt.
 * \param stmt The stmt to do loop partitioning on.
 * \param split_const_loop Flag to enable partitioning of const loops.
 * \return Transformed stmt.
 */
Stmt LoopPartition(Stmt stmt, bool split_const_loop);

/*!
 * \brief Detect and insert sync points for the co-processor.
 *
 * \param stmt The stmt to be transformed.
 * \return Transformed stmt.
 */
Stmt CoProcSync(Stmt stmt);

/*!
 * \brief Lift common attrs with attr_key to the outer scope.
 *
 * \param stmt The stmt to be transformed.
 * \param attr_key The attribute key to be checked.
 * \return Transformed stmt.
 */
Stmt LiftAttrScope(Stmt stmt, std::string attr_key);

/*!
 * \brief Detect and rewrite unsafe selects that contain memory accesses.
 * \param stmt The statement to be rewritten.
 * \return Transformed stmt.
 */
Stmt RewriteUnsafeSelect(Stmt stmt);

/*!
 * \brief Lower attached storage access information.
 * Run this pass after all storage access analyses finish.
 *
 * \param stmt The stmt to be transformed.
 * \return Transformed stmt.
 */
Stmt LowerStorageAccessInfo(Stmt stmt);

/*!
 * \brief Make a user-callable API LoweredFunc.
 *
 *  The main task of this function is to create code to:
 *   - Map the values in api_args to the Vars required by body.
 *   - Insert assertions to check the type/value of the passed arguments.
 *
 * \param body The body of the function.
 * \param name The name of the function.
 * \param api_args Arguments to the function; each can be either a Var or a Buffer.
 * \param num_unpacked_args Number of arguments that
 *         are processed in plain form instead of packed form.
 * \param is_restricted Whether the caller can guarantee that the buffer arguments do not overlap.
 *  It is recommended to set this to true for optimized code if such an invariant holds.
 *
 * \return A LoweredFunc with the specified signature.
 *
 * \note
 *  The function signature has two cases.
 *
 *  Let num_packed_args = len(api_args) - num_unpacked_args;
 *
 *  if num_packed_args is zero:
 *     f(api_arg_0, api_arg_1, .., api_arg_n) where n == len(api_args)
 *
 *  if num_packed_args is not zero:
 *       f(TVMArg* packed_args, int* packed_arg_type_ids, int num_packed_args,
 *         api_arg_k, api_arg_k+1, ... api_arg_n)
 *
 *       where n == len(api_args), k == num_packed_args
 *
 *  There is no thread_axis in the generated function.
 */
LoweredFunc MakeAPI(Stmt body,
                    std::string name,
                    Array<NodeRef> api_args,
                    int num_unpacked_args,
                    bool is_restricted);
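
// A hypothetical sketch (not part of the original header): A and B are
// assumed Buffer arguments and n a Var used by body.
//
//   Array<NodeRef> api_args{A, B, n};
//   // num_unpacked_args == 0, so num_packed_args == 3 and the generated
//   // signature is f(TVMArg* packed_args, int* packed_arg_type_ids, 3).
//   LoweredFunc f = MakeAPI(body, "myadd", api_args, 0, true);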

/*!
 * \brief Bind the device type of the host function to be device_type.
 * \param func The function to be bound.
 * \param device_type The device type to bind to.
 * \return The bound function.
 */
LoweredFunc BindDeviceType(LoweredFunc func,
                           int device_type);
/*!
 * \brief Find undefined vars in the statement.
 * \param stmt The statement to be checked.
 * \param defs The vars that are defined.
 * \return Array of undefined vars.
 */
Array<Var> UndefinedVars(const Stmt& stmt, const Array<Var>& defs);

/*!
 * \brief Split the function into a host function and device functions.
 * \param func The function to be split.
 *
 * \return Array of functions; the first one is the host function,
 *     the others are device functions.
 */
Array<LoweredFunc> SplitHostDevice(LoweredFunc func);

/*!
 * \brief Insert synchronization between parallel reads/writes of shared buffers.
 *
 * \param stmt The function to be transformed.
 * \param storage_scope The storage scope considered.
 * \return Transformed function.
 */
LoweredFunc ThreadSync(LoweredFunc stmt, std::string storage_scope);

/*!
 * \brief Lower cross-thread allreduce in the stmt.
 * \param f The device function to be lowered.
 * \param warp_size The size of a warp within which no sync is needed.
 * \return Transformed function.
 */
LoweredFunc LowerThreadAllreduce(LoweredFunc f, int warp_size);

/*!
 * \brief Lower warp memory in stmt.
 * \param f The device function to be lowered.
 * \param warp_size The size of a warp within which no sync is needed.
 *        This function only takes effect if warp_size is greater than one.
 * \return Transformed function.
 */
LoweredFunc LowerWarpMemory(LoweredFunc f, int warp_size);

/*!
 * \brief Remap the thread axis.
 *
 *  This can be used to get an equivalent program that uses
 *  threadIdx.y in place of threadIdx.x by passing
 *  {"threadIdx.x": thread_axis("threadIdx.y")}
 *
 * \param f The device function to be lowered.
 * \param axis_map The map from StringImm -> IterVar.
 * \return Transformed function.
 */
LoweredFunc RemapThreadAxis(LoweredFunc f, Map<Expr, IterVar> axis_map);
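
// A hypothetical sketch (not part of the original header); thread_axis is
// assumed available from the TVM schedule/operation headers.
//
//   Map<Expr, IterVar> axis_map;
//   axis_map.Set(StringImm::make("threadIdx.x"),
//                thread_axis(Range(), "threadIdx.y"));
//   LoweredFunc g = RemapThreadAxis(f, axis_map);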

/*!
 * \brief Lower packed function calls.
 * \param f The function to be lowered.
 * \return Transformed function.
 */
LoweredFunc LowerTVMBuiltin(LoweredFunc f);

/*!
 * \brief Combine context function calls.
 * \param f The host function to be lowered.
 * \return Transformed function.
 */
LoweredFunc CombineContextCall(LoweredFunc f);

/*!
 * \brief Rewrite the pointer content type of arguments,
 *  as well as Allocs internal to the function, to use
 *  the most frequently accessed type for load/store,
 *  to avoid pointer casting in the backend when possible.
 *
 * \note Implemented in storage_rewrite.cc.
 * \param f The function to be transformed.
 * \return Transformed function.
 */
LoweredFunc PointerValueTypeRewrite(LoweredFunc f);

/*!
 * \brief Lower intrinsic function calls.
 * \param f The device function to be lowered.
 * \param target The target device.
 * \return Transformed function.
 */
LoweredFunc LowerIntrin(LoweredFunc f, const std::string& target);

/*!
 * \brief Verify if memory accesses are legal for a specific target device type.
 *
 *  In the case that the target is CUDA, if not all of the workload is bound to
 *  threads, CPU code is generated that tries to access GPU memory,
 *  which is illegal. This pass performs verification for this case.
 *
 * \param func The function to be verified.
 * \param device_type The target device type.
 * \return Success of memory verification.
 */
bool VerifyMemory(LoweredFunc func, int device_type);

}  // namespace ir
}  // namespace tvm

#endif  // TVM_IR_PASS_H_