/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/tir/ir_pass.h
 * \brief Collection of IR pass functions
 *
 *  When a pass function in this file operates on a Stmt,
 *  it can be applied to an Expr via PassFunction(Evaluate(expr)).
 */
#ifndef TVM_TIR_IR_PASS_H_
#define TVM_TIR_IR_PASS_H_

#include <tvm/te/schedule.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/buffer.h>
#include <tvm/tir/lowered_func.h>

#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <string>

namespace tvm {
namespace tir {

/*!
 * \brief Simplify the expression.
 * \param expr The expression to be simplified.
 * \param vrange The range information about the variables.
 * \return The simplified expression.
 */
TVM_DLL PrimExpr Simplify(PrimExpr expr, Map<Var, Range> vrange = Map<Var, Range>());
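// Example (illustrative sketch; `x` is a hypothetical variable, and the use of
// Range::make_by_min_extent assumes the current Range API):
//
//   Var x("x", DataType::Int(32));
//   Map<Var, Range> vrange;
//   vrange.Set(x, Range::make_by_min_extent(0, 16));  // 0 <= x < 16
//   PrimExpr res = Simplify(x < 16, vrange);          // expected to fold to true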

/*!
 * \brief Simplify the statement.
 * \param stmt The statement to be simplified.
 * \param vrange The range information about the variables.
 * \return The simplified statement.
 */
Stmt Simplify(Stmt stmt, Map<Var, Range> vrange = Map<Var, Range>());

/*!
 * \brief Simplify by applying canonical form.
 * \param stmt The statement to be canonically simplified.
 * \param vrange The range information about the variables.
 * \return Canonicalized statement.
 */
Stmt CanonicalSimplify(Stmt stmt,
                       Map<Var, Range> vrange = Map<Var, Range>());

/*!
 * \brief Simplify by applying canonical form.
 * \param expr The expression to be canonically simplified.
 * \param vrange The range information about the variables.
 * \return Canonicalized expression.
 */
TVM_DLL PrimExpr CanonicalSimplify(PrimExpr expr,
                                   Map<Var, Range> vrange = Map<Var, Range>());

/*!
 * \brief Deep compare lhs and rhs
 * \param lhs The left operand
 * \param rhs The right operand
 * \return The comparison result.
 */
TVM_DLL bool Equal(const PrimExpr& lhs, const PrimExpr& rhs);

/*!
 * \brief Deep compare lhs and rhs
 * \param lhs The left operand
 * \param rhs The right operand
 * \return The comparison result.
 */
bool Equal(const Stmt& lhs, const Stmt& rhs);

/*!
 * \brief Deep compare lhs and rhs.
 *
 *  If you only want equality comparison, use Equal,
 *  which also ties definitions. Compare gives a
 *  total order over expressions.
 *
 * \param lhs The left operand
 * \param rhs The right operand
 * \return The comparison result.
 */
int Compare(const PrimExpr& lhs, const PrimExpr& rhs);
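// Example (illustrative sketch; x and y are hypothetical variables):
//
//   Var x("x"), y("y");
//   bool same  = Equal(x + y, x + y);     // true: structurally identical
//   bool comm  = Equal(x + y, y + x);     // false: operands are not reordered
//   int  order = Compare(x + y, y + x);   // non-zero; defines a total order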

/*!
 * \brief Verify whether the IR stmt or Expr is in SSA form.
 *  That is, each VarExpr is defined and assigned once (in Let/For).
 *
 * \param ir The root of the IR DAG.
 * \return Whether the IR is in SSA form.
 * \note All the passes in this file use SSA form and output SSA form.
 */
TVM_DLL bool VerifySSA(const Stmt& ir);

/*!
 * \brief Whether the expression has side effects.
 * \param e The expression to be checked.
 * \return Whether the expression has side effects.
 */
TVM_DLL bool HasSideEffect(const PrimExpr& e);

/*!
 * \brief Whether expression e uses variable v.
 * \param e The expression to be checked.
 * \param v The variable.
 * \return Whether e uses v.
 */
bool ExprUseVar(const PrimExpr& e, const Var& v);

/*!
 * \brief Whether expression e uses any variable in the variable set.
 * \param e The expression to be checked.
 * \param vset The variable set.
 * \return Whether e uses any variable in vset.
 */
bool ExprUseVar(const PrimExpr& e, const std::unordered_set<const VarNode*>& vset);
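// Example (illustrative sketch; x and y are hypothetical variables):
//
//   Var x("x"), y("y");
//   bool a = ExprUseVar(x + 1, x);                        // true
//   bool b = ExprUseVar(y + 1, x);                        // false
//   std::unordered_set<const VarNode*> vset = {x.get()};
//   bool c = ExprUseVar(x * 2, vset);                     // true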

/*!
 * \brief Convert an IR node to SSA form.
 * \param stmt The source statement to be converted.
 * \return The converted form.
 */
TVM_DLL Stmt ConvertSSA(Stmt stmt);

/*!
 * \brief Substitute the vars given by the map keys with the corresponding values.
 * \param stmt The source statement to be substituted
 * \param value_map The map of new values.
 * \return The converted form.
 */
Stmt Substitute(Stmt stmt,
                const std::unordered_map<const VarNode*, PrimExpr>& value_map);

/*!
 * \brief Substitute the vars given by the map keys with the corresponding values.
 * \param expr The source expression to be substituted
 * \param value_map The map of new values.
 * \return The converted expression.
 */
PrimExpr Substitute(PrimExpr expr,
                    const std::unordered_map<const VarNode*, PrimExpr>& value_map);

/*!
 * \brief Substitute the vars given by the map keys with the corresponding values.
 * \param stmt The source statement to be substituted
 * \param value_map The map of new values.
 * \return The converted form.
 */
Stmt Substitute(Stmt stmt, const Map<Var, PrimExpr>& value_map);

/*!
 * \brief Substitute the vars given by the map keys with the corresponding values.
 * \param expr The source expression to be substituted
 * \param value_map The map of new values.
 * \return The converted expression.
 */
PrimExpr Substitute(PrimExpr expr, const Map<Var, PrimExpr>& value_map);
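// Example (illustrative sketch; i and j are hypothetical variables):
//
//   Var i("i"), j("j");
//   Map<Var, PrimExpr> vmap;
//   vmap.Set(i, j * 4);
//   PrimExpr e = Substitute(i + 1, vmap);   // yields j * 4 + 1 (not simplified)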

/*!
 * \brief Inline all calls of f in stmt.
 *
 * \param stmt The statement to apply inline optimization.
 * \param f The function reference to be inlined
 * \param args The argument variables of the function.
 * \param body The definition body of the function.
 * \return The result stmt
 *
 * \note All the passes in this file use SSA form and output SSA form.
 */
Stmt Inline(Stmt stmt,
            FunctionRef f,
            Array<Var> args,
            PrimExpr body);

/*!
 * \brief Flatten the multi-dimensional read/write
 *  to single dimensional Load/Store
 *
 * \param stmt The stmt to be transformed.
 * \param extern_buffer Map that specifies the external
 *    buffer assignment of inputs and outputs.
 * \param cache_line_size The size of CPU cache line.
 * \param create_bound_attribute Whether to create bound attributes.
 * \return Transformed stmt.
 */
Stmt StorageFlatten(Stmt stmt,
                    Map<te::Tensor, Buffer> extern_buffer,
                    int cache_line_size,
                    bool create_bound_attribute = false);
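// Example (illustrative sketch; A, n, and stmt are hypothetical, and the
// tensor-to-buffer binding is normally produced by the lowering pipeline):
//
//   Var n("n");
//   te::Tensor A = te::placeholder({n}, DataType::Float(32), "A");
//   Buffer A_buf = decl_buffer({n}, DataType::Float(32), "A_buf");
//   Map<te::Tensor, Buffer> binds;
//   binds.Set(A, A_buf);
//   stmt = StorageFlatten(stmt, binds, /*cache_line_size=*/64);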

/*!
 * \brief Try to modify the AST to support TensorCore
 *
 * \param stmt The stmt to be transformed.
 * \param schedule The original schedule.
 * \param extern_buffer Map that specifies the external
 *    buffer assignment of inputs and outputs.
 * \return Transformed stmt.
 */
Stmt RewriteForTensorCore(Stmt stmt,
                          te::Schedule schedule,
                          Map<te::Tensor, Buffer> extern_buffer);

/*!
 * \brief Verify if there is any argument bound to compact buffer.
 *
 * \param stmt The stmt to be verified.
 * \return true if there is any buffer_bind_scope attribute found,
 *        otherwise, false.
 */
bool VerifyCompactBuffer(Stmt stmt);

/*!
 * \brief Remove no-ops from the Stmt.
 * \param stmt The stmt to be transformed.
 * \return Transformed stmt.
 */
Stmt RemoveNoOp(Stmt stmt);

/*!
 * \brief Unroll constant loops marked by the unroll attribute.
 * This pass also automatically attaches a pragma unroll tag to loops that meet the criteria.
 *
 * \param stmt The statement to be unrolled.
 * \param auto_max_step The maximum number of total loop steps up to which automatic unrolling is applied.
 * \param auto_max_depth The maximum loop nesting depth up to which automatic unrolling is applied.
 * \param auto_max_extent The maximum extent of the loop we can unroll,
 *                     this is a legacy option that does not take the total loop steps into account.
 * \param explicit_unroll Whether to explicitly unroll the loop, or to leave the unroll annotation to codegen.
 * \return Transformed stmt.
 */
Stmt UnrollLoop(Stmt stmt,
                int auto_max_step,
                int auto_max_depth,
                int auto_max_extent,
                bool explicit_unroll);
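// Example (illustrative sketch; the numeric thresholds are arbitrary):
//
//   stmt = UnrollLoop(stmt,
//                     /*auto_max_step=*/16,
//                     /*auto_max_depth=*/8,
//                     /*auto_max_extent=*/4,
//                     /*explicit_unroll=*/true);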

/*!
 * \brief Vectorize the constant loops.
 * \param stmt The statement to be vectorized.
 * \return Transformed stmt.
 */
Stmt VectorizeLoop(Stmt stmt);

/*!
 * \brief Convert vectorized loops into serialized loops.
 * \param stmt The statement to skip vectorization on.
 * \return Transformed stmt.
 */
Stmt SkipVectorize(Stmt stmt);

/*!
 * \brief Instrument bound checkers.
 * \param stmt The statement to be instrumented.
 * \return Instrumented stmt.
 */
Stmt InstrumentBoundCheckers(Stmt stmt);

/*!
 * \brief Inject virtual thread loops into stmt.
 * \param stmt The statement to be transformed.
 * \return Transformed stmt.
 */
Stmt InjectVirtualThread(Stmt stmt);

/*!
 * \brief Inject prefetch instructions into stmt.
 * \param stmt The statement to be transformed.
 * \return Transformed stmt.
 */
Stmt InjectPrefetch(Stmt stmt);

/*!
 * \brief Inject double buffering into stmt.
 * \param stmt The statement to be transformed.
 * \param split_loop Loop splitting factor.
 * \return Transformed stmt.
 */
Stmt InjectDoubleBuffer(Stmt stmt, int split_loop);

/*!
 * \brief Inject copy intrinsics with optional pad.
 *
 * \param stmt The statement to be transformed.
 * \param pragma_key The pragma key for hint of copy.
 * \param fintrin The function with signature
 *
 *   Stmt fintrin(Buffer src,
 *                Buffer dst,
 *                Array<PrimExpr> pad_before,
 *                Array<PrimExpr> pad_after,
 *                PrimExpr pad_value)
 * \return Transformed stmt.
 */
Stmt InjectCopyIntrin(Stmt stmt,
                      const std::string& pragma_key,
                      const runtime::PackedFunc& fintrin);
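// Example (illustrative sketch; the pragma key "memcpy_hint" and the lambda
// body are hypothetical):
//
//   runtime::PackedFunc fintrin = runtime::TypedPackedFunc<
//       Stmt(Buffer, Buffer, Array<PrimExpr>, Array<PrimExpr>, PrimExpr)>(
//       [](Buffer src, Buffer dst, Array<PrimExpr> pad_before,
//          Array<PrimExpr> pad_after, PrimExpr pad_value) -> Stmt {
//         // build and return the target-specific copy stmt here
//         return Stmt();  // placeholder only
//       });
//   stmt = InjectCopyIntrin(stmt, "memcpy_hint", fintrin);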

/*!
 * \brief Rewrite the storage allocation pattern.
 *  Moves allocations to the outermost possible scope and
 *  tries to share space between allocations to make
 *  a static allocation plan when possible.
 *
 * \param stmt The stmt to be transformed
 * \return Transformed stmt.
 */
Stmt StorageRewrite(Stmt stmt);

/*!
 * \brief Partition loops in the stmt.
 * \param stmt The stmt on which to perform loop partitioning.
 * \param split_const_loop Flag to enable partitioning of loops with constant extent.
 * \return Transformed stmt.
 */
Stmt LoopPartition(Stmt stmt, bool split_const_loop);

/*!
 * \brief Detect and insert sync points for the co-processor.
 *
 * \param stmt The stmt to be transformed
 * \return Transformed stmt.
 */
Stmt CoProcSync(Stmt stmt);

/*!
 * \brief Lift common attrs with attr_key to outer scope.
 *
 * \param stmt The stmt to be transformed
 * \param attr_key The attribute key to be checked.
 * \return Transformed stmt.
 */
Stmt LiftAttrScope(Stmt stmt, std::string attr_key);

/*!
 * \brief Detect and rewrite unsafe select that contains memory access.
 * \param stmt The statement to be rewritten.
 * \return Transformed stmt.
 */
Stmt RewriteUnsafeSelect(Stmt stmt);

/*!
 * \brief Lower attached storage access information.
 * Do this pass after all storage access analyses finish.
 *
 * \param stmt The stmt to be transformed
 * \return Transformed stmt.
 */
Stmt LowerStorageAccessInfo(Stmt stmt);

/*!
 * \brief Decorate the stmt with a device scope; this is helpful for
 * hardware accelerators without thread blocks.
 *
 * \param stmt The stmt to be transformed
 * \return Transformed stmt.
 */
Stmt DecorateDeviceScope(Stmt stmt);

/*!
 * \brief Loop invariant code motion which locates and hoists if statements.
 * \param stmt The stmt on which to perform if-statement hoisting.
 * \return Transformed stmt.
 */
Stmt HoistIfThenElse(Stmt stmt);

/*!
 * \brief Make a user-callable API LoweredFunc.
 *
 *  The main task of this function is to create code to:
 *   - Map the values in the api_args to the Vars required by body.
 *   - Insert assertions to check type/value of the passed arguments.
 *
 * \param body The body of the function.
 * \param name The name of the function.
 * \param api_args Arguments to the function; each can be either a Var or a Buffer.
 * \param num_unpacked_args Number of arguments that
 *         are processed in plain form instead of packed form.
 * \param is_restricted Whether the caller can guarantee that the buffer arguments do not overlap.
 *  It is recommended to set this to true for optimized code if such an invariant holds.
 *
 * \return A LoweredFunc with the specified signature.
 *
 * \note
 *  The function signature has two cases:
 *
 *  let num_packed_args = len(api_args) - num_unpacked_args;
 *
 *  if num_packed_args is zero:
 *     f(api_arg_0, api_arg_1, .., api_arg_n) where n == len(api_args)
 *
 *  if num_packed_args is not zero:
 *       f(TVMArg* packed_args, int* packed_arg_type_ids, int num_packed_args,
 *         api_arg_k, api_arg_k+1, ... api_arg_n,
 *         TVMValue* out_ret_val, int* out_ret_tcode)
 *
 *       where n == len(api_args), k == num_packed_args
 *
 *  There is no thread_axis in the generated function.
 */
LoweredFunc MakeAPI(Stmt body,
                    std::string name,
                    Array<ObjectRef> api_args,
                    int num_unpacked_args,
                    bool is_restricted);
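// Illustration of the note above, with hypothetical api_args {A, B, n}
// (two Buffers and one Var):
//   - num_unpacked_args == 3 gives the plain signature   f(A, B, n)
//   - num_unpacked_args == 0 gives the packed signature
//       f(TVMArg* packed_args, int* packed_arg_type_ids, int num_packed_args,
//         TVMValue* out_ret_val, int* out_ret_tcode)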

/*!
 * \brief Bind the device type of the host function to be device_type.
 * \param func The function to be bound.
 * \param device_type The device type to bind to.
 * \return The bound function.
 */
LoweredFunc BindDeviceType(LoweredFunc func,
                           int device_type);
/*!
 * \brief Find undefined vars in the statement.
 * \param stmt The statement to be checked.
 * \param defs The vars that are defined.
 * \return Array of undefined vars.
 */
Array<Var> UndefinedVars(const Stmt& stmt, const Array<Var>& defs);
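// Example (illustrative sketch; body and params are hypothetical):
//
//   Array<Var> undefined = UndefinedVars(body, params);  // free vars not in params
//   Array<Var> all_free  = UndefinedVars(body, {});      // no vars pre-defined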

/*!
 * \brief Split the function into a host function and device functions.
 * \param func The function to be split.
 *
 * \return Array of functions, the first one is host function,
 *     the others are device functions.
 */
Array<LoweredFunc> SplitHostDevice(LoweredFunc func);

/*!
 * \brief Insert sync between parallel read/write of shared buffers.
 *
 * \param stmt The stmt to be transformed.
 * \param storage_scope The storage scope considered.
 * \return Transformed lowered function.
 */
LoweredFunc ThreadSync(LoweredFunc stmt, std::string storage_scope);

/*!
 * \brief Lower cross-thread allreduce in the stmt.
 * \param f The device function to be lowered.
 * \param warp_size the size of warp where no sync is needed.
 * \return Transformed function.
 */
LoweredFunc LowerThreadAllreduce(LoweredFunc f, int warp_size);

/*!
 * \brief Lower warp memory in stmt.
 * \param f The device function to be lowered.
 * \param warp_size the size of warp where no sync is needed.
 *        This function only takes effect if warp_size is bigger than one.
 * \return Transformed function.
 */
LoweredFunc LowerWarpMemory(LoweredFunc f, int warp_size);

/*!
 * \brief Remap the thread axis.
 *
 *  This can be used to get an equivalent program that uses
 *  threadIdx.y in place of threadIdx.x by passing
 *  {"threadIdx.x": thread_axis("threadIdx.y")}
 *
 * \param f The device function to be lowered.
 * \param axis_map The map from StringImm -> IterVar
 * \return Transformed function.
 */
LoweredFunc RemapThreadAxis(LoweredFunc f, Map<PrimExpr, IterVar> axis_map);

/*!
 * \brief Lower packed function call.
 * \param f The function to be lowered.
 * \return Transformed function.
 */
LoweredFunc LowerTVMBuiltin(LoweredFunc f);

/*!
 * \brief Combine context function calls.
 * \param f The host function to be lowered.
 * \return Transformed function.
 */
LoweredFunc CombineContextCall(LoweredFunc f);

/*!
 * \brief Rewrite the pointer content type of arguments,
 *  as well as Alloc internal to the function to use
 *  the most frequently accessed type for load/store
 *  to avoid pointer casting in backend when possible.
 *
 * \note Implemented in storage_rewrite.cc
 * \param f The function to be transformed.
 * \return Transformed function.
 */
LoweredFunc PointerValueTypeRewrite(LoweredFunc f);

/*!
 * \brief Lower attached storage access information on device.
 * Do this pass after all storage access analyses finish.
 *
 * \param func The device function to be lowered.
 * \return Transformed function.
 */
LoweredFunc LowerDeviceStorageAccessInfo(LoweredFunc func);

/*!
 * \brief Lower intrinsic function calls.
 * \param f The device function to be lowered.
 * \param target The target device.
 * \return Transformed function.
 */
LoweredFunc LowerIntrin(LoweredFunc f, const std::string& target);

/*!
 * \brief Lower custom datatypes.
 *
 * See tvm::datatypes::Registry for more information on adding custom datatypes.
 *
 * \param f The device function to be lowered.
 * \param target The target device.
 * \return Transformed function.
 */
LoweredFunc LowerCustomDatatypes(LoweredFunc f, const std::string& target);

/*!
 * \brief Infer the TensorCore fragment information using tensor intrinsics.
 *
 * \param f The device function to be lowered.
 * \return Transformed function.
 */
LoweredFunc InferFragment(LoweredFunc f);

/*!
 * \brief Skip assert stmt generation.
 * \param f The function to be transformed.
 * \return Transformed function.
 */
LoweredFunc SkipAssert(LoweredFunc f);

/*!
 * \brief Verify if memory accesses are legal for a specific target device type.
 *
 *  In the case that the target is CUDA, if not all of the workload is bound to
 *  threads, CPU code is generated that tries to access GPU memory,
 *  which is illegal. This pass performs verification for this case.
 *
 * \param func The function to be verified.
 * \param device_type The target device type.
 * \return Success of memory verification.
 */
bool VerifyMemory(LoweredFunc func, int device_type);


/*!
 * \brief Verify the correctness of a GPU code.
 *        It will check whether the amount of memory usage or the number of threads
 *        in a block exceeds the limits.
 * \param stmt The statement to be checked
 * \param constraints The dict to specify constraints to check.
 *        Possible keys are
 *
 *        "max_local_memory_per_block": Total amount of local memory per block (in bytes).
 *        "max_shared_memory_per_block": Total amount of shared memory per block (in bytes).
 *        "max_threads_per_block": Maximum number of threads per block.
 *        "max_thread_x": Maximum length of threadIdx.x.
 *        "max_thread_y": Maximum length of threadIdx.y.
 *        "max_thread_z": Maximum length of threadIdx.z.
 *
 *        If one key is missing in this argument, the pass won't check for that item.
 * \return Whether it is valid GPU code.
 *
 */
bool VerifyGPUCode(Stmt stmt,
                   Map<std::string, PrimExpr> constraints);
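// Example (illustrative sketch; the constraint values are arbitrary, not
// queried from a real device):
//
//   Map<std::string, PrimExpr> constraints;
//   constraints.Set("max_shared_memory_per_block", 49152);
//   constraints.Set("max_threads_per_block", 1024);
//   bool valid = VerifyGPUCode(stmt, constraints);  // unset keys are not checked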

}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_IR_PASS_H_