/*!
 *  Copyright (c) 2016 by Contributors
 * \file schedule.h
 * \brief Define a schedule.
 */
#ifndef TVM_SCHEDULE_H_
#define TVM_SCHEDULE_H_

#include <memory>
#include <string>
#include <unordered_map>
#include "./base.h"
#include "./expr.h"
#include "./tensor.h"
#include "./tensor_intrin.h"

namespace tvm {

// Node container for Stage
class StageNode;
// Node container for Schedule
class ScheduleNode;
// Node container for IterVarRelation
class IterVarRelationNode;
// Attribute of IterVar.
class IterVarAttrNode;

/*! \brief The attachment type. */
enum AttachType : int {
  kGroupRoot = 1,
  kInline = 2,
  kInlinedAlready = 3,
  kScope = 4,
  kScanUpdate = 5
};

/*! \brief Stage, contains scheduling for a stage of computation. */
class Stage : public NodeRef {
 public:
  Stage() {}
  explicit Stage(std::shared_ptr<Node> n) : NodeRef(n) {}
  /*!
   * \brief Create a new stage that holds the schedule for op.
   * \param op The operation to be scheduled.
   */
  explicit Stage(Operation op);
  /*!
   * \brief access the internal node container
   * \return the pointer to the internal node container
   */
  inline const StageNode* operator->() const;
  /*!
   * \brief access the internal node container
   * \return the pointer to the internal node container
   */
  inline StageNode* operator->();
  /*!
   * \brief set the memory scope of the stage
   * \param scope The memory scope.
   */
  Stage& set_scope(std::string scope);  // NOLINT(*)
  /*!
   * \brief Specify that this stage is computed at the scope of the parent stage.
   * \param parent The parent stage.
   * \param scope The iteration point in the parent at which to attach the computation.
   * \return reference to self.
   */
  Stage& compute_at(Stage parent, IterVar scope);   // NOLINT(*)
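  //
  // Illustrative usage (a sketch, not part of the original header; assumes
  // tensors A and B built with compute() and a Schedule s over B->op):
  //
  //   s[A].compute_at(s[B], B->op.as<ComputeOpNode>()->axis[0]);
  //
  // This nests A's computation inside B's outermost loop instead of
  // keeping it as a separate root loop nest.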
  /*!
   * \brief Compute the function inline.
   * \return reference to self.
   */
  Stage& compute_inline();   // NOLINT(*)
  /*!
   * \brief Compute the function at group root.
   * \return reference to self.
   */
  Stage& compute_root();  // NOLINT(*)
  /*!
   * \brief Bind the ivar to a thread index.
   *
   * \param ivar The IterVar to be bound.
   * \param thread_ivar The thread axis to bind to.
   * \return reference to self.
   */
  EXPORT Stage& bind(IterVar ivar, IterVar thread_ivar);
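  //
  // Illustrative usage (a sketch; thread_axis is the usual helper from
  // expr.h, and xo/xi are IterVars produced by an earlier split):
  //
  //   IterVar bx = thread_axis(Range(0, 256), "blockIdx.x");
  //   IterVar tx = thread_axis(Range(0, 64), "threadIdx.x");
  //   s[B].bind(xo, bx);
  //   s[B].bind(xi, tx);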
  /*!
   * \brief Set the predicate under which a store to the array can be performed.
   *  Use this when there are duplicated threads doing the same store and we only
   *  need one of them to do the store.
   *
   * \note This is a dangerous scheduling primitive that can change the behavior of
   *    the program. Only use it when we are certain that there are duplicated stores.
   * \param predicate The condition to be checked.
   * \return reference to self.
   */
  Stage& set_store_predicate(Expr predicate);
  /*!
   * \brief Specify environment threads that are launched around the group's scope.
   *  This can only be used in a group stage.
   * \param threads The threads to be launched around the scope.
   * \note Each thread can only appear in one env_threads.
   *    This is a beta feature.
   * \return reference to self.
   */
  Stage& env_threads(Array<IterVar> threads);
  /*!
   * \brief Split the parent by factor, generating an outer and an inner iteration.
   * \param parent The parent iteration domain.
   * \param factor The split factor of the loop.
   * \param p_outer The resulting outer domain.
   * \param p_inner The resulting inner domain.
   * \return reference to self.
   */
  EXPORT Stage& split(IterVar parent, Expr factor, IterVar* p_outer, IterVar* p_inner);  // NOLINT(*)
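  //
  // Illustrative usage (a sketch; B comes from compute() and s is a
  // Schedule over it):
  //
  //   IterVar xo, xi;
  //   s[B].split(B->op.as<ComputeOpNode>()->axis[0], 16, &xo, &xi);
  //
  // Afterwards xo spans ceil(extent / 16) iterations and xi spans 16.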
  /*!
   * \brief Split the iteration with a given number of parts.
   *
   * \param parent The parent domain.
   * \param nparts The number of parts in the outer domain.
   * \param p_outer The resulting outer domain.
   * \param p_inner The resulting inner domain.
   * \return reference to self.
   */
  Stage& split_by_nparts(IterVar parent, Expr nparts, IterVar* p_outer, IterVar* p_inner);   // NOLINT(*)
  /*!
   * \brief Fuse the outer and inner domains into a single target domain.
   * \param outer The outer domain to be fused.
   * \param inner The inner domain to be fused.
   * \param p_target The resulting target domain.
   * \return reference to self.
   */
  Stage& fuse(IterVar outer, IterVar inner, IterVar* p_target);  // NOLINT(*)
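  //
  // Illustrative usage (a sketch; xo and xi come from a prior split):
  //
  //   IterVar fused;
  //   s[B].fuse(xo, xi, &fused);  // fused spans the xo * xi iteration space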
  /*!
   * \brief Reorder the iteration.
   * \param order The new order of the iteration variables.
   * \return reference to self.
   */
  Stage& reorder(const Array<IterVar>& order);   // NOLINT(*)
  /*!
   * \brief Perform tiling on two dimensions.
   *  The final loop order, from outermost to innermost, is
   *  [x_outer, y_outer, x_inner, y_inner].
   *
   * \param x_parent The original x dimension.
   * \param y_parent The original y dimension.
   * \param x_factor The stride factor on the x axis.
   * \param y_factor The stride factor on the y axis.
   * \param p_x_outer Outer axis of the x dimension.
   * \param p_y_outer Outer axis of the y dimension.
   * \param p_x_inner Inner axis of the x dimension.
   * \param p_y_inner Inner axis of the y dimension.
   * \return reference to self.
   */
  Stage& tile(IterVar x_parent, IterVar y_parent,   // NOLINT(*)
              Expr x_factor, Expr y_factor,
              IterVar* p_x_outer, IterVar* p_y_outer,
              IterVar* p_x_inner, IterVar* p_y_inner);
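  //
  // Illustrative usage (a sketch; x and y are C's two data parallel axes):
  //
  //   IterVar xo, yo, xi, yi;
  //   s[C].tile(x, y, 8, 8, &xo, &yo, &xi, &yi);
  //
  // Equivalent to splitting x by 8 and y by 8, then reordering the
  // results to [xo, yo, xi, yi].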
  /*!
   * \brief Vectorize iteration.
   * \param var The axis to be vectorized.
   * \return reference to self.
   */
  Stage& vectorize(IterVar var);   // NOLINT(*)
  /*!
   * \brief Replace the computation of the current stage with tensor intrinsic f.
   * \param var The axis that marks the beginning of tensorization.
   *  Every operation inside the axis (including the axis itself) is tensorized.
   * \param f The Tensor compute intrinsics.
   * \return reference to self.
   */
  Stage& tensorize(IterVar var, TensorIntrin f);   // NOLINT(*)
  /*!
   * \brief Unroll iteration.
   * \param var The axis to be unrolled.
   * \return reference to self.
   */
  Stage& unroll(IterVar var);   // NOLINT(*)
  /*!
   * \brief Parallelize iteration.
   * \param var The axis to be parallelized.
   * \return reference to self.
   */
  Stage& parallel(IterVar var);   // NOLINT(*)
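  //
  // Illustrative usage (a sketch; xo/xi come from a prior split):
  //
  //   s[B].parallel(xo);    // run the outer loop across CPU threads
  //   s[B].vectorize(xi);   // emit vector code for the inner loop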
  /*!
   * \brief Annotate the iteration with a pragma.
   *
   * \param var The axis to be annotated.
   * \param pragma_type The pragma type.
   *
   * \return reference to self.
   */
  Stage& pragma(IterVar var, const std::string& pragma_type);   // NOLINT(*)
  /*!
   * \brief Fetch data in advance.
   * \param domain The tensor to be prefetched.
   * \param var The iteration point at which to apply prefetching.
   * \param offset The number of iterations to be fetched in advance.
   * \return reference to self.
   */
  Stage& prefetch(const Tensor& domain, IterVar var, Expr offset);  // NOLINT(*)
  /*!
   * \brief Set an alignment requirement for a specific dimension,
   *  such that stride[axis] == k * factor + offset for some k.
   *
   * \param axis The dimension for which alignment is specified.
   * \param factor The factor multiple of the alignment.
   * \param offset The required offset factor.
   * \return reference to self.
   */
  Stage& storage_align(IterVar axis, int factor, int offset);  // NOLINT(*)
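  //
  // Illustrative usage (a sketch; AA is assumed to be a stage cached in
  // shared memory). Request stride[axis] == k * 48 + 16 for some k,
  // e.g. to avoid shared-memory bank conflicts on a GPU:
  //
  //   s[AA].storage_align(AA->op.as<ComputeOpNode>()->axis[0], 48, 16);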
  /*!
   * \brief Compute current stage with double buffering.
   * \return reference to self.
   */
  Stage& double_buffer();   // NOLINT(*)
  /*!
   * \brief whether the stage has been scheduled.
   * \return whether the stage has been scheduled.
   */
  bool is_scheduled() const;
  /*!
   * \brief Get the attachment spec of the current stage.
   *  If the stage is computed at group root, this function
   *  traverses the group hierarchy to get the
   *  final spec from the group.
   * \return A stage representing the attach spec of the group.
   */
  Stage GetAttachSpec() const;
  // declare container type
  using ContainerType = StageNode;
};

/*!
 * \brief Global schedule container
 *  for operations and all the operations they depend on.
 *  The schedule per Operation is called a stage.
 */
class Schedule : public NodeRef {
 public:
  Schedule() {}
  explicit Schedule(std::shared_ptr<Node> n) : NodeRef(n) {}
  /*!
   * \brief Get a copy of current schedule.
   * \return The copied schedule.
   */
  Schedule copy() const;
  /*!
   * \brief Get the stage that corresponds to the op.
   * \param op The operation.
   */
  EXPORT Stage operator[](const Operation& op);
  /*!
   * \brief Shorthand for getting the stage of a tensor's operation.
   * \param tensor The tensor
   * \return The stage corresponding to the tensor's op
   */
  EXPORT Stage operator[](const Tensor& tensor) {
    return this->operator[](tensor->op);
  }
  /*!
   * \brief Create a new stage group for all intermediate
   *  operations between inputs and outputs.
   *
   * \param outputs The output boundary of the group.
   * \param inputs The input boundary of the group.
   * \param include_inputs Whether to include the inputs if they are reachable from the outputs.
   * \return The new grouped stage.
   */
  Stage create_group(const Array<Tensor>& outputs,
                     const Array<Tensor>& inputs,
                     bool include_inputs = false);
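  //
  // Illustrative usage (a sketch; A, B, C are assumed tensors where B
  // consumes A and C consumes B):
  //
  //   Stage g = s.create_group({B}, {A}, /*include_inputs=*/false);
  //   g.compute_at(s[C], C->op.as<ComputeOpNode>()->axis[0]);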
  /*!
   * \brief Create a cache read of the original tensor for the readers.
   *  This will mutate the body of the readers.
   *  A new stage will be created for the tensor.
   * \param tensor The tensor to be cached.
   * \param scope The scope of the cache.
   * \param readers The readers to redirect to the cache.
   * \return The created tensor.
   */
  Tensor cache_read(const Tensor& tensor,
                    const std::string& scope,
                    const Array<Operation>& readers);
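  //
  // Illustrative usage (a sketch): stage reads of A into shared memory
  // for the consumer B, then schedule where the copy happens:
  //
  //   Tensor AA = s.cache_read(A, "shared", {B->op});
  //   s[AA].compute_at(s[B], xo);  // xo: an outer IterVar of B's stage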
  /*!
   * \brief Create a cache write tensor for the producing tensor.
   *  The cache tensor will take over the body of the original tensor op.
   *
   *  This function can be used to do data layout transformation.
   *  If there is a split/fuse/reorder on the data-parallel axis of the tensor
   *  before cache_write is called, the intermediate cache stores
   *  the data in the layout given by the iteration order of the leaf axes.
   *  The data will be transformed back to the original layout in the original tensor.
   *  The user can further call compute_inline to inline the original layout and keep
   *  the data stored in the transformed layout.
   *
   * \param tensor The tensor to be produced.
   * \param scope The scope of the storage.
   * \return The created tensor.
   */
  Tensor cache_write(const Tensor& tensor, const std::string& scope);
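  //
  // Illustrative usage (a sketch): accumulate B in a thread-local buffer
  // and write the result back to B's original layout afterwards:
  //
  //   Tensor BL = s.cache_write(B, "local");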
  /*!
   * \brief Factor a reduction axis in the tensor's schedule to be an explicit axis.
   *  This will create a new stage that generates the new tensor with axis
   *  as the first dimension. The tensor's body will be rewritten as a reduction
   *  over the factored tensor.
   *
   *  P. Suriana, A. Adams and S. Kamil. Parallel associative reductions in Halide. CGO'17
   *
   * \param tensor The tensor to be factored.
   * \param axis The reduction axis in the tensor's schedule to be factored.
   * \return The created factored tensors.
   */
  Array<Tensor> rfactor(const Tensor& tensor,
                        const IterVar& axis);
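  //
  // Illustrative usage (a sketch; k is B's reduction IterVar):
  //
  //   IterVar ko, ki;
  //   s[B].split(k, 16, &ko, &ki);
  //   // Partial results over ki become a new data parallel dimension.
  //   Array<Tensor> BF = s.rfactor(B, ki);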
  /*!
   * \brief Normalize the schedule.
   *  This is needed before bound inference.
   *  Inserts the necessary RebaseNodes to make sure all leaf_iter_vars
   *  are of the form [0, extent).
   *
   * \return A normalized schedule, which can be the same as the current one.
   */
  Schedule normalize();
  /*!
   * \brief access the internal node container
   * \return the pointer to the internal node container
   */
  inline const ScheduleNode* operator->() const;
  /*!
   * \brief access the internal node container
   * \return the pointer to the internal node container
   */
  inline ScheduleNode* operator->();
  // declare container type
  using ContainerType = ScheduleNode;
};

/*!
 * \brief The schedule relation between IterVars;
 *  can be Split, Fuse, or Rebase.
 */
class IterVarRelation : public NodeRef {
 public:
  IterVarRelation() {}
  explicit IterVarRelation(std::shared_ptr<Node> n) : NodeRef(n) {}
  /*!
   * \brief access the internal node container
   * \return the pointer to the internal node container
   */
  inline const IterVarRelationNode* operator->() const;
};

/*!
 * \brief Additional schedulable attributes of an IterVar.
 */
class IterVarAttr : public NodeRef {
 public:
  IterVarAttr() {}
  explicit IterVarAttr(std::shared_ptr<Node> n) : NodeRef(n) {}
  /*!
   * \brief access the internal node container
   * \return the pointer to the internal node container
   */
  inline const IterVarAttrNode* operator->() const;
};

/*!
 * \brief Represents a stage.
 *
 *  The relations form a directed acyclic hypergraph in a bipartite manner,
 *  where each node is represented by an IterVar
 *  and each hyper-edge is represented by an IterVarRelation.
 *  The relations connect the IterVars in the graph.
 *
 *  Besides the typical stages that correspond to operations,
 *  there are also group stages, which group stages together.
 *  Each stage's group (given by the group field) represents a constraint:
 *  the stage can only be attached to stages within the group.
 *
 *  A group stage node can be attached to IterVars as in a normal stage.
 */
class StageNode : public Node {
 public:
  /*!
   * \brief The operation of the stage, which can differ from the original op.
   *  If it is null, then this stage is a group stage.
   */
  Operation op;
  /*!
   * \brief The original operator.
   *  The op field can change during scheduling to alter the dataflow,
   *  while origin_op remains fixed.
   */
  Operation origin_op;
  /*! \brief All the iteration variables in the stage. */
  Array<IterVar> all_iter_vars;
  /*! \brief The current active leaf iter vars in the stage. */
  Array<IterVar> leaf_iter_vars;
  /*!
   * \brief Specify threads to be launched at the stage.
   *  This is only valid for composite ops such as Scan.
   * \note Experimental primitive: used for thread persistence.
   */
  Array<IterVar> env_threads;
  /*!
   * \brief The predicate under which store can happen
   *  Use this when there can be duplicated threads doing the same store.
   * \note Experimental primitive: used by cross-thread reduction.
   */
  Expr store_predicate;
  /*! \brief The relations between the IterVars. */
  Array<IterVarRelation> relations;
  /*! \brief Additional attributes of the iter vars. */
  Map<IterVar, IterVarAttr> iter_var_attrs;
  /*! \brief The attachment type of the schedule */
  AttachType attach_type{kGroupRoot};
  /*! \brief The attach point of this schedule. */
  IterVar attach_ivar;
  /*! \brief The stage this node attaches to */
  Stage attach_stage;
  /*! \brief The thread storage scope level of the stage */
  std::string scope;
  /*! \brief Whether this is an output stage */
  bool is_output{false};
  /*! \brief Whether to apply the double buffering optimization to this stage. */
  bool double_buffer{false};
  /*!
   * \brief The parent group of the current stage.
   *  The stage cannot be assigned to stages outside the group.
   */
  Stage group;
  /*! \brief Number of direct child stages, only used for group stage.*/
  int num_child_stages{0};

  void VisitAttrs(AttrVisitor* v) final {
    v->Visit("op", &op);
    v->Visit("origin_op", &origin_op);
    v->Visit("all_iter_vars", &all_iter_vars);
    v->Visit("leaf_iter_vars", &leaf_iter_vars);
    v->Visit("env_threads", &env_threads);
    v->Visit("relations", &relations);
    v->Visit("iter_var_attrs", &iter_var_attrs);
    v->Visit("attach_type", &attach_type);
    v->Visit("attach_ivar", &attach_ivar);
    v->Visit("attach_stage", &attach_stage);
    v->Visit("scope", &scope);
    v->Visit("is_output", &is_output);
    v->Visit("double_buffer", &double_buffer);
    v->Visit("group", &group);
    v->Visit("num_child_stages", &num_child_stages);
  }

  static constexpr const char* _type_key = "Stage";
  TVM_DECLARE_NODE_TYPE_INFO(StageNode, Node);
};

/*! \brief node container for schedule */
class ScheduleNode : public Node {
 public:
  /*! \brief The output operations in original data flow graph */
  Array<Operation> outputs;
  /*!
   * \brief List of all stages for the ops.
   *  The stages are sorted in dependency order.
   */
  Array<Stage> stages;
  /*!
   * \brief List of all stage groups.
   */
  Array<Stage> groups;
  /*! \brief Map of the original operations to their stages. */
  Map<Operation, Stage> stage_map;
  /*!
   * \brief Internal stage map to map internal ops to stages.
   *  This is created on demand and can be invalidated.
   */
  std::unordered_map<const Node*, Stage> op2stage_cache_;

  void VisitAttrs(AttrVisitor* v) final {
    v->Visit("outputs", &outputs);
    v->Visit("stages", &stages);
    v->Visit("groups", &groups);
    v->Visit("stage_map", &stage_map);
  }

  /*! \brief Initialize temp cache. */
  void InitCache();
  /*! \brief Invalidate temp cache. */
  void InvalidateCache();

  /*!
   * \brief Create a schedule for an array of ops (and their dependencies).
   * \param ops The ops to be scheduled.
   * \return The created schedule.
   */
  EXPORT static Schedule make(Array<Operation> ops);

  static constexpr const char* _type_key = "Schedule";
  TVM_DECLARE_NODE_TYPE_INFO(ScheduleNode, Node);
};

/*!
 * \brief Create a schedule for an array of ops (and their dependencies).
 * \param ops The ops to be scheduled.
 * \return The created schedule.
 */
inline Schedule create_schedule(Array<Operation> ops) {
  return ScheduleNode::make(ops);
}
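
// Illustrative usage (a sketch, not part of the original header;
// placeholder() and compute() are the tensor builders from operation.h,
// and the names A/B are assumptions):
//
//   Tensor A = placeholder({1024}, Float(32), "A");
//   Tensor B = compute({1024}, [&](Var i) { return A(i) + 1.0f; }, "B");
//   Schedule s = create_schedule({B->op});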

/*! \brief node container for IterVar attr */
class IterVarAttrNode : public Node {
 public:
  /*! \brief The iteration type. */
515 516 517
  IterVarType iter_type{kDataPar};
  /*! \brief The thread this iter Var binds, can be null */
  IterVar bind_thread;
518 519 520 521
  /*! \brief List of tensor to be prefetched in this loop */
  Array<Tensor> prefetch_data;
  /*! \brief The offset used in each prefetch */
  Array<Expr> prefetch_offset;
  /*!
   * \brief Tensor intrinsic used in tensorization,
   *   when the axis is marked as Tensorized
   */
  TensorIntrin tensor_intrin;
  /*! \brief Alignment factor of buffer dimension */
  int dim_align_factor{0};
  /*! \brief Alignment offset of buffer dimension */
  int dim_align_offset{0};
  /*!
   * \brief Additional pragmas, array of StringImm
   */
  Array<Expr> pragmas;

  void VisitAttrs(AttrVisitor* v) final {
    v->Visit("iter_type", &iter_type);
    v->Visit("bind_thread", &bind_thread);
    v->Visit("prefetch_data", &prefetch_data);
    v->Visit("prefetch_offset", &prefetch_offset);
    v->Visit("tensor_intrin", &tensor_intrin);
    v->Visit("dim_align_factor", &dim_align_factor);
    v->Visit("dim_align_offset", &dim_align_offset);
    v->Visit("pragmas", &pragmas);
  }

  static constexpr const char* _type_key = "IterVarAttr";
  TVM_DECLARE_NODE_TYPE_INFO(IterVarAttrNode, Node);
};

/*! \brief Base node of iteration variable relations. */
class IterVarRelationNode : public Node {
 public:
  static constexpr const char* _type_key = "IterVarRelation";
  TVM_DECLARE_BASE_NODE_INFO(IterVarRelationNode, Node);
};

/*!
 * \brief Split the parent domain into the product of
 *  outer and inner.
 */
class SplitNode : public IterVarRelationNode {
 public:
  /*! \brief The parent domain */
  IterVar parent;
  /*! \brief The outer domain */
  IterVar outer;
  /*! \brief The inner domain */
  IterVar inner;
  /*! \brief The split factor */
  Expr factor;
  /*! \brief Number of parts, only factor or nparts can be given */
  Expr nparts;

  void VisitAttrs(AttrVisitor* v) final {
    v->Visit("parent", &parent);
    v->Visit("outer", &outer);
    v->Visit("inner", &inner);
    v->Visit("factor", &factor);
    v->Visit("nparts", &nparts);
  }

  static IterVarRelation make(IterVar parent,
                              IterVar outer,
                              IterVar inner,
                              Expr factor,
                              Expr nparts);

  static constexpr const char* _type_key = "Split";
  TVM_DECLARE_NODE_TYPE_INFO(SplitNode, IterVarRelationNode);
};

/*!
 * \brief Fuse two domains into one domain.
 */
class FuseNode : public IterVarRelationNode {
 public:
  /*! \brief The outer domain */
  IterVar outer;
  /*! \brief The inner domain */
  IterVar inner;
  /*! \brief The target domain */
  IterVar fused;

  void VisitAttrs(AttrVisitor* v) final {
    v->Visit("outer", &outer);
    v->Visit("inner", &inner);
    v->Visit("fused", &fused);
  }

  static IterVarRelation make(
      IterVar outer, IterVar inner, IterVar fused);

  static constexpr const char* _type_key = "Fuse";
  TVM_DECLARE_NODE_TYPE_INFO(FuseNode, IterVarRelationNode);
};

/*!
 * \brief Rebase the iteration to make min to be 0.
 *  This is useful to normalize the Schedule
 *  to make every leaf variable's min to be 0.
 */
class RebaseNode : public IterVarRelationNode {
 public:
  /*! \brief The parent domain */
  IterVar parent;
  /*! \brief The rebased domain */
  IterVar rebased;

  void VisitAttrs(AttrVisitor* v) final {
    v->Visit("parent", &parent);
    v->Visit("rebased", &rebased);
  }

  static IterVarRelation make(IterVar parent, IterVar rebased);

  static constexpr const char* _type_key = "Rebase";
  TVM_DECLARE_NODE_TYPE_INFO(RebaseNode, IterVarRelationNode);
};


// implementations
inline const StageNode* Stage::operator->() const {
  return static_cast<const StageNode*>(node_.get());
}
inline StageNode* Stage::operator->() {
  return static_cast<StageNode*>(node_.get());
}

inline const ScheduleNode* Schedule::operator->() const {
  return static_cast<const ScheduleNode*>(node_.get());
}
inline ScheduleNode* Schedule::operator->() {
  return static_cast<ScheduleNode*>(node_.get());
}

inline const IterVarRelationNode* IterVarRelation::operator->() const {
  return static_cast<const IterVarRelationNode*>(node_.get());
}

inline const IterVarAttrNode* IterVarAttr::operator->() const {
  return static_cast<const IterVarAttrNode*>(node_.get());
}
}  // namespace tvm
#endif  // TVM_SCHEDULE_H_