wenyuanbo / tic

Commit 302c2e64, authored Jan 08, 2017 by tqchen
Make Tensor comparator and hash to be aware of same op and index, init checkin of the ir generation
parent eee0ebef
Showing 9 changed files with 311 additions and 6 deletions (+311 -6)
include/tvm/operation.h        +13   -0
include/tvm/tensor.h           +17   -0
python/tvm/_ctypes/_api.py      +4   -1
python/tvm/tensor.py           +30   -0
src/c_api/c_api_lang.cc        +17   -0
src/pass/schedule_ops.cc      +214   -0
src/schedule/bound.cc           +5   -1
src/schedule/int_set.cc         +5   -3
tests/python/test_tensor.py     +6   -1
include/tvm/operation.h

@@ -86,4 +86,17 @@ inline Tensor Compute(Array<Expr> shape,
}  // namespace tvm

namespace std {
template <>
struct hash<::tvm::Tensor> {
  std::size_t operator()(const ::tvm::Tensor& k) const {
    if (k.defined() && k->op.defined()) {
      return k->op.hash();
    } else {
      return k.hash();
    }
  }
};
}  // namespace std
#endif  // TVM_OPERATION_H_
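
The hunk above specializes std::hash for tvm::Tensor so that, whenever a source op is attached, the hash is taken from that op rather than from the tensor handle itself; only op-less tensors fall back to their own hash. A minimal standalone sketch of the same delegation pattern, using hypothetical Handle/Node types rather than TVM's:

// Minimal sketch of the hash-delegation pattern above. `Handle` and `Node`
// are hypothetical stand-ins, not TVM types.
#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>

struct Node { int id; };                  // the identity the hash should key on

struct Handle {
  std::shared_ptr<Node> node;             // several Handles may share one Node
  bool defined() const { return node != nullptr; }
};

namespace std {
template <>
struct hash<Handle> {
  std::size_t operator()(const Handle& h) const {
    // Delegate to the shared identity when present; otherwise fall back to
    // hashing the raw pointer (mirrors the defined()/fallback branch above).
    return h.defined() ? std::hash<int>()(h.node->id)
                       : std::hash<Node*>()(h.node.get());
  }
};
}  // namespace std

int main() {
  auto n = std::make_shared<Node>(Node{42});
  Handle a{n}, b{n};                       // two handles to the same node
  std::cout << (std::hash<Handle>()(a) == std::hash<Handle>()(b)) << "\n";  // 1
  return 0;
}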

include/tvm/tensor.h

@@ -47,6 +47,12 @@ class Tensor : public FunctionRef {
   * \return the pointer to the internal node container
   */
  inline const TensorNode* operator->() const;
  /*!
   * \brief check if two tensors equals each other.
   * \param other tensor to be checked.
   * \return whether the two tensors equals each other.
   */
  inline bool operator==(const Tensor& other) const;
  /*! \return The dimension of the tensor */
  inline size_t ndim() const;
  /*!

@@ -201,6 +207,17 @@ inline size_t Tensor::ndim() const {
  return (*this)->shape.size();
}

inline bool Tensor::operator==(const Tensor& other) const {
  if (get() == other.get()) return true;
  if (get() == nullptr || other.get() == nullptr) return false;
  if ((*this)->op.defined() || other->op.defined()) {
    return (*this)->op == other->op &&
           (*this)->value_index == other->value_index;
  } else {
    return false;
  }
}

// macro to turn every operation of slice to expression
#define DEFINE_OVERLOAD_SLICE_UNARY_OP(Op)              \
  inline Expr operator Op (const Tensor::Slice& a) {    \
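
The new operator== checks pointer identity first and then, if either side has a producing op, compares the op together with value_index; two distinct handles that name the same output of the same operation therefore compare equal, consistent with the hash specialization above. A small sketch of that two-step comparison on an invented handle type (not TVM's):

// Sketch of "identity first, then fields" equality for a shared-pointer
// handle. `Handle`, `Node`, `op_id`, `value_index` are illustrative names.
#include <cassert>
#include <memory>

struct Node {
  int op_id = -1;       // -1 stands for "no producing op attached"
  int value_index = 0;  // which output of that op this handle names
};

struct Handle {
  std::shared_ptr<Node> node;
  const Node* get() const { return node.get(); }

  bool operator==(const Handle& other) const {
    if (get() == other.get()) return true;                    // same node object
    if (get() == nullptr || other.get() == nullptr) return false;
    if (node->op_id >= 0 || other.node->op_id >= 0) {
      // compare by producing op and output index, like Tensor::operator==
      return node->op_id == other.node->op_id &&
             node->value_index == other.node->value_index;
    }
    return false;                                             // distinct anonymous nodes
  }
};

int main() {
  Handle a{std::make_shared<Node>(Node{7, 0})};
  Handle b{std::make_shared<Node>(Node{7, 0})};   // different node, same op/index
  Handle c{std::make_shared<Node>(Node{7, 1})};   // same op, different output
  assert(a == b);
  assert(!(a == c));
  return 0;
}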

python/tvm/_ctypes/_api.py

@@ -29,9 +29,12 @@ kNodeHandle = 4
def _type_key(handle):
    ret_val = ArgVariant()
    ret_typeid = ctypes.c_int()
    ret_success = ctypes.c_int()
    check_call(_LIB.TVMNodeGetAttr(
        handle, c_str("type_key"),
        ctypes.byref(ret_val),
        ctypes.byref(ret_typeid),
        ctypes.byref(ret_success)))
    return py_str(ret_val.v_str)

NODE_TYPE = {

python/tvm/tensor.py

from __future__ import absolute_import as _abs
from ._ctypes._api import NodeBase, SliceBase, register_node, convert
from . import collections as _collections
from . import _function_internal
from . import make as _make
from . import expr as _expr

@@ -38,6 +39,35 @@ class Tensor(NodeBase):
    def __getitem__(self, indices):
        return TensorSlice(self, indices)

    def __hash__(self):
        return _function_internal._TensorHash(self)

    def __eq__(self, other):
        if not isinstance(other, Tensor):
            return False
        return _function_internal._TensorEqual(self, other)

    @property
    def ndim(self):
        return len(self.shape)


class Operation(NodeBase):
    def output(self, index):
        """Get the index-th output of the operation

        Parameters
        ----------
        index : int
            The index size.

        Returns
        -------
        out : Tensor
            The i-th output.
        """
        return _function_internal._OpGetOutput(self, index)


@register_node
class ComputeOp(Operation):
    pass

src/c_api/c_api_lang.cc

@@ -149,6 +149,17 @@ TVM_REGISTER_API(_Tensor)
                        args.at(4));
  });

TVM_REGISTER_API(_TensorEqual)
.set_body([](const ArgStack& args, RetValue* ret) {
    *ret = args.at(0).operator Tensor() == args.at(1).operator Tensor();
  });

TVM_REGISTER_API(_TensorHash)
.set_body([](const ArgStack& args, RetValue* ret) {
    *ret = static_cast<int64_t>(
        std::hash<Tensor>()(args.at(0).operator Tensor()));
  });

TVM_REGISTER_API(_ComputeOp)
.set_body([](const ArgStack& args, RetValue* ret) {
    *ret = ComputeOpNode::make(args.at(0),

@@ -156,6 +167,12 @@ TVM_REGISTER_API(_ComputeOp)
                               args.at(2));
  });

TVM_REGISTER_API(_OpGetOutput)
.set_body([](const ArgStack& args, RetValue* ret) {
    *ret = args.at(0).operator Operation().output(
        args.at(1).operator size_t());
  });

TVM_REGISTER_API(_IterVar)
.set_body([](const ArgStack& args, RetValue* ret) {
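
Each TVM_REGISTER_API entry binds a name to a lambda that pulls its arguments off the ArgStack and writes the result into *ret; _TensorEqual and _TensorHash simply forward to the C++ operator== and std::hash<Tensor> introduced above, which is what the Python __eq__/__hash__ methods call into. A rough standalone sketch of this name-to-callable registration pattern (illustrative only; these are not TVM's actual macros or types):

// Toy name->callable registry in the spirit of TVM_REGISTER_API: entries are
// registered by static initializers and later looked up and invoked by name.
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Args = std::vector<double>;                 // stand-in for the ArgStack
using ApiFunc = std::function<double(const Args&)>;

std::map<std::string, ApiFunc>& Registry() {
  static std::map<std::string, ApiFunc> reg;      // one global table
  return reg;
}

struct Registrar {
  Registrar(const std::string& name, ApiFunc f) { Registry()[name] = std::move(f); }
};

// Analogue of TVM_REGISTER_API(_Add).set_body([](args, ret) { ... });
static Registrar add_entry("_Add",
    [](const Args& a) { return a.at(0) + a.at(1); });

int main() {
  std::cout << Registry().at("_Add")({1.5, 2.0}) << "\n";  // prints 3.5
  return 0;
}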

src/pass/schedule_ops.cc

@@ -5,11 +5,225 @@
#include <tvm/ir.h>
#include <tvm/ir_mutator.h>
#include <tvm/ir_pass.h>
#include <tvm/ir_visitor.h>
#include "./scope.h"

namespace tvm {
namespace ir {
namespace {

/*!
 * \brief use message passing to calculate the assignment of each Var inside the loop body.
 * \param s The schedule to be used.
 * \param dom_map The domain map of each iteration variable's domain
 * \param p_state The message passing state
 *     IterVar->The assignment.
 */
void PassUpOffset(const Schedule& s,
                  const std::unordered_map<IterVar, Range>& dom_map,
                  std::unordered_map<IterVar, Expr>* p_state) {
  auto& state = *p_state;
  for (size_t i = s->relations.size(); i != 0; --i) {
    IterVarRelation rel = s->relations[i - 1];
    if (rel.as<SplitNode>()) {
      const SplitNode* s = rel.as<SplitNode>();
      Expr outer = state.at(s->outer);
      Expr inner = state.at(s->outer);
      Expr factor = dom_map.at(s->outer)->extent;
      Expr offset = inner + outer * factor;
      Expr outer_min = dom_map.at(s->parent)->min;
      if (!is_zero(outer_min)) {
        offset = outer_min + offset;
      }
      state[s->parent] = offset;
    } else if (rel.as<FuseNode>()) {
      const FuseNode* s = rel.as<FuseNode>();
      Expr value = state.at(s->fused);
      Expr factor = dom_map.at(s->outer)->extent;
      state[s->outer] = value / factor;
      state[s->inner] = value % factor;
    } else {
      LOG(FATAL) << "unknown relation type";
    }
  }
}
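
PassUpOffset walks the relations from the leaf loops back toward the root: a split's parent index is rebuilt as inner + outer * factor (shifted by the parent's minimum when it is nonzero), and a fuse is undone with integer division and modulo by the factor. A tiny numeric sketch of that arithmetic on plain integers rather than TVM Exprs (the helper names are invented, and factor is taken here to be the inner extent):

// The split/fuse index arithmetic on concrete integers -- not TVM code.
#include <cassert>
#include <utility>

// Split: a parent loop is iterated as (outer, inner); the parent index is
// recovered as inner + outer * factor (plus the parent's min when nonzero).
int ParentFromSplit(int outer, int inner, int factor, int parent_min = 0) {
  return parent_min + inner + outer * factor;
}

// Fuse: one fused index is unpacked back into (outer, inner) by div/mod.
std::pair<int, int> UnfuseIndex(int fused, int factor) {
  return {fused / factor, fused % factor};
}

int main() {
  // Split a 12-long loop by factor 4: (outer=2, inner=3) maps to parent 11.
  assert(ParentFromSplit(2, 3, 4) == 11);
  // Fuse loops of extent 3 and 4: fused index 11 unpacks to (2, 3).
  std::pair<int, int> oi = UnfuseIndex(11, 4);
  assert(oi.first == 2 && oi.second == 3);
  return 0;
}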

/*!
 * \brief split the expr by addition.
 * \param expr The expression to be split.
 * \param loop_level The loop level of each Variable
 * \param result vector of (level, expr)
 *     The level gives the minimum loop level this expression needs to be computed at.
 *     The Expr gives the expression content.
 */
void SplitByAdd(Expr expr,
                const std::unordered_map<const Variable*, size_t>& loop_level,
                std::vector<std::pair<size_t, Expr> >* result) {
  const Add* op = expr.as<Add>();
  if (op != nullptr) {
    SplitByAdd(op->a, loop_level, result);
    SplitByAdd(op->b, loop_level, result);
  } else {
    size_t max_level = 0;
    auto fvisit = [&max_level, &loop_level](const NodeRef& n) {
      const Variable* op = n.as<Variable>();
      if (op != nullptr) {
        auto it = loop_level.find(op);
        if (it != loop_level.end()) {
          max_level = std::max(max_level, it->second);
        }
      }
    };
    PostOrderVisit(expr, fvisit);
    result->push_back(std::make_pair(max_level, expr));
  }
}
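
SplitByAdd recurses through an additive expression and, for every non-Add term, records the deepest loop level of any variable appearing in it, so the term can later be bound at the outermost loop where it is already computable. A minimal sketch of the same idea over a toy expression tree (invented types, not TVM IR):

// Flatten a nested sum into terms and tag each with the deepest loop level of
// the variables it mentions. The Expr type here is a toy stand-in.
#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Expr {
  bool is_add = false;
  std::shared_ptr<Expr> a, b;        // children, when this node is a sum
  std::vector<std::string> vars;     // variables mentioned, when it is a term
};

void SplitByAddToy(const std::shared_ptr<Expr>& e,
                   const std::map<std::string, std::size_t>& loop_level,
                   std::vector<std::pair<std::size_t, std::shared_ptr<Expr>>>* out) {
  if (e->is_add) {                   // recurse into both addends
    SplitByAddToy(e->a, loop_level, out);
    SplitByAddToy(e->b, loop_level, out);
  } else {                           // a term: find the deepest level it needs
    std::size_t level = 0;
    for (const std::string& v : e->vars) {
      auto it = loop_level.find(v);
      if (it != loop_level.end()) level = std::max(level, it->second);
    }
    out->push_back({level, e});      // computable once loop `level` is open
  }
}

int main() {
  auto x = std::make_shared<Expr>();  x->vars = {"i"};
  auto y = std::make_shared<Expr>();  y->vars = {"i", "j"};
  auto sum = std::make_shared<Expr>();
  sum->is_add = true;  sum->a = x;  sum->b = y;

  std::vector<std::pair<std::size_t, std::shared_ptr<Expr>>> terms;
  SplitByAddToy(sum, {{"i", 1}, {"j", 2}}, &terms);
  // terms[0] = {1, x}: only needs loop i; terms[1] = {2, y}: also needs loop j.
  return terms.size() == 2 ? 0 : 1;
}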

/*!
 * \brief combine the nest stmt, whose body is not defined.
 * \param nest A list of For and LetStmt, whose body is not defined.
 * \param body body
 */
Stmt CombineNest(std::vector<Stmt>&& nest, Stmt body) {
  while (!nest.empty()) {
    Stmt s = std::move(nest.back());
    nest.pop_back();
    if (s.as<For>()) {
      auto n = std::make_shared<For>(*s.as<For>());
      n->body = body;
      body = Stmt(n);
    } else if (s.as<LetStmt>()) {
      auto n = std::make_shared<LetStmt>(*s.as<LetStmt>());
      n->body = body;
      body = Stmt(n);
    } else if (s.as<AttrStmt>()) {
      auto n = std::make_shared<AttrStmt>(*s.as<AttrStmt>());
      n->body = body;
      body = Stmt(n);
    } else {
      LOG(FATAL) << "not supported nest type";
    }
  }
  return body;
}
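
CombineNest closes the bodiless For/LetStmt/AttrStmt statements from the innermost one outward, threading the accumulated body into each enclosing statement as it goes. The same inside-out folding, sketched with plain strings (purely illustrative, no TVM statement nodes):

// Fold a list of "open" wrappers around a body, innermost wrapper first.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

std::string CombineNestToy(std::vector<std::string> nest, std::string body) {
  while (!nest.empty()) {
    std::string wrapper = std::move(nest.back());   // innermost remaining wrapper
    nest.pop_back();
    body = wrapper + " { " + body + " }";           // give it a body, move outward
  }
  return body;
}

int main() {
  std::cout << CombineNestToy({"for i", "for j"}, "C[i][j] = A[i][j] + B[i][j];")
            << "\n";
  // Prints: for i { for j { C[i][j] = A[i][j] + B[i][j]; } }
  return 0;
}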

/*!
 * \brief Make the loop nest of the corresponding schedule.
 * \param sch The schedule.
 * \param dom_map The domain map.
 */
std::vector<Stmt> MakeLoopNest(const Schedule& sch,
                               const std::unordered_map<IterVar, Range>& dom_map) {
  // optional, use let to define some CSE in dom_map.
  auto leaf_iter_vars = sch->leaf_iter_vars;
  std::unordered_map<IterVar, Expr> offset;
  std::unordered_map<const Variable*, size_t> loop_level;
  // create the loop nest
  std::vector<Stmt> nest;
  nest.resize(leaf_iter_vars.size() + 1, Stmt());

  for (size_t i = 0; i < leaf_iter_vars.size(); ++i) {
    auto iv = leaf_iter_vars[i];
    // initialize the offset and loop_level
    offset[iv] = iv->var;
    loop_level[iv->var.as<Variable>()] = i + 1;
    nest[i] = AttrStmt::make(iv->var, "scope", iv, Stmt());
    if (iv->thread_tag.length() == 0) {
      Range dom = dom_map.at(iv);
      nest[i] = For::make(iv->var, dom->min, dom->extent,
                          ForType::Serial, DeviceAPI::None, nest[i]);
    }
  }
  // message passing to get offset of root iter vars.
  PassUpOffset(sch, dom_map, &offset);
  for (IterVar iv : sch->op->root_iter_vars()) {
    Expr value = offset.at(iv);
    if (value.same_as(iv->var)) continue;
    using Entry = std::pair<size_t, Expr>;
    std::vector<Entry> splits;
    SplitByAdd(value, loop_level, &splits);

    Expr offset = 0;
    for (size_t i = 0; i <= leaf_iter_vars.size(); ++i) {
      auto iv = leaf_iter_vars[i];
      for (const auto& kv : splits) {
        if (kv.first == i) {
          offset = offset + splits[i].second;
        }
      }
      std::ostringstream os;
      os << iv->var->name_hint << ".at.l" << i;
      Var base_offset(os.str());
      nest[i] = LetStmt::make(base_offset, offset, nest[i]);
      offset = base_offset;
    }
    nest.back() = LetStmt::make(iv->var, offset, nest.back());
  }
  return nest;
}

/*!
 * \brief Make the body statement of the corresponding operation.
 * \param op The operation.
 */
Stmt MakeBody(const Operation& op) {
  Stmt body;
  if (op.as<ComputeOpNode>()) {
    const ComputeOpNode* compute = op.as<ComputeOpNode>();
    // Note: Tensor's address cannot uniquely
    Tensor t = op.output(0);
    Array<Expr> args;
    for (IterVar iv : compute->axis) {
      args.push_back(iv->var);
    }
    body = Provide::make(t, {compute->body}, args);
  } else {
    LOG(FATAL) << "not supported op";
  }
  return body;
}

Stmt MakePipeline(const Schedule& sch, Stmt body) {
  return body;
}

// inject the operator's realization on the stmt.
class InjectRealize : public IRMutator {
 public:
  explicit InjectRealize(Schedule sch)
      : sch_(sch) {}

  Stmt Mutate(Stmt stmt) final {
    const AttrStmt* op = stmt.as<AttrStmt>();
    if (op != nullptr) {
      attr_scope_.Push({op->node, op->type_key}, op->value);
      stmt = IRMutator::Mutate(stmt);
      attr_scope_.Pop({op->node, op->type_key});
    } else {
      stmt = IRMutator::Mutate(stmt);
    }
    if (op != nullptr &&
        op->type_key == "scope" &&
        op->node == sch_->attach_parent) {
      return AttrStmt::make(
          op->node, op->type_key, op->value,
          MakePipeline(sch_, op->body));
    } else {
      return stmt;
    }
  }

 private:
  // the operations to be carried
  Schedule sch_;
  Scope<AttrKey, Expr> attr_scope_;
};

}  // namespace
}  // namespace ir
}  // namespace tvm

src/schedule/bound.cc

@@ -101,7 +101,11 @@ void PassToOperation(
    const Tensor& tensor,
    const std::vector<IntSet>& dim_bounds,
    std::unordered_map<IterVar, std::vector<IntSet> >* result) {
  // This is a push style operation, given output bound, push to the op IterVar bound.
  // It cannot handle complicated cases where op bound is coupled with bounds of
  // all of its outputs, without having a simple commutative union relation.
  //
  // Eventually, we need to change the inference to be a Pull style inference
  if (tensor->op.as<ComputeOpNode>()) {
    auto root_iter_vars = tensor->op->root_iter_vars();
    CHECK_EQ(tensor.ndim(), root_iter_vars.size());

src/schedule/int_set.cc

@@ -220,23 +220,25 @@ void PassUp(const SplitNode* s,
     *parent = IntSet::make_range(dom_map.at(s->parent));
     return;
   }
+  Expr factor = dom_map.at(s->outer)->extent;
   CHECK(outer.defined());
   CHECK(inner.defined());
+  CHECK(factor.defined());
   // copy construct
   auto n = std::make_shared<IntSetNode>(*(inner.operator->()));
   if (IsNumber(outer)) {
     // shift the base offset
     n->base = Range::make_with_min_extent(
-        AsNumber(outer) * s->factor + inner->base->min,
+        AsNumber(outer) * factor + inner->base->min,
         inner->base->extent);
   } else {
     // default use all domains in the data.
     n->domain.push_back(outer->base);
-    n->stride.push_back(s->factor);
+    n->stride.push_back(factor);
     for (size_t i = 0; i < outer->domain.size(); ++i) {
       n->domain.push_back(outer->domain[i]);
-      n->stride.push_back(outer->stride[i] * s->factor);
+      n->stride.push_back(outer->stride[i] * factor);
     }
   }
   *parent = IntSet(n);
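
For a split relation, this PassUp rebuilds the parent's integer set from the outer and inner sets: a constant outer simply shifts the inner set by outer * factor, while a non-constant outer is kept as an extra strided component. A small numeric sketch of the constant-outer case (plain ranges, not TVM IntSets):

// Constant-outer case of PassUp over a split: parent = outer * factor + inner,
// so a fixed outer index shifts the inner range without changing its extent.
// ToyRange is an illustrative stand-in, not TVM's IntSet/Range.
#include <cassert>

struct ToyRange { int min; int extent; };   // the half-open range [min, min + extent)

ToyRange ParentRangeForConstOuter(int outer, int factor, ToyRange inner) {
  return {outer * factor + inner.min, inner.extent};
}

int main() {
  // outer fixed at 2, factor 8, inner covering [0, 8): the parent covers [16, 24).
  ToyRange parent = ParentRangeForConstOuter(2, 8, ToyRange{0, 8});
  assert(parent.min == 16 && parent.extent == 8);
  return 0;
}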

tests/python/test_tensor.py

@@ -11,6 +11,12 @@ def test_tensor():
    print(T.op.body)
    assert(tuple(T.shape) == (m, n, l))
    assert(A.op is None)
    assert(A == A)
    assert(T.op.output(0) == T)
    assert(T.op.output(0).__hash__() == T.__hash__())
    d = {T.op.output(0) : 1}
    assert(d[T] == 1)

def test_tensor_reduce():
    m = tvm.Var('m')

@@ -21,7 +27,6 @@ def test_tensor_reduce():
    T = tvm.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k])
    rv = tvm.IterVar((0, A.shape[1]), name="k")
    C = tvm.compute((m, n), lambda i, j: tvm.sum(T(i, j, rv + 1), rdom=rv))
    print(C.op.body)

if __name__ == "__main__":