Commit cbdd14f1 in wenyuanbo/tic, authored Aug 15, 2017 by Nicolas Vasilache; committed by Tianqi Chen, Aug 14, 2017
[TOPI] C++ doc (#320)
parent b0c42f3b
Showing 6 changed files with 157 additions and 29 deletions.
  HalideIR                                +1   -1
  topi/include/topi/broadcast.h         +115  -20
  topi/include/topi/detail/broadcast.h   +12   -5
  topi/include/topi/ewise.h               +8   -3
  topi/include/topi/nn.h                  +0   -0
  topi/include/topi/tags.h               +21   -0
HalideIR @ 326e2fa1

-Subproject commit 30a85d860567aa30d013a5e75fbd1b0ee2ebe93c
+Subproject commit 326e2fa18734f0592d257da6b8cfaae90a499c5c
topi/include/topi/broadcast.h (View file @ cbdd14f1)
-/*
+/*!
  * Copyright (c) 2017 by Contributors
  * \brief Broadcast op constructions
- * \file broadcast.h
+ * \file topi/broadcast.h
 */
 #ifndef TOPI_BROADCAST_H_
 #define TOPI_BROADCAST_H_
 
-#include <topi/detail/broadcast.h>
+#include <string>
+
+#include "topi/detail/broadcast.h"
+#include "topi/tags.h"
 
 namespace topi {
 
-inline tvm::Tensor broadcast_to(const tvm::Tensor& I,
-                                const tvm::Array<tvm::Expr>& output_shape) {
-  CHECK_GE(output_shape.size(), I->shape.size())
+/*!
+ * \brief Creates an operation that broadcasts a tensor into a compatible
+ * shape according to numpy's rules
+ *
+ * \param t The input tensor
+ * \param output_shape The target output shape, must be compatible
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a broadcast operation
+ */
+inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
+                                const tvm::Array<tvm::Expr>& output_shape,
+                                std::string name = "tensor",
+                                std::string tag = kBroadcast) {
+  CHECK_GE(output_shape.size(), t->shape.size())
       << "Not a broadcast, output dimensionality smaller than input.\noutput: "
-      << output_shape << "\nvs\ninput: " << I;
-  auto bh = detail::BroadcastShape(output_shape, I->shape);
+      << output_shape << "\nvs\ninput: " << t;
+  auto bh = detail::BroadcastShape(output_shape, t->shape);
   CHECK_EQ(output_shape.size(), bh.common_shape.size());
   for (int i = 0; i < output_shape.size(); ++i) {
     CHECK(tvm::ir::Equal(output_shape[i], bh.common_shape[i]));
   }
   auto l = [&](tvm::Array<tvm::Var> ovars) {
-    return I(detail::InputIndexFromBroadcast(ovars, I, bh.vars2, bh.all_vars));
+    return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars));
   };
   return tvm::compute(
-      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()), l);
+      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
+      l, name, tag);
 }
 
-inline tvm::Tensor broadcast_add(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise addition of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor to add
+ * \param B The second tensor to add
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise addition with broadcast
+ */
+inline tvm::Tensor broadcast_add(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a + b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }
 
-inline tvm::Tensor broadcast_sub(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise subtraction of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to subtract from the first
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise subtraction with broadcast
+ */
+inline tvm::Tensor broadcast_sub(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a - b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }
 
-inline tvm::Tensor broadcast_mul(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise multiplication of 2
+ * tensors and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor to multiply
+ * \param B The second tensor to multiply
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise multiplication with broadcast
+ */
+inline tvm::Tensor broadcast_mul(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a * b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }
 
-inline tvm::Tensor broadcast_div(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise division of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to divide the first tensor with
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise division with broadcast
+ */
+inline tvm::Tensor broadcast_div(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a / b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }
 
-inline tvm::Tensor broadcast_mod(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise modulo remainder of 2
+ * tensors and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to compute A % B
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise modulo remainder with
+ * broadcast
+ */
+inline tvm::Tensor broadcast_mod(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a % b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }
 
 }  // namespace topi
...
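For orientation (not part of this diff): a minimal sketch of how these broadcast ops might be called from C++, assuming the 2017-era tvm C++ API (tvm::placeholder, tvm::Var, tvm::Float). The tensor names and shapes are illustrative.

    // Illustrative only -- not in this commit.
    #include <tvm/tvm.h>
    #include <topi/broadcast.h>

    inline tvm::Tensor ExampleBroadcastAdd() {
      tvm::Var n("n");
      // A has shape (n, 1), B has shape (4); numpy-style rules broadcast both to (n, 4).
      tvm::Tensor A = tvm::placeholder({n, 1}, tvm::Float(32), "A");
      tvm::Tensor B = tvm::placeholder({4}, tvm::Float(32), "B");
      // The resulting op is named "C" and tagged kBroadcast by default.
      return topi::broadcast_add(A, B, "C");
    }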
topi/include/topi/detail/broadcast.h (View file @ cbdd14f1)
-/*
+/*!
  * Copyright (c) 2017 by Contributors
  * \brief Detail broadcast.
- * \file broadcast.h
+ * \file topi/detail/broadcast.h
 */
 #ifndef TOPI_DETAIL_BROADCAST_H_
 #define TOPI_DETAIL_BROADCAST_H_
 
 #include <algorithm>
 #include <deque>
+#include <string>
 
 #include "tvm/ir_pass.h"
 #include "tvm/tvm.h"
...
@@ -90,15 +91,21 @@ inline tvm::Array<tvm::Expr> InputIndexFromBroadcast(
 
 template <typename FBinaryExpr>
-inline tvm::Tensor WithBroadcast(FBinaryExpr op, const tvm::Tensor& A,
-                                 const tvm::Tensor& B) {
+inline tvm::Tensor WithBroadcast(FBinaryExpr op,
+                                 const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = "") {
   auto bh = BroadcastShape(A->shape, B->shape);
   auto l = [&](tvm::Array<tvm::Var> ovars) {
     return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)),
               B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars)));
   };
   return tvm::compute(
-      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()), l);
+      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
+      l, name, tag);
 }
 
 }  // namespace detail
...
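Not part of this commit, but it shows what WithBroadcast is for: any binary scalar expression can be lifted into a broadcasting tensor op. A hypothetical broadcast_max, mirroring broadcast_add above and assuming tvm::max(Expr, Expr) is available, would look like this:

    // Hypothetical example -- not in this diff. Assumes tvm::max(Expr, Expr) exists.
    inline tvm::Tensor broadcast_max(const tvm::Tensor& A,
                                     const tvm::Tensor& B,
                                     std::string name = "tensor",
                                     std::string tag = kBroadcast) {
      // The lambda supplies only the scalar expression; WithBroadcast handles shape
      // inference and index remapping, and forwards name/tag to tvm::compute.
      auto l = [&](tvm::Expr a, tvm::Expr b) { return tvm::max(a, b); };
      return detail::WithBroadcast(l, A, B, name, tag);
    }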
topi/include/topi/ewise.h (View file @ cbdd14f1)
...
@@ -6,17 +6,22 @@
 #ifndef TOPI_EWISE_H_
 #define TOPI_EWISE_H_
 
-#include <tvm/tvm.h>
+#include <string>
+
+#include "topi/tags.h"
+#include "tvm/tvm.h"
 
 namespace topi {
 using namespace tvm;
 
 // Unary intrinsic operators
 #define TOPI_DECLARE_UNARY_OP(OpName)                           \
-  inline Tensor OpName(const Tensor& x) {                       \
+  inline Tensor OpName(const Tensor& x,                         \
+                       std::string name = "tensor",             \
+                       std::string tag = kElementWise) {        \
     return compute(x->shape, [&](const Array<Var>& i) {         \
         return ::tvm::OpName(x(i));                             \
-      }, "tensor", "ewise");                                    \
+      }, name, tag);                                            \
   }
 
 TOPI_DECLARE_UNARY_OP(exp);
...
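For context (not part of this change): each TOPI_DECLARE_UNARY_OP invocation stamps out one element-wise op that now accepts a caller-supplied name and tag. A hypothetical extra declaration, assuming ::tvm::sqrt(Expr) is available as an intrinsic, plus a call site using the exp declared above, might read:

    // Hypothetical usage -- not in this diff.
    TOPI_DECLARE_UNARY_OP(sqrt);   // assumes ::tvm::sqrt(Expr) exists

    inline Tensor ExpThenSqrt(const Tensor& x) {
      // Both generated ops default to tag == kElementWise; the names are illustrative.
      return sqrt(exp(x, "exp_x"), "sqrt_exp_x");
    }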
topi/include/topi/nn.h (View file @ cbdd14f1). This diff is collapsed.
topi/include/topi/tags.h  0 → 100644  (View file @ cbdd14f1)
/*!
* Copyright (c) 2017 by Contributors
* \brief Tag definitions
* \file tags.h
*/
#ifndef TOPI_TAGS_H_
#define TOPI_TAGS_H_
namespace topi {

constexpr auto kElementWise = "ewise";
constexpr auto kBroadcast = "bcast";
constexpr auto kMatMult = "matmult";
constexpr auto kConv2dNCHW = "conv2d_nchw";
constexpr auto kConv2dHWCN = "conv2d_hwcn";
constexpr auto kDepthwiseConv2d = "depthwise_conv2d";
constexpr auto kGroupConv2d = "group_conv2d";

}  // namespace topi

#endif  // TOPI_TAGS_H_
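Not part of this commit: the point of these tag constants is that generic schedules can recognize what kind of operation produced a tensor. A minimal sketch, assuming the Operation node exposes the tag string that tvm::compute was given (the helper name is illustrative):

    // Illustrative only. Assumes op->tag carries the string passed to tvm::compute.
    #include <topi/tags.h>
    #include <tvm/tvm.h>

    inline bool IsElemwiseLike(const tvm::Operation& op) {
      // Element-wise and broadcast ops can typically be inlined or fused freely.
      return op->tag == topi::kElementWise || op->tag == topi::kBroadcast;
    }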