wenyuanbo / tic · Commits

Commit cbdd14f1
authored Aug 15, 2017 by Nicolas Vasilache, committed by Tianqi Chen on Aug 14, 2017
[TOPI] C++ doc (#320)
parent b0c42f3b
Showing 6 changed files with 358 additions and 61 deletions (+358 -61)

  HalideIR                               +1    -1
  topi/include/topi/broadcast.h        +115   -20
  topi/include/topi/detail/broadcast.h  +12    -5
  topi/include/topi/ewise.h              +8    -3
  topi/include/topi/nn.h               +201   -32
  topi/include/topi/tags.h              +21    -0
HalideIR @ 326e2fa1

-Subproject commit 30a85d860567aa30d013a5e75fbd1b0ee2ebe93c
+Subproject commit 326e2fa18734f0592d257da6b8cfaae90a499c5c
topi/include/topi/broadcast.h
-/*
+/*!
  * Copyright (c) 2017 by Contributors
  * \brief Broadcast op constructions
- * \file broadcast.h
+ * \file topi/broadcast.h
  */
 #ifndef TOPI_BROADCAST_H_
 #define TOPI_BROADCAST_H_

-#include <topi/detail/broadcast.h>
+#include <string>
+
+#include "topi/detail/broadcast.h"
+#include "topi/tags.h"

 namespace topi {

-inline tvm::Tensor broadcast_to(const tvm::Tensor& I,
-                                const tvm::Array<tvm::Expr>& output_shape) {
-  CHECK_GE(output_shape.size(), I->shape.size())
+/*!
+ * \brief Creates an operation that broadcasts a tensor into a compatible
+ * shape according to numpy's rules
+ *
+ * \param t The input tensor
+ * \param output_shape The target output shape, must be compatible
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a broadcast operation
+ */
+inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
+                                const tvm::Array<tvm::Expr>& output_shape,
+                                std::string name = "tensor",
+                                std::string tag = kBroadcast) {
+  CHECK_GE(output_shape.size(), t->shape.size())
       << "Not a broadcast, output dimensionality smaller than input.\noutput: "
-      << output_shape << "\nvs\ninput: " << I;
-  auto bh = detail::BroadcastShape(output_shape, I->shape);
+      << output_shape << "\nvs\ninput: " << t;
+  auto bh = detail::BroadcastShape(output_shape, t->shape);
   CHECK_EQ(output_shape.size(), bh.common_shape.size());
   for (int i = 0; i < output_shape.size(); ++i) {
     CHECK(tvm::ir::Equal(output_shape[i], bh.common_shape[i]));
   }
   auto l = [&](tvm::Array<tvm::Var> ovars) {
-    return I(detail::InputIndexFromBroadcast(ovars, I, bh.vars2, bh.all_vars));
+    return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars));
   };
   return tvm::compute(
       tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
-      l);
+      l, name, tag);
 }

-inline tvm::Tensor broadcast_add(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise addition of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor to add
+ * \param B The second tensor to add
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise addition with broadcast
+ */
+inline tvm::Tensor broadcast_add(const tvm::Tensor& A, const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a + b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }

-inline tvm::Tensor broadcast_sub(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise subtraction of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to subtract from the first
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise subtraction with broadcast
+ */
+inline tvm::Tensor broadcast_sub(const tvm::Tensor& A, const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a - b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }

-inline tvm::Tensor broadcast_mul(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise multiplication of 2
+ * tensors and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor to multiply
+ * \param B The second tensor to multiply
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise multiplication with broadcast
+ */
+inline tvm::Tensor broadcast_mul(const tvm::Tensor& A, const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a * b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }

-inline tvm::Tensor broadcast_div(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise division of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to divide the first tensor with
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise division with broadcast
+ */
+inline tvm::Tensor broadcast_div(const tvm::Tensor& A, const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a / b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }

-inline tvm::Tensor broadcast_mod(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise modulo remainder of 2
+ * tensors and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to compute A % B
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise modulo remainder with
+ * broadcast
+ */
+inline tvm::Tensor broadcast_mod(const tvm::Tensor& A, const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
   auto l = [&](tvm::Expr a, tvm::Expr b) { return a % b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
 }

 }  // namespace topi
...
...
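Because the new name and tag arguments are defaulted, existing call sites keep compiling unchanged while new callers can label the generated compute op. A minimal usage sketch, assuming a TVM build with this TOPI checkout on the include path (shapes and names are illustrative, not from the commit):

    #include <tvm/tvm.h>
    #include <topi/broadcast.h>

    // Broadcast a (1, 3) placeholder against a (4, 3) target, numpy-style,
    // then add it to a (4, 3) tensor.
    tvm::Tensor MakeBroadcastSum() {
      tvm::Tensor x = tvm::placeholder({1, 3}, tvm::Float(32), "x");
      tvm::Tensor y = tvm::placeholder({4, 3}, tvm::Float(32), "y");
      // name/tag default to "tensor"/kBroadcast.
      tvm::Tensor xb = topi::broadcast_to(x, {4, 3});
      // The new trailing argument labels the op for later lookup.
      return topi::broadcast_add(xb, y, "xy_sum");
    }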
topi/include/topi/detail/broadcast.h
-/*
+/*!
  * Copyright (c) 2017 by Contributors
  * \brief Detail broadcast.
- * \file broadcast.h
+ * \file topi/detail/broadcast.h
  */
 #ifndef TOPI_DETAIL_BROADCAST_H_
 #define TOPI_DETAIL_BROADCAST_H_

 #include <algorithm>
 #include <deque>
+#include <string>

 #include "tvm/ir_pass.h"
 #include "tvm/tvm.h"
...
...
@@ -90,15 +91,21 @@ inline tvm::Array<tvm::Expr> InputIndexFromBroadcast(
 template <typename FBinaryExpr>
-inline tvm::Tensor WithBroadcast(FBinaryExpr op, const tvm::Tensor& A,
-                                 const tvm::Tensor& B) {
+inline tvm::Tensor WithBroadcast(FBinaryExpr op, const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = "") {
   auto bh = BroadcastShape(A->shape, B->shape);
   auto l = [&](tvm::Array<tvm::Var> ovars) {
     return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)),
               B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars)));
   };
   return tvm::compute(
       tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
-      l);
+      l, name, tag);
 }

 }  // namespace detail
...
...
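Since WithBroadcast now forwards name and tag straight into tvm::compute, any new binary broadcast op inherits the labeling for free. A hypothetical broadcast_max (not part of this commit) written in the same style as the arithmetic ops above:

    #include <string>

    #include "topi/detail/broadcast.h"
    #include "topi/tags.h"

    namespace topi {

    // Hypothetical pointwise maximum with numpy-style broadcasting; mirrors
    // broadcast_add/broadcast_sub from this commit.
    inline tvm::Tensor broadcast_max(const tvm::Tensor& A, const tvm::Tensor& B,
                                     std::string name = "tensor",
                                     std::string tag = kBroadcast) {
      auto l = [&](tvm::Expr a, tvm::Expr b) { return tvm::max(a, b); };
      return detail::WithBroadcast(l, A, B, name, tag);
    }

    }  // namespace topi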
topi/include/topi/ewise.h
...
...
@@ -6,17 +6,22 @@
 #ifndef TOPI_EWISE_H_
 #define TOPI_EWISE_H_

-#include <tvm/tvm.h>
+#include <string>
+
+#include "topi/tags.h"
+#include "tvm/tvm.h"

 namespace topi {
 using namespace tvm;

 // Unary intrinsic operators
 #define TOPI_DECLARE_UNARY_OP(OpName)                     \
-  inline Tensor OpName(const Tensor& x) {                 \
+  inline Tensor OpName(const Tensor& x,                   \
+                       std::string name = "tensor",       \
+                       std::string tag = kElementWise) {  \
     return compute(x->shape, [&](const Array<Var>& i) {   \
       return ::tvm::OpName(x(i));                         \
-    }, "tensor", "ewise");                                \
+    }, name, tag);                                        \
   }

 TOPI_DECLARE_UNARY_OP(exp);
...
...
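For reference, TOPI_DECLARE_UNARY_OP(exp) now expands to roughly the following (hand-expanded from the macro above; only the signature and the forwarded name/tag differ from the old expansion):

    inline Tensor exp(const Tensor& x,
                      std::string name = "tensor",
                      std::string tag = kElementWise) {
      return compute(x->shape, [&](const Array<Var>& i) {
        return ::tvm::exp(x(i));  // forwards to the tvm intrinsic
      }, name, tag);
    }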
topi/include/topi/nn.h
-/*
+/*!
  * Copyright (c) 2017 by Contributors
  * \brief NN op constructions
  * \file nn.h

...

@@ -7,7 +7,9 @@
 #define TOPI_NN_H_

 #include <algorithm>
+#include <string>

+#include "topi/tags.h"
 #include "tvm/ir.h"
 #include "tvm/ir_pass.h"
 #include "tvm/tvm.h"
...
...
@@ -27,17 +29,65 @@ tvm::Expr Map(const tvm::Array<tvm::Expr>& exprs, T op) {
 }  // namespace detail

+/*!
+ * \brief Creates an operation that performs a rectified linear unit
+ *
+ * \param t The input tensor
+ * \param threshold The relu threshold (default 0)
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the relu operation
+ */
 template <typename T>
-inline tvm::Tensor relu(const tvm::Tensor& x,
-                        T threshold = static_cast<T>(0)) {
-  return tvm::compute(x->shape, [&](const tvm::Array<tvm::Var>& i) {
-    return tvm::max(x(i), threshold);
-  }, "tensor", "ewise");
+inline tvm::Tensor relu(const tvm::Tensor& t,
+                        T threshold = static_cast<T>(0),
+                        std::string name = "tensor",
+                        std::string tag = kElementWise) {
+  return tvm::compute(t->shape, [&](const tvm::Array<tvm::Var>& i) {
+    return tvm::max(t(i), threshold);
+  }, name, tag);
 }
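A short usage sketch for the reworked relu; the threshold is a template parameter, so its type should match the tensor's element type (shape and names illustrative):

    #include <topi/nn.h>

    tvm::Tensor MakeRelu() {
      tvm::Tensor x = tvm::placeholder({64, 128}, tvm::Float(32), "x");
      // T is deduced as float; tag defaults to kElementWise.
      return topi::relu(x, 0.0f, "relu0");
    }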
-inline tvm::Tensor pad(const tvm::Tensor& t,
-                       const tvm::Array<tvm::Expr>& pad_before,
-                       tvm::Array<tvm::Expr> pad_after = tvm::Array<tvm::Expr>()) {
+/*!
+ * \brief Creates an operation that performs padding
+ *
+ * \param t The input tensor
+ * \param pad_before An Array of Expr describing the padding before the
+ * respective iterator
+ * \param pad_after An Array of Expr describing the padding after the
+ * respective iterator
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the padding operation
+ *
+ * \note
+ *  The pad_after Array must either be empty or have the same length as
+ *  pad_before
+ *  When pad_after is empty, it takes the same values as pad_before (symmetric
+ *  padding)
+ *  The pad Array applies from the leading dimensions and skips missing
+ *  trailing dimensions:
+ *
+ *      pad(t(i, j, k), {1}, {0}) returns the equivalent operation for
+ *          the following pseudocode:
+ *              for i in [0, t.shape[0] + 1):
+ *                  for j in [0, t.shape[1]):
+ *                      for k in [0, t.shape[2]):
+ *                          name(i,j,k) =
+ *                              (1 <= i) ?
+ *                                  t(i-1, j, k) : 0;
+ */
+inline tvm::Tensor pad(const tvm::Tensor& t,
+                       const tvm::Array<tvm::Expr>& pad_before,
+                       tvm::Array<tvm::Expr> pad_after = tvm::Array<tvm::Expr>(),
+                       std::string name = "tensor",
+                       std::string tag = kElementWise) {
   if (pad_after.size() < pad_before.size()) {
     for (int i = pad_after.size(); i < pad_before.size(); ++i) {
       pad_after.push_back(pad_before[i]);
...
...
@@ -74,14 +124,30 @@ inline tvm::Tensor pad(
     }
     return tvm::select(detail::Map(sel, tvm::ir::And::make), t(indices), 0);
   };
-  return tvm::compute(output_shape, l, "tensor", "ewise");
+  return tvm::compute(output_shape, l, name, tag);
 }
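A sketch of the symmetric-padding path described in the note: with pad_after omitted, pad_before is mirrored, and dimensions not covered by the pad Array are left alone (shapes illustrative):

    #include <topi/nn.h>

    tvm::Tensor PadHW() {
      tvm::Tensor x = tvm::placeholder({1, 3, 32, 32}, tvm::Float(32), "x");
      // Zero-pad H and W by one on each side; pad_after copies pad_before.
      // Result shape: (1, 3, 34, 34).
      return topi::pad(x, {0, 0, 1, 1});
    }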
-// Returns a compute that calculates a row-major matrix multiplication:
-//   A(i, k) * B(k, j), if trans_a == trans_b
-//   the usual transposed combinations, otherwise
-inline tvm::Tensor matmult(const tvm::Tensor& A, const tvm::Tensor& B,
-                           bool trans_a = false, bool trans_b = false) {
+/*!
+ * \brief Creates an operation that calculates a matrix multiplication
+ * (row-major notation):
+ *     A(i, k) * B(k, j), if trans_a == trans_b
+ *     the usual transposed combinations, otherwise
+ *
+ * \param A The matrix A
+ * \param B The matrix B
+ * \param trans_a Is A's layout transposed?
+ * \param trans_b Is B's layout transposed?
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the matmult operation
+ */
+inline tvm::Tensor matmult(const tvm::Tensor& A, const tvm::Tensor& B,
+                           bool trans_a = false, bool trans_b = false,
+                           std::string name = "tensor",
+                           std::string tag = kMatMult) {
   tvm::Array<tvm::Expr> output_shape{A->shape[trans_a ? 1 : 0],
                                      B->shape[trans_b ? 0 : 1]};
   auto k = tvm::reduce_axis(tvm::Range{0, A->shape[trans_a ? 0 : 1]}, "k");
...
...
@@ -89,12 +155,37 @@ inline tvm::Tensor matmult(const tvm::Tensor& A, const tvm::Tensor& B,
     return tvm::sum((trans_a ? A[k][i] : A[i][k]) *
                     (trans_b ? B[j][k] : B[k][j]), {k});
   };
-  return tvm::compute(output_shape, l);
+  return tvm::compute(output_shape, l, name, tag);
 }
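The trans flags select which operand axis is contracted. A sketch of the transpose(A) * B case, where A is stored (K, M) and B is stored (K, N) (shapes illustrative):

    #include <topi/nn.h>

    tvm::Tensor MatmultTransA() {
      tvm::Tensor A = tvm::placeholder({256, 64}, tvm::Float(32), "A");   // (K, M)
      tvm::Tensor B = tvm::placeholder({256, 128}, tvm::Float(32), "B");  // (K, N)
      // Output is (M, N) = (64, 128); the reduction runs over K = 256.
      return topi::matmult(A, B, /*trans_a=*/true, /*trans_b=*/false, "C");
    }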
-inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I, const tvm::Tensor& W,
-                               int pad_h = 0, int pad_w = 0, int stride_h = 1,
-                               int stride_w = 1) {
+/*!
+ * \brief Creates an operation that performs a 2-D convolution with an
+ * NCHW-layout
+ *
+ * \param I The 4-D input tensor
+ * \param W The 4-D weight tensor
+ * \param pad_h A static constant padding amount applied to the height of the
+ * image, before and after (symmetric padding)
+ * \param pad_w A static constant padding amount applied to the width of the
+ * image, before and after (symmetric padding)
+ * \param stride_h A static constant striding amount applied to the height of
+ * the image
+ * \param stride_w A static constant striding amount applied to the width of
+ * the image
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the 2-D convolution operation (NCHW
+ * layout)
+ */
+inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I, const tvm::Tensor& W,
+                               int pad_h = 0, int pad_w = 0, int stride_h = 1,
+                               int stride_w = 1,
+                               std::string name = "tensor",
+                               std::string tag = kConv2dNCHW) {
   CHECK_EQ(4, I->shape.size());
   CHECK_EQ(4, W->shape.size());
   auto pH = I->shape[2];
...
...
@@ -116,12 +207,36 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I, const tvm::Tensor& W,
         T(b, i, stride_h * h + kh, stride_w * w + kw) * W(i, o, kh, kw),
         {i, kh, kw});
   };
-  return tvm::compute(output_shape, l);
+  return tvm::compute(output_shape, l, name, tag);
 }
-inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I, const tvm::Tensor& W,
-                               int pad_h = 0, int pad_w = 0, int stride_h = 1,
-                               int stride_w = 1) {
+/*!
+ * \brief Creates an operation for 2-D convolution layer with an HWCN-layout
+ *
+ * \param I The 4-D input tensor
+ * \param W The 4-D weight tensor
+ * \param pad_h A static constant padding amount applied to the height of the
+ * image, before and after (symmetric padding)
+ * \param pad_w A static constant padding amount applied to the width of the
+ * image, before and after (symmetric padding)
+ * \param stride_h A static constant striding amount applied to the height of
+ * the image
+ * \param stride_w A static constant striding amount applied to the width of
+ * the image
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the 2-D convolution operation
+ * (HWCN layout)
+ */
+inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I, const tvm::Tensor& W,
+                               int pad_h = 0, int pad_w = 0, int stride_h = 1,
+                               int stride_w = 1,
+                               std::string name = "tensor",
+                               std::string tag = kConv2dHWCN) {
   CHECK_EQ(4, I->shape.size());
   CHECK_EQ(4, W->shape.size());
   auto pH = I->shape[2];
...
...
@@ -141,13 +256,38 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I, const tvm::Tensor& W,
         T(stride_h * h + kh, stride_w * w + kw, i, b) * W(kh, kw, i, o),
         {i, kh, kw});
   };
-  return tvm::compute(output_shape, l);
+  return tvm::compute(output_shape, l, name, tag);
 }
+/*!
+ * \brief Creates an operation that performs a 2-D depthwise convolution with
+ * an NCHW-layout
+ *
+ * \param I The 4-D input tensor
+ * \param W The 4-D weight tensor
+ * \param pad_h A static constant padding amount applied to the height of the
+ * image, before and after (symmetric padding)
+ * \param pad_w A static constant padding amount applied to the width of the
+ * image, before and after (symmetric padding)
+ * \param stride_h A static constant striding amount applied to the height of
+ * the image
+ * \param stride_w A static constant striding amount applied to the width of
+ * the image
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the 2-D depthwise convolution operation
+ * (NCHW layout)
+ */
 inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
-                                         const tvm::Tensor& W, int pad_h = 0,
-                                         int pad_w = 0, int stride_h = 1,
-                                         int stride_w = 1) {
+                                         const tvm::Tensor& W, int pad_h = 0,
+                                         int pad_w = 0, int stride_h = 1,
+                                         int stride_w = 1,
+                                         std::string name = "tensor",
+                                         std::string tag = kDepthwiseConv2d) {
   CHECK_EQ(4, I->shape.size());
   CHECK_EQ(4, W->shape.size());
   auto pH = I->shape[2];
...
...
@@ -170,13 +310,37 @@ inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
         W(i / pCM, o % pCM, kh, kw),
         {i, kh, kw});
   };
-  return tvm::compute(output_shape, l);
+  return tvm::compute(output_shape, l, name, tag);
 }
+/*!
+ * \brief Creates an operation that performs a 2-D group convolution with
+ * an NGCHW-layout
+ *
+ * \param I The 5-D input tensor
+ * \param W The 5-D weight tensor
+ * \param pad_h A static constant padding amount applied to the height of the
+ * image, before and after (symmetric padding)
+ * \param pad_w A static constant padding amount applied to the width of the
+ * image, before and after (symmetric padding)
+ * \param stride_h A static constant striding amount applied to the height of
+ * the image
+ * \param stride_w A static constant striding amount applied to the width of
+ * the image
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the 2-D group convolution operation
+ * (NGCHW layout)
+ */
 inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
-                                      const tvm::Tensor& W, int pad_h = 0,
-                                      int pad_w = 0, int stride_h = 1,
-                                      int stride_w = 1) {
+                                      const tvm::Tensor& W, int pad_h = 0,
+                                      int pad_w = 0, int stride_h = 1,
+                                      int stride_w = 1,
+                                      std::string name = "tensor",
+                                      std::string tag = kGroupConv2d) {
   CHECK_EQ(5, I->shape.size());
   CHECK_EQ(5, W->shape.size());
   auto pH = I->shape[2];
...
...
@@ -195,12 +359,17 @@ inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
   auto T = (pad_h == 0 && pad_w == 0)
                ? I
                : pad(I, {tvm::Expr(0), tvm::Expr(0), tvm::Expr(0), pad_h, pad_w});
-  auto l = [&](tvm::Var b, tvm::Var g, tvm::Var o, tvm::Var h, tvm::Var w) {
+  auto l = [&](tvm::Array<tvm::Var> args) {
+    tvm::Var b = args[0];
+    tvm::Var g = args[1];
+    tvm::Var o = args[2];
+    tvm::Var h = args[3];
+    tvm::Var w = args[4];
     return tvm::sum(
         I(b, g, i, stride_h * h + kh, stride_w * w + kw) * W(g, i, o, kh, kw),
         {i, kh, kw});
   };
-  return tvm::compute(output_shape, l);
+  return tvm::compute(output_shape, l, name, tag);
 }

 }  // namespace topi
...
...
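Across the convolution variants above, the collapsed context computes the output spatial extents with the usual convolution arithmetic, roughly (H + 2*pad_h - kernel_h) / stride_h + 1. A usage sketch for the NCHW variant, noting that conv2d_nchw indexes its weight as W(in_channel, out_channel, kh, kw) (shapes illustrative):

    #include <topi/nn.h>

    tvm::Tensor Conv3x3Same() {
      // NCHW input; weight indexed (in_channel, out_channel, kh, kw) above.
      tvm::Tensor I = tvm::placeholder({1, 16, 32, 32}, tvm::Float(32), "I");
      tvm::Tensor W = tvm::placeholder({16, 32, 3, 3}, tvm::Float(32), "W");
      // pad 1, stride 1 preserves H and W: (32 + 2*1 - 3) / 1 + 1 = 32.
      return topi::conv2d_nchw(I, W, /*pad_h=*/1, /*pad_w=*/1);
    }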
topi/include/topi/tags.h
0 → 100644
+/*!
+ * Copyright (c) 2017 by Contributors
+ * \brief Tag definitions
+ * \file tags.h
+ */
+#ifndef TOPI_TAGS_H_
+#define TOPI_TAGS_H_
+
+namespace topi {
+
+constexpr auto kElementWise = "ewise";
+constexpr auto kBroadcast = "bcast";
+constexpr auto kMatMult = "matmult";
+constexpr auto kConv2dNCHW = "conv2d_nchw";
+constexpr auto kConv2dHWCN = "conv2d_hwcn";
+constexpr auto kDepthwiseConv2d = "depthwise_conv2d";
+constexpr auto kGroupConv2d = "group_conv2d";
+
+}  // namespace topi
+
+#endif  // TOPI_TAGS_H_
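Centralizing these strings means downstream passes can branch on an op's tag instead of parsing its name. A hypothetical helper sketch (this classification is an illustration, not part of this commit):

    #include <string>

    #include "topi/tags.h"

    // Hypothetical: classify a TOPI-produced op by its tag string.
    inline bool IsInjectiveTag(const std::string& tag) {
      return tag == topi::kElementWise || tag == topi::kBroadcast;
    }

    inline bool IsContractionTag(const std::string& tag) {
      return tag == topi::kMatMult || tag == topi::kConv2dNCHW ||
             tag == topi::kConv2dHWCN || tag == topi::kDepthwiseConv2d ||
             tag == topi::kGroupConv2d;
    }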