Commit 262345fa by Tianqi Chen Committed by GitHub

[TOPI] dense API to remove redundant use_bias (#476)

parent 833855e7
...@@ -4,7 +4,7 @@ import tvm ...@@ -4,7 +4,7 @@ import tvm
from .. import tag from .. import tag
def dense(data, weight, bias=None):
    """Applies a linear transformation: :math:`Y = XW^T + b`.

    Parameters
    ----------
    data : tvm.Tensor
        2-D with shape [batch, in_dim]

    weight : tvm.Tensor
        2-D with shape [out_dim, in_dim]

    bias : tvm.Tensor, optional
        1-D with shape [out_dim]

    Returns
    -------
    output : tvm.Tensor
        2-D with shape [batch, out_dim]
    """
    assert len(data.shape) == 2 and len(weight.shape) == 2, \
        "only support 2-dim dense"
    # Compare against None explicitly: `if bias:` would call __bool__ on a
    # tvm.Tensor, whose truthiness is not a reliable "was it provided" check.
    if bias is not None:
        assert len(bias.shape) == 1
    batch, in_dim = data.shape
    out_dim, _ = weight.shape
    k = tvm.reduce_axis((0, in_dim), name='k')
    matmul = tvm.compute((batch, out_dim), \
                         lambda i, j: tvm.sum(data[i, k] * weight[j, k], axis=k), \
                         tag='dense')
    if bias is not None:
        # Fuse the bias add as a separate elementwise stage so schedules can
        # treat the matmul and the broadcast independently.
        matmul = tvm.compute((batch, out_dim), \
                             lambda i, j: matmul[i, j] + bias[j], \
                             tag=tag.BROADCAST)
    return matmul
...@@ -10,7 +10,7 @@ def verify_dense(batch, in_dim, out_dim, use_bias=True): ...@@ -10,7 +10,7 @@ def verify_dense(batch, in_dim, out_dim, use_bias=True):
A = tvm.placeholder((batch, in_dim), name='A') A = tvm.placeholder((batch, in_dim), name='A')
B = tvm.placeholder((out_dim, in_dim), name='B') B = tvm.placeholder((out_dim, in_dim), name='B')
C = tvm.placeholder((out_dim,), name='C') C = tvm.placeholder((out_dim,), name='C')
D = topi.nn.dense(A, B, C, use_bias=use_bias) D = topi.nn.dense(A, B, C if use_bias else None)
D = topi.nn.relu(D) D = topi.nn.relu(D)
s = topi.cuda.schedule_dense(D) s = topi.cuda.schedule_dense(D)
dtype = A.dtype dtype = A.dtype
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment