/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file unary.cc
 * \brief Unary operators.
 */
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/attrs/transform.h>
#include <topi/elemwise.h>
#include <topi/transform.h>
#include "../type_relations.h"
#include "../op_common.h"

namespace tvm {
namespace relay {

// Wraps a unary TOPI function as a Relay FTVMCompute: applies FTOPI to the
// single input tensor and returns the result as a one-element array.
#define RELAY_UNARY_COMPUTE(FTOPI)                      \
  [] (const Attrs& attrs,                               \
      const Array<Tensor>& inputs,                      \
      const Type& out_type,                             \
      const Target& target) -> Array<Tensor> {          \
    return {FTOPI(inputs[0])};                          \
  }                                                     \

雾雨魔理沙 committed
43

44
RELAY_REGISTER_UNARY_OP("log")
雾雨魔理沙 committed
45
.describe(R"code(Returns the log input array, computed element-wise.
Tianqi Chen committed
46 47 48 49 50 51

.. math::
   log(x)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
52 53
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::log));

// relay.cos
RELAY_REGISTER_UNARY_OP("cos")
.describe(R"code(Returns the cos of input array, computed element-wise.

.. math::
   Y = cos(X)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::cos));


// relay.sin
RELAY_REGISTER_UNARY_OP("sin")
.describe(R"code(Returns the sin of input array, computed element-wise.

.. math::
   Y = sin(X)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::sin));


// relay.atan
RELAY_REGISTER_UNARY_OP("atan")
.describe(R"code(Returns the atan of input array, computed element-wise.

.. math::
   Y = atan(X)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::atan));


88
RELAY_REGISTER_UNARY_OP("exp")
雾雨魔理沙 committed
89
.describe(R"code(Returns the exp input array, computed element-wise.
Tianqi Chen committed
90 91 92 93 94 95

.. math::
   \exp(x)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
96
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::exp));
Tianqi Chen committed
97

98 99 100 101 102 103 104 105 106 107 108 109

RELAY_REGISTER_UNARY_OP("erf")
.describe(R"code(Returns the error function value for input array, computed element-wise.

.. math::
   \erf(x)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::erf));


110
RELAY_REGISTER_UNARY_OP("sqrt")
111
.describe(R"code(Returns the sqrt input array, computed element-wise.
112 113 114 115

.. math::
   sqrt(x)

116 117
)code" TVM_ADD_FILELINE)
.set_support_level(1)
118 119
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::sqrt));

120 121 122 123 124 125 126 127 128
RELAY_REGISTER_UNARY_OP("rsqrt")
.describe(R"code(Returns the rsqrt input array, computed element-wise.

.. math::
   1/sqrt(x)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::rsqrt));
Tianqi Chen committed
129

130
RELAY_REGISTER_UNARY_OP("zeros_like")
131
.describe(R"code(Returns an array of zeros, with same type and shape as the input.
Tianqi Chen committed
132
)code" TVM_ADD_FILELINE)
133
.set_support_level(4);
Tianqi Chen committed
134

135
RELAY_REGISTER_UNARY_OP("ones_like")
136 137
.describe(R"code(Returns an array of ones, with same type and shape as the input.
)code" TVM_ADD_FILELINE)
138
.set_support_level(4);
Tianqi Chen committed
139

140
RELAY_REGISTER_UNARY_OP("sigmoid")
141 142 143 144 145 146 147
.describe(R"code(Returns the sigmoid input array, computed element-wise.

.. math::
   sigmoid(x)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
148 149
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::sigmoid));

150

151
RELAY_REGISTER_UNARY_OP("copy")
152 153 154
.describe(R"code(Copy a tensor.
)code" TVM_ADD_FILELINE)
.set_support_level(3)
155 156
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::identity));

ziheng committed
157 158
// relay.clip
TVM_REGISTER_NODE_TYPE(ClipAttrs);
159 160

TVM_REGISTER_API("relay.op._make.clip")
161 162 163 164 165 166 167
.set_body_typed<Expr(Expr, double, double)>([](Expr a, double a_min, double a_max) {
    auto attrs = make_node<ClipAttrs>();
    attrs->a_min = a_min;
    attrs->a_max = a_max;
    static const Op& op = Op::Get("clip");
  return CallNode::make(op, {a}, Attrs(attrs), {});
});
168 169

RELAY_REGISTER_OP("clip")
170 171 172 173 174 175 176 177 178
.describe(R"code(Clip tensor values.
This function takes a tensor, a minimum value `a_min`, and a maximum value `a_max`, and returns a clipped tensor where all values below `a_min` are set to `a_min` and all values above `a_max` are set to `a_max`. `a_min` and `a_max` are cast to the tensor's dtype.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
.add_type_rel("Identity", IdentityRel)
.set_attr<TOpPattern>("TOpPattern", kElemWise)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
179
.set_attrs_type<ClipAttrs>()
180
.set_support_level(3);
雾雨魔理沙 committed
181

182

183
RELAY_REGISTER_UNARY_OP("floor")
184 185 186
.describe(R"code(Returns the floor of input array, computed element-wise.
)code" TVM_ADD_FILELINE)
.set_support_level(3)
187 188
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::floor));

189

190
RELAY_REGISTER_UNARY_OP("ceil")
191 192 193 194 195 196 197
.describe(R"code(Returns the ceil of input array, computed element-wise.

.. math::
   ceil(x)

)code" TVM_ADD_FILELINE)
.set_support_level(3)
198 199
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::ceil));

200

201
RELAY_REGISTER_UNARY_OP("trunc")
202 203 204 205 206 207 208
.describe(R"code(Returns the trunc of input array, computed element-wise.

.. math::
   trunc(x)

)code" TVM_ADD_FILELINE)
.set_support_level(3)
209 210
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::trunc));

211
RELAY_REGISTER_UNARY_OP("round")
212 213 214 215 216 217 218
.describe(R"code(Returns the round of input array, computed element-wise.

.. math::
   round(x)

)code" TVM_ADD_FILELINE)
.set_support_level(3)
219 220
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::round));

221 222 223 224 225 226 227 228 229 230
RELAY_REGISTER_UNARY_OP("sign")
.describe(R"code(Returns the sign of input array, computed element-wise.

.. numpy::
   sign(x)

)code" TVM_ADD_FILELINE)
.set_support_level(3)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::sign));

231

232
RELAY_REGISTER_UNARY_OP("abs")
233 234 235 236 237 238 239
.describe(R"code(Returns the abs of input array, computed element-wise.

.. math::
   abs(x)

)code" TVM_ADD_FILELINE)
.set_support_level(3)
240 241
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::abs));

242

243
RELAY_REGISTER_UNARY_OP("tanh")
244 245 246 247 248 249 250
.describe(R"code(Returns the tanh of input array, computed element-wise.

.. math::
   Y = sinh(X) / cosh(X)

)code" TVM_ADD_FILELINE)
.set_support_level(1)
251 252
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::tanh));

253

254
RELAY_REGISTER_UNARY_OP("negative")
255 256 257 258 259 260 261
.describe(R"code(Returns the numeric negative of input array, computed element-wise.

.. math::
   -(x)

)code" TVM_ADD_FILELINE)
.set_support_level(3)
262
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::negative));
263

264 265 266 267 268 269 270 271 272 273 274

RELAY_REGISTER_UNARY_OP("logical_not")
.describe(R"code(Returns the logical inverse of input array, computed element-wise.

.. math::
   ~(x)

)code" TVM_ADD_FILELINE)
.set_support_level(4)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::logical_not));

275 276 277 278 279 280 281 282 283 284 285 286 287

// shape_of
TVM_REGISTER_NODE_TYPE(ShapeOfAttrs);

bool ShapeOfRel(const Array<Type>& types,
                int num_inputs,
                const Attrs& attrs,
                const TypeReporter& reporter) {
  CHECK_EQ(num_inputs, 1);
  auto tt = types[0].as<TensorTypeNode>();
  CHECK(tt != nullptr);
  const auto* param = attrs.as<ShapeOfAttrs>();
  CHECK(param != nullptr);
288 289
  auto rank_shape = RankShape(tt->shape);
  reporter->Assign(types[1], TensorTypeNode::make(rank_shape, param->dtype));
290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315
  return true;
}

// FTVMCompute for shape_of: emits a tensor that holds the shape of the
// single input, using the dtype requested in ShapeOfAttrs.
Array<Tensor> ShapeOfCompute(const Attrs& attrs,
                             const Array<Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
  CHECK_EQ(inputs.size(), 1);
  const auto* shape_attrs = attrs.as<ShapeOfAttrs>();
  CHECK(shape_attrs != nullptr);
  return Array<Tensor>{topi::shape(inputs[0], shape_attrs->dtype)};
}

// FFI hook that builds a relay.shape_of call node.
TVM_REGISTER_API("relay.op._make.shape_of")
.set_body_typed<Expr(Expr, DataType)>([](Expr data, DataType dtype) {
  static const Op& op = Op::Get("shape_of");
  auto shape_attrs = make_node<ShapeOfAttrs>();
  shape_attrs->dtype = dtype;
  return CallNode::make(op, {data}, Attrs(shape_attrs), {});
});

RELAY_REGISTER_OP("shape_of")
.describe(R"code(Returns a tensor representing the shape of a tensor.

)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.set_attrs_type<ShapeOfAttrs>()
.add_argument("data", "Tensor", "The input tensor.")
.add_type_rel("ShapeOf", ShapeOfRel)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
// Use kOpaque for shape_of op for now since it won't be performance critical,
// and it makes things easier for dynamic shape func
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                               ElemwiseArbitraryLayout)
.set_support_level(10)
.set_attr<FTVMCompute>("FTVMCompute", ShapeOfCompute);

328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366

TVM_REGISTER_NODE_TYPE(NdarraySizeAttrs);

bool NdarraySizeRel(const Array<Type>& types,
             int num_inputs,
             const Attrs& attrs,
             const TypeReporter& reporter) {
  CHECK_EQ(num_inputs, 1);
  auto tt = types[0].as<TensorTypeNode>();
  CHECK(tt != nullptr);
  const auto* param = attrs.as<NdarraySizeAttrs>();
  CHECK(param != nullptr);
  reporter->Assign(types[1], TensorTypeNode::make({1}, param->dtype));
  return true;
}

// FTVMCompute for contrib.ndarray_size: emits a tensor holding the total
// number of elements of the single input, in the requested dtype.
Array<Tensor> NdarraySizeCompute(const Attrs& attrs,
                                 const Array<Tensor>& inputs,
                                 const Type& out_type,
                                 const Target& target) {
  CHECK_EQ(inputs.size(), 1);
  const auto* size_attrs = attrs.as<NdarraySizeAttrs>();
  CHECK(size_attrs != nullptr);
  return {topi::ndarray_size(inputs[0], size_attrs->dtype)};
}

// FFI hook that builds a contrib.ndarray_size call node.
TVM_REGISTER_API("relay.op.contrib._make.ndarray_size")
.set_body_typed<Expr(Expr, DataType)>([](Expr data, DataType dtype) {
  static const Op& op = Op::Get("contrib.ndarray_size");
  auto size_attrs = make_node<NdarraySizeAttrs>();
  size_attrs->dtype = dtype;
  return CallNode::make(op, {data}, Attrs(size_attrs), {});
});

RELAY_REGISTER_OP("contrib.ndarray_size")
.describe(R"code(Returns a tensor representing the number of elements of input tensor.

)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.set_attrs_type<NdarraySizeAttrs>()
.add_argument("data", "Tensor", "The input tensor.")
.add_type_rel("NdarraySize", NdarraySizeRel)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TOpPattern>("TOpPattern", kInjective)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                               ElemwiseArbitraryLayout)
.set_support_level(10)
.set_attr<FTVMCompute>("FTVMCompute", NdarraySizeCompute);

}  // namespace relay
}  // namespace tvm