Commit f33b9eae by Alexander Pivovarov, committed by Yao Wang

Fix some typos in api docs (#3309)

parent df16182b
@@ -83,7 +83,7 @@ struct Error : public dmlc::Error {
  *
  * The final mode represents the old mode, if we report an error that has no span or
  * expression, we will default to throwing an exception with a textual representation
- * of the error and no indication of where it occured in the original program.
+ * of the error and no indication of where it occurred in the original program.
  *
  * The latter mode is not ideal, and the goal of the new error reporting machinery is
  * to avoid ever reporting errors in this style.
...
@@ -187,7 +187,7 @@ TVM_DLL void TVMAPISetLastError(const char* msg);
 /*!
  * \brief return str message of the last error
  * all function in this file will return 0 when success
- * and -1 when an error occured,
+ * and -1 when an error occurred,
  * TVMGetLastError can be called to retrieve the error
  *
  * this function is threadsafe and can be called by different thread
...
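The return convention documented here (0 on success, -1 on error, message retrievable via TVMGetLastError) is what TVM's Python bindings check after every C call. A minimal hedged sketch of that pattern through ctypes, assuming libtvm.so is on the loader path:

import ctypes

# Hedged sketch: load the TVM runtime (library name/path is an assumption)
# and apply the documented 0/-1 return convention by hand.
lib = ctypes.CDLL("libtvm.so")
lib.TVMGetLastError.restype = ctypes.c_char_p

out_size = ctypes.c_int()
out_array = ctypes.POINTER(ctypes.c_char_p)()
# TVMFuncListGlobalNames returns 0 on success and -1 on error,
# per the convention described in the header above.
if lib.TVMFuncListGlobalNames(ctypes.byref(out_size), ctypes.byref(out_array)) != 0:
    raise RuntimeError(lib.TVMGetLastError().decode("utf-8"))
print("registered global funcs:", out_size.value)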
@@ -60,7 +60,7 @@ NNVM_DLL void NNAPISetLastError(const char* msg);
 /*!
  * \brief return str message of the last error
  * all function in this file will return 0 when success
- * and -1 when an error occured,
+ * and -1 when an error occurred,
  * NNGetLastError can be called to retrieve the error
  *
  * this function is threadsafe and can be called by different thread
...
@@ -58,7 +58,7 @@ class Renamer(object):
 class AttrConverter(object):
-    """Common attribute conveter. An AttrConverter instance is a callable:
+    """Common attribute converter. An AttrConverter instance is a callable:
     ```
     attr_converter = AttrConverter(op_name, transforms={'a':'b', 'c':('d', 1)})
     new_op_name, new_attr = attr_converter(attrs)
@@ -72,12 +72,12 @@ class AttrConverter(object):
         `op_name = func(attr)`
     transforms : dict of `new_name, or (new_name, default_value, transform function)`
         If only a new_name is provided, it's like renaming the attribute name.
-        If default_value if provded, then the attribute is considered as optional.
+        If default_value if provided, then the attribute is considered as optional.
         If transform function is provided, the original attribute value is handled
         by transform function.
     excludes : list
         A list of excluded attributes that should `NOT` appear.
-        Raise NotImplementedError if occured.
+        Raise NotImplementedError if occurred.
     disables : list
         A list of attributes that is disabled in nnvm. Log warnings.
     ignores : list
...
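The transforms/excludes semantics spelled out in this docstring fit in a few lines; the following is a simplified, hypothetical sketch of the rules (not the frontend's actual implementation, and it omits defaults for missing attributes, disables, and ignores):

# Hypothetical sketch of the documented transform rules.
def convert_attrs(attrs, transforms, excludes=()):
    new_attrs = {}
    for name, value in attrs.items():
        if name in excludes:
            raise NotImplementedError("attribute %s should not appear" % name)
        rule = transforms.get(name)
        if rule is None:
            new_attrs[name] = value
        elif isinstance(rule, str):
            new_attrs[rule] = value          # plain rename
        else:
            new_name, _default, *fn = rule   # (new_name, default[, transform])
            new_attrs[new_name] = fn[0](value) if fn else value
    return new_attrs

With the docstring's example dict, convert_attrs({'a': 1, 'c': 5}, transforms={'a': 'b', 'c': ('d', 1)}) yields {'b': 1, 'd': 5}.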
@@ -1177,7 +1177,7 @@ class GraphProto(object):
     -> All Const nodes are params.
     -> Last node is assumed as graph output.
     -> _output_shapes : Graph should be frozen with add_shapes=True.
-       Or user can pass input shape dictionaly optionally.
+       Or user can pass input shape dictionary optionally.
     -> DecodeJpeg, ResizeBilinear: These are dummy operators.
        Hence user should handle preprocessing outside.
     -> CheckNumerics: No implementation as of now for this.
...
@@ -286,7 +286,7 @@ class ExprTable(object):
 class AttrCvt(object):
-    """Common attribute conveter. An AttrConverter instance is a callable:
+    """Common attribute converter. An AttrConverter instance is a callable:
     ```
     attr_converter = AttrConverter(op_name, transforms={'a':'b', 'c':('d', 1)})
     new_op_name, new_attr = attr_converter(attrs)
@@ -300,12 +300,12 @@ class AttrCvt(object):
         `op_name = func(attr)`
     transforms : dict of `new_name, or (new_name, default_value, transform function)`
         If only a new_name is provided, it's like renaming the attribute name.
-        If default_value if provded, then the attribute is considered as optional.
+        If default_value if provided, then the attribute is considered as optional.
         If transform function is provided, the original attribute value is handled
         by transform function.
     excludes : list
         A list of excluded attributes that should `NOT` appear.
-        Raise NotImplementedError if occured.
+        Raise NotImplementedError if occurred.
     disables : list
         A list of attributes that is disabled in relay. Log warnings.
     ignores : list
...
@@ -77,12 +77,12 @@ class AttrCvt(object):
         `op_name = func(attr)`
     transforms : dict of `new_name, or (new_name, default_value, transform function)`
         If only a new_name is provided, it's like renaming the attribute name.
-        If default_value if provded, then the attribute is considered as optional.
+        If default_value if provided, then the attribute is considered as optional.
         If transform function is provided, the original attribute value is handled
         by transform function.
     excludes : list
         A list of excluded attributes that should `NOT` appear.
-        Raise NotImplementedError if occured.
+        Raise NotImplementedError if occurred.
     disables : list
         A list of attributes that is disabled in relay. Log warnings.
     ignores : list
@@ -1567,7 +1567,7 @@ def _in_while_loop(control_flow_node_map, op_name):
     Parameters
     ----------
     control_flow_node_map : Dict[str, Set[str]]
-        A dictionay contains the unqiue control flow execution frame name to
+        A dictionay contains the unique control flow execution frame name to
         a set of primitive operators mapping.
     op_name : str
@@ -1619,7 +1619,7 @@ class Branch:
             return tf.add(4, 23)
         r = tf.cond(tf.less(i, j), f1, f2)
-    This condition statement should be coverted into Relay in the following
+    This condition statement should be converted into Relay in the following
     form:
     .. code-block:: python
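The Relay form referenced by the docstring is cut off in this view. Independently of that elided snippet, a hedged illustration of how tf.cond maps onto a Relay If expression:

from tvm import relay

# Illustrative only: build a Relay If mirroring
# r = tf.cond(tf.less(i, j), f1, f2) from the docstring above.
i = relay.var("i", shape=(), dtype="int32")
j = relay.var("j", shape=(), dtype="int32")
true_branch = relay.multiply(i, relay.const(17, "int32"))  # stand-in for f1
false_branch = relay.const(27, "int32")                    # stand-in for f2 (4 + 23)
r = relay.If(relay.less(i, j), true_branch, false_branch)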
@@ -1727,7 +1727,7 @@ class Loop:
         self._loop = None
     def _while_loop(self):
-        """An internal API to create a Relay recurisve call for a matched TF
+        """An internal API to create a Relay recursive call for a matched TF
         `while_loop` construct.
         """
         wl = tvm.relay.var('while_loop')
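A hedged, self-contained sketch of the recursive-call encoding this method's docstring alludes to (not the converter's actual output): the loop body becomes a Relay function bound to the 'while_loop' variable, which either recurses or returns its state.

from tvm import relay

# Hedged sketch: encode "while i < 10: i = i + 1" as a tail-recursive
# Relay function bound to the 'while_loop' variable.
wl = relay.var("while_loop")
i = relay.var("i", shape=(), dtype="int32")
cond = relay.less(i, relay.const(10, "int32"))
recurse = relay.Call(wl, [relay.add(i, relay.const(1, "int32"))])
loop_fn = relay.Function([i], relay.If(cond, recurse, i))
# Bind the function to its own name, then kick off the loop at i = 0.
loop = relay.Let(wl, loop_fn, relay.Call(wl, [relay.const(0, "int32")]))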
@@ -1796,7 +1796,7 @@ class GraphProto(object):
     -> All Const nodes are params.
     -> Last node is assumed as graph output.
     -> _output_shapes : Graph should be frozen with add_shapes=True.
-       Or user can pass input shape dictionaly optionally.
+       Or user can pass input shape dictionary optionally.
     -> DecodeJpeg, ResizeBilinear: These are dummy operators.
        Hence user should handle preprocessing outside.
     -> CheckNumerics: No implementation as of now for this.
...
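Passing the input shape dictionary mentioned above looks roughly like the following hedged sketch; the input name, layout, and shape are placeholders, and the exact return value (expression vs. module) has varied across TVM versions:

from tvm import relay

# Hedged example: import a frozen TensorFlow GraphDef, supplying input
# shapes explicitly instead of relying on the _output_shapes attribute.
# 'graph_def' is assumed to be an already-loaded, frozen tf.GraphDef.
net, params = relay.frontend.from_tensorflow(
    graph_def,
    layout="NCHW",
    shape={"input": (1, 224, 224, 3)},
)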
@@ -67,7 +67,7 @@ def conv2d(data,
         The weight expressions.
     strides : tuple of int, optional
-        The strides of convoltution.
+        The strides of convolution.
     padding : tuple of int, optional
         The padding of convolution on both sides of inputs before convolution.
@@ -129,7 +129,7 @@ def conv2d_transpose(data,
         The weight expressions.
     strides : Tuple[int], optional
-        The strides of convoltution.
+        The strides of convolution.
     padding : Tuple[int], optional
         The padding of convolution on both sides of inputs.
@@ -842,7 +842,7 @@ def contrib_conv2d_winograd_without_weight_transform(data,
         The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
     strides : tuple of int, optional
-        The strides of convoltution.
+        The strides of convolution.
     padding : tuple of int, optional
         The padding of convolution on both sides of inputs before convolution.
@@ -908,7 +908,7 @@ def contrib_conv2d_winograd_nnpack_without_weight_transform(data,
         The weight expressions.
     strides : tuple of int, optional
-        The strides of convoltution.
+        The strides of convolution.
     padding : tuple of int, optional
         The padding of convolution on both sides of inputs before convolution.
@@ -975,7 +975,7 @@ def contrib_conv2d_nchwc(data,
         The kernel expressions.
     strides : tuple of int, optional
-        The strides of convoltution.
+        The strides of convolution.
     padding : tuple of int, optional
         The padding of convolution on both sides of inputs before convolution.
@@ -1040,7 +1040,7 @@ def contrib_depthwise_conv2d_nchwc(data,
         The kernel expressions.
     strides : tuple of int, optional
-        The strides of convoltution.
+        The strides of convolution.
     padding : tuple of int, optional
         The padding of convolution on both sides of inputs before convolution.
@@ -1156,7 +1156,7 @@ def deformable_conv2d(data,
         The weight expressions.
     strides : tuple of int, optional
-        The strides of convoltution.
+        The strides of convolution.
     padding : tuple of int, optional
         The padding of convolution on both sides of inputs before convolution.
...
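As a usage reference for the strides and padding tuples documented in these hunks, a small hedged relay.nn.conv2d call (shapes illustrative):

from tvm import relay

# Hedged example: a 3x3 NCHW convolution with stride 1 and one pixel
# of padding on each side, using the tuple parameters documented above.
data = relay.var("data", shape=(1, 3, 224, 224))
weight = relay.var("weight", shape=(16, 3, 3, 3))
out = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1),
                      channels=16, kernel_size=(3, 3))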
@@ -373,7 +373,7 @@ class TCPSocket : public Socket {
   }
   /*!
   * \brief decide whether the socket is at OOB mark
-  * \return 1 if at mark, 0 if not, -1 if an error occured
+  * \return 1 if at mark, 0 if not, -1 if an error occurred
   */
   int AtMark() const {
 #ifdef _WIN32
...
@@ -50,7 +50,7 @@ namespace ir {
 *  - assert bufferB.shape[1] == n + 3
 *
 *  In general, this is a constraint solving problem. We have simplified assumption
-*  over the binding declaration, such that we require the variable occured in
+*  over the binding declaration, such that we require the variable occurred in
 *  constraint must be declared in argument list. So it is illegal to have signature
 *  f(tA(shape=(n+3))) without any argument variable corresponds to n, even though
 *  it is already enough to derive n from the input argument.
...
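The rule described here (every variable appearing in a shape constraint must be bound by some argument) is easy to see with symbolic shapes; a hedged illustration in the TVM Python API of this era:

import tvm

# Hedged illustration: n is bound directly from A's shape, so a derived
# constraint like B.shape[1] == n + 3 can be asserted at call time.
n = tvm.var("n")
A = tvm.placeholder((n,), name="A")
B = tvm.placeholder((n, n + 3), name="B")
# A signature containing only an (n + 3)-shaped tensor would be rejected:
# no argument carries n itself, even though n is derivable from n + 3.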
@@ -37,7 +37,7 @@ def _schedule_reduce(op, sch, is_idx_reduce=False):
     num_thread = 32
     target = tvm.target.current_target()
     if target and target.target_name == "opencl":
-        # without it, CL_INVALID_WORK_GROUP_SIZE occured when running test_topi_reduce.py
+        # without it, CL_INVALID_WORK_GROUP_SIZE occurred when running test_topi_reduce.py
         # don't know why
         num_thread = 16
     block_x = tvm.thread_axis("blockIdx.x")
...