Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
T
tic
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
wenyuanbo
tic
Commits
1022ad7c
Commit
1022ad7c
authored
Sep 25, 2018
by
Siju
Committed by
Tianqi Chen
Sep 24, 2018
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
[DOC]Errors corrected (#1767)
parent
48cf48b2
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
13 additions
and
13 deletions
+13
-13
include/tvm/ir_pass.h
+1
-1
python/tvm/_ffi/runtime_ctypes.py
+1
-1
python/tvm/schedule.py
+1
-1
python/tvm/tensor_intrin.py
+1
-1
src/codegen/codegen_c.cc
+1
-1
src/codegen/verilog/codegen_verilog.cc
+2
-2
src/op/tensorize.cc
+2
-2
src/runtime/pack_args.h
+1
-1
src/runtime/rpc/rpc_session.cc
+2
-2
vta/python/vta/ir_pass.py
+1
-1
No files found.
include/tvm/ir_pass.h
View file @
1022ad7c
...
...
@@ -217,7 +217,7 @@ Stmt NarrowChannelAccess(Stmt stmt);
* \param auto_max_step The maximum step before stop attach automatic unroll
* \param auto_max_depth The maximum depth before stop attach automatic unroll
* \param auto_max_extent The maximum extent of the loop we can unroll,
*
this is a legacy option that does
not take the loop total steps into account.
*
this is a legacy option that does
not take the loop total steps into account.
* \param explicit_unroll Whether explicitly unroll the loop, or leave unroll annotation to codegen.
* \return Transformed stmt.
*/
...
...
python/tvm/_ffi/runtime_ctypes.py
View file @
1022ad7c
...
...
@@ -67,7 +67,7 @@ class TVMType(ctypes.Structure):
bits
=
64
head
=
""
else
:
raise
ValueError
(
"Donot know how to handle type
%
s"
%
type_str
)
raise
ValueError
(
"Do
not know how to handle type
%
s"
%
type_str
)
bits
=
int
(
head
)
if
head
else
bits
self
.
bits
=
bits
...
...
python/tvm/schedule.py
View file @
1022ad7c
...
...
@@ -362,7 +362,7 @@ class Stage(NodeBase):
"""
if
nparts
is
not
None
:
if
factor
is
not
None
:
raise
ValueError
(
"Donot need to provide both outer and nparts"
)
raise
ValueError
(
"Do
not need to provide both outer and nparts"
)
outer
,
inner
=
_api_internal
.
_StageSplitByNParts
(
self
,
parent
,
nparts
)
else
:
if
factor
is
None
:
...
...
python/tvm/tensor_intrin.py
View file @
1022ad7c
...
...
@@ -72,7 +72,7 @@ def decl_tensor_intrin(op,
binds_list
=
[]
for
t
in
inputs
:
if
not
isinstance
(
t
.
op
,
_tensor
.
PlaceholderOp
):
raise
ValueError
(
"Donot yet support composition op"
)
raise
ValueError
(
"Do
not yet support composition op"
)
cfg
=
current_build_config
()
for
t
in
tensors
:
...
...
src/codegen/codegen_c.cc
View file @
1022ad7c
...
...
@@ -207,7 +207,7 @@ std::string CodeGenC::GetStructRef(
}
else
if
(
t
.
is_int
())
{
os
<<
"v_int64"
;
}
else
{
LOG
(
FATAL
)
<<
"
do
not know how to handle type"
<<
t
;
LOG
(
FATAL
)
<<
"
Do
not know how to handle type"
<<
t
;
}
os
<<
")"
;
return
os
.
str
();
...
...
src/codegen/verilog/codegen_verilog.cc
View file @
1022ad7c
...
...
@@ -213,11 +213,11 @@ VerilogValue CodeGenVerilog::VisitExpr_(const UIntImm *op) {
return
IntConst
(
op
,
this
);
}
VerilogValue
CodeGenVerilog
::
VisitExpr_
(
const
FloatImm
*
op
)
{
LOG
(
FATAL
)
<<
"Donot support float constant in Verilog"
;
LOG
(
FATAL
)
<<
"Do
not support float constant in Verilog"
;
return
VerilogValue
();
}
VerilogValue
CodeGenVerilog
::
VisitExpr_
(
const
StringImm
*
op
)
{
LOG
(
FATAL
)
<<
"Donot support string constant in Verilog"
;
LOG
(
FATAL
)
<<
"Do
not support string constant in Verilog"
;
return
VerilogValue
();
}
...
...
src/op/tensorize.cc
View file @
1022ad7c
...
...
@@ -52,10 +52,10 @@ size_t InferTensorizeRegion(
const
IterVarAttr
&
attr
=
(
*
iit
).
second
;
if
(
!
found_point
)
{
CHECK
(
!
attr
->
bind_thread
.
defined
())
<<
"Donot allow thread in tensorize scope"
;
<<
"Do
not allow thread in tensorize scope"
;
}
if
(
attr
->
iter_type
==
kTensorized
)
{
CHECK
(
!
found_point
)
<<
"Donot allow two tensorized point"
;
CHECK
(
!
found_point
)
<<
"Do
not allow two tensorized point"
;
found_point
=
true
;
loc_scope
=
i
-
1
;
}
...
...
src/runtime/pack_args.h
View file @
1022ad7c
...
...
@@ -168,7 +168,7 @@ inline PackedFunc PackFuncNonBufferArg_(
switch
(
codes
[
i
])
{
case
INT64_TO_INT64
:
case
FLOAT64_TO_FLOAT64
:
{
LOG
(
FATAL
)
<<
"Donot support 64bit argument to device function"
;
break
;
LOG
(
FATAL
)
<<
"Do
not support 64bit argument to device function"
;
break
;
}
case
INT64_TO_INT32
:
{
holder
[
i
].
v_int32
=
static_cast
<
int32_t
>
(
args
.
values
[
base
+
i
].
v_int64
);
...
...
src/runtime/rpc/rpc_session.cc
View file @
1022ad7c
...
...
@@ -250,9 +250,9 @@ class RPCSession::EventHandler : public dmlc::Stream {
this
->
Write
(
arr
->
dtype
);
this
->
WriteArray
(
arr
->
shape
,
arr
->
ndim
);
CHECK
(
arr
->
strides
==
nullptr
)
<<
"Donot support strided remote array"
;
<<
"Do
not support strided remote array"
;
CHECK_EQ
(
arr
->
byte_offset
,
0
)
<<
"Donot support send byte offset"
;
<<
"Do
not support send byte offset"
;
break
;
}
case
kNull
:
break
;
...
...
vta/python/vta/ir_pass.py
View file @
1022ad7c
...
...
@@ -556,7 +556,7 @@ def inject_dma_intrin(stmt_in):
return
irb
.
get
()
else
:
raise
RuntimeError
(
"Donot support copy
%
s->
%
s"
%
(
src
.
scope
,
dst
.
scope
))
raise
RuntimeError
(
"Do
not support copy
%
s->
%
s"
%
(
src
.
scope
,
dst
.
scope
))
return
tvm
.
ir_pass
.
InjectCopyIntrin
(
stmt_in
,
"dma_copy"
,
_inject_copy
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment