Commit 7f7dc073
Authored Jan 14, 2020 by Zhi; committed by Yizhi Liu, Jan 14, 2020
Parent: ce807fe8

use packed func macro for external codegen (#4710)
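In short: the C and DNNL external codegens stop emitting a hand-written wrapper with the raw packed-call signature (TVMValue* value, int* type_code, int nargs), an nargs guard, and manual DLTensor unpacking. They now emit a small typed wrapper over DLTensor* arguments and export it with the TVM_DLL_EXPORT_TYPED_FUNC macro from tvm/runtime/packed_func.h; find_include_path() additionally reports the dmlc-core headers, presumably because the newly emitted packed_func.h include pulls them in transitively. A minimal sketch of the new pattern (the foo names are illustrative, mirroring the doc comment updated in codegen_c.h below):

#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/packed_func.h>
#include <dlpack/dlpack.h>

// Kernel body that an external codegen would emit (illustrative).
extern "C" void foo_(float* a, float* b, float* out) {
  for (int i = 0; i < 10 * 10; ++i) out[i] = a[i] + b[i];
}

// Typed wrapper: no manual TVMValue unpacking or nargs checking any more.
extern "C" int foo_wrapper_(DLTensor* arg0, DLTensor* arg1, DLTensor* out) {
  foo_(static_cast<float*>(arg0->data),
       static_cast<float*>(arg1->data),
       static_cast<float*>(out->data));
  return 0;
}

// Expands to a packed-func entry point named "foo" that unpacks TVM's calling
// convention and forwards the DLTensor arguments to foo_wrapper_.
TVM_DLL_EXPORT_TYPED_FUNC(foo, foo_wrapper_);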
Showing 5 changed files with 81 additions and 104 deletions:

  python/tvm/_ffi/libinfo.py                        +9   -2
  src/relay/backend/contrib/codegen_c/codegen.cc    +1   -4
  src/relay/backend/contrib/codegen_c/codegen_c.h   +23  -34
  src/relay/backend/contrib/dnnl/codegen.cc         +1   -0
  tests/python/relay/test_external_runtime.py       +47  -64
python/tvm/_ffi/libinfo.py

...
@@ -179,13 +179,20 @@ def find_include_path(name=None, search_path=None, optional=False):
         else:
             tvm_include_path = [os.path.join(p, name) for p in header_path]
         dlpack_include_path = []
+        dmlc_include_path = []
     else:
         tvm_include_path = [os.path.join(p, 'include') for p in header_path]
         dlpack_include_path = [os.path.join(p, 'dlpack/include') for p in header_path]
+        dmlc_include_path = [os.path.join(p, 'dmlc-core/include') for p in header_path]

     # try to find include path
     include_found = [p for p in tvm_include_path if os.path.exists(p) and os.path.isdir(p)]
     include_found += [p for p in dlpack_include_path if os.path.exists(p) and os.path.isdir(p)]
+    include_found += [p for p in dmlc_include_path if os.path.exists(p) and os.path.isdir(p)]

     if not include_found:
         message = ('Cannot find the files.\n' +
...
src/relay/backend/contrib/codegen_c/codegen.cc

...
@@ -154,12 +154,9 @@ class CSourceCodegen : public CSourceModuleCodegenBase {
   runtime::Module CreateCSourceModule(const ObjectRef& ref) override {
     // Create headers
-    code_stream_ << "#include <cstdint>\n";
-    code_stream_ << "#include <iostream>\n";
-    code_stream_ << "#include <cstdlib>\n";
-    code_stream_ << "#include <stdio.h>\n";
     code_stream_ << "#include <cstring>\n";
     code_stream_ << "#include <tvm/runtime/c_runtime_api.h>\n";
+    code_stream_ << "#include <tvm/runtime/packed_func.h>\n";
     code_stream_ << "#include <dlpack/dlpack.h>\n";

     // Append some common macro for operator definition.
...
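Since the generated wrapper no longer prints an argument-count error or unpacks TVMValue entries by hand, the emitted <cstdint>, <iostream>, <cstdlib> and <stdio.h> includes are dropped; <tvm/runtime/packed_func.h> is emitted instead so that the generated source can use TVM_DLL_EXPORT_TYPED_FUNC.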
src/relay/backend/contrib/codegen_c/codegen_c.h

...
@@ -99,63 +99,52 @@ class CodegenCBase {
    * \code
    *
    * // An example code for the generated C function.
-   * extern "C" void foo(TVMValue* value, int* type_code, int nargs) {
-   *   if (nargs != 3) {
-   *     printf("foo expects 3 args, but received %d\n", nargs);
-   *     return 1;
-   *   }
-   *
-   *   DLTensor* arg0 = static_cast<DLTensor*>(value[0].v_handle);
-   *   DLTensor* arg1 = static_cast<DLTensor*>(value[1].v_handle);
-   *   DLTensor* out = static_cast<DLTensor*>(value[2].v_handle);
-   *
+   * extern "C" void foo_wrapper_(DLTensor* arg0,
+   *                              DLTensor* arg1,
+   *                              DLTensor* out) {
    *   foo_(static_cast<float*>(arg0->data),
    *        static_cast<float*>(arg1->data),
    *        static_cast<float*>(out->data));
    *   return 0;
    * }
    *
+   * TVM_DLL_EXPORT_TYPED_FUNC(foo, foo_wrapper_);
+   *
    * \endcode
    */
   void GenerateBackendCFunc(const std::string& func_name, int arg_cnt) {
     // Print signature
     code_stream_ << "\n";
-    code_stream_ << "extern \"C\" int " << func_name;
-    code_stream_ << "(TVMValue* value, int* type_code, int nargs) {\n";
-    EnterScope();
-    // Print guard
-    PrintIndents();
-    code_stream_ << "if (nargs != " << arg_cnt << "){\n";
-    EnterScope();
-    PrintIndents();
-    code_stream_ << "printf(\"" << func_name << " expects " << arg_cnt << " arguments, but received %d\\n\", nargs);\n";
-    PrintIndents();
-    code_stream_ << "return 1;\n";
-    ExitScope();
-    PrintIndents();
-    code_stream_ << "}\n";
-
-    // According to TVM's calling convention, the last one is output.
-    for (int i = 0; i < arg_cnt; i++) {
-      PrintIndents();
-      code_stream_ << "DLTensor* arg" << i << " = " << "static_cast<DLTensor*>(value[" << i << "].v_handle);\n";
-    }
-    // Generate the call.
+    code_stream_ << "extern \"C\" int " << func_name << "_wrapper_(";
+    for (int i = 0; i < arg_cnt - 1; i++) {
+      code_stream_ << "DLTensor* arg" << i << ",\n";
+      code_stream_ << "\t";
+    }
+    if (arg_cnt > 0) {
+      code_stream_ << "DLTensor* arg" << arg_cnt - 1 << ") {\n";
+    }
+
+    EnterScope();
+
+    // Generate the internal call.
     PrintIndents();
     code_stream_ << func_name << "_(";
     for (int i = 0; i < arg_cnt - 1; i++) {
-      code_stream_ << "static_cast<float*>(arg" << i << "->data), ";
+      code_stream_ << "static_cast<float*>(arg" << i << "->data),\n";
+      PrintIndents();
     }
     if (arg_cnt > 0) {
       code_stream_ << "static_cast<float*>(arg" << arg_cnt - 1 << "->data)";
     }
-    code_stream_ << ");\n\n";
+    code_stream_ << ");\n";
     PrintIndents();
     code_stream_ << "return 0;\n";
     ExitScope();
-    code_stream_ << "}";
+    code_stream_ << "}\n\n";
+
+    // Generate the macro
+    code_stream_ << "TVM_DLL_EXPORT_TYPED_FUNC(" << func_name << ", " << func_name << "_wrapper_);\n\n";
   }

   /*!
...
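As the updated doc comment shows, GenerateBackendCFunc now emits a <func_name>_wrapper_ function over DLTensor* arguments (the last one still being the output, per TVM's calling convention noted in the removed comment) followed by a TVM_DLL_EXPORT_TYPED_FUNC line that exports the wrapper under the original function name, so callers keep looking up the same symbol.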
src/relay/backend/contrib/dnnl/codegen.cc

...
@@ -260,6 +260,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase {
     code_stream_ << "#include <cstdlib>\n";
     code_stream_ << "#include <cstring>\n";
     code_stream_ << "#include <tvm/runtime/c_runtime_api.h>\n";
+    code_stream_ << "#include <tvm/runtime/packed_func.h>\n";
     code_stream_ << "#include <dlpack/dlpack.h>\n";
     // dnnl_kernel file is saved under src/runtime/contrib/dnnl so that we don't
     // expose it to ordinary users. To make export_library use it, users need to
...
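Only the emitted include list changes here; the DNNL codegen presumably shares the wrapper generation with the C codegen through CodegenCBase::GenerateBackendCFunc, so it picks up the macro-based export without further edits.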
tests/python/relay/test_external_runtime.py

...
@@ -33,6 +33,7 @@ def generate_csource_module():
     code = r'''
 #include <tvm/runtime/c_runtime_api.h>
+#include <tvm/runtime/packed_func.h>
 #include <dlpack/dlpack.h>
 #include <cstdint>
 #include <cstring>
...
@@ -69,22 +70,17 @@ def generate_csource_module():
   free(buf_1);
 }

-extern "C" int json_rt_1(TVMValue* value, int* type_code, int nargs) {
-  if (nargs != 5) {
-    printf("Expect 5 args, but get %d", nargs);
-    return 1;
-  }
-  DLTensor* arg0 = static_cast<DLTensor*>(value[0].v_handle);
-  DLTensor* arg1 = static_cast<DLTensor*>(value[1].v_handle);
-  DLTensor* arg2 = static_cast<DLTensor*>(value[2].v_handle);
-  DLTensor* arg3 = static_cast<DLTensor*>(value[3].v_handle);
-  DLTensor* out = static_cast<DLTensor*>(value[4].v_handle);
+extern "C" int ccompiler_wrapper_1_(DLTensor* arg0, DLTensor* arg1,
+                                    DLTensor* arg2, DLTensor* arg3,
+                                    DLTensor* out) {
   gcc_1_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),
          static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),
          static_cast<float*>(out->data));
   return 0;
 }
+TVM_DLL_EXPORT_TYPED_FUNC(json_rt_1, ccompiler_wrapper_1_);

 GCC_BINARY_OP_2D(gcc_0_0, *, 10, 10);
 GCC_BINARY_OP_2D(gcc_0_1, -, 10, 10);
 GCC_BINARY_OP_2D(gcc_0_2, +, 10, 10);
...
@@ -100,21 +96,17 @@ def generate_csource_module():
   free(buf_1);
 }

-extern "C" int json_rt_0(TVMValue* value, int* type_code, int nargs) {
-  if (nargs != 5) {
-    printf("Expect 5 args, but get %d", nargs);
-    return 1;
-  }
-  DLTensor* arg0 = static_cast<DLTensor*>(value[0].v_handle);
-  DLTensor* arg1 = static_cast<DLTensor*>(value[1].v_handle);
-  DLTensor* arg2 = static_cast<DLTensor*>(value[2].v_handle);
-  DLTensor* arg3 = static_cast<DLTensor*>(value[3].v_handle);
-  DLTensor* out = static_cast<DLTensor*>(value[4].v_handle);
+extern "C" int ccompiler_wrapper_0_(DLTensor* arg0, DLTensor* arg1,
+                                    DLTensor* arg2, DLTensor* arg3,
+                                    DLTensor* out) {
   gcc_0_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),
          static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),
          static_cast<float*>(out->data));
   return 0;
 }
+TVM_DLL_EXPORT_TYPED_FUNC(json_rt_0, ccompiler_wrapper_0_);
 '''
     csource_module = _tvm_module.csource_module_create(code, "cc")
     return csource_module
...
@@ -128,11 +120,12 @@ def generate_engine_module():
     code = r'''
 #include <tvm/runtime/c_runtime_api.h>
+#include <tvm/runtime/packed_func.h>
 #include <dlpack/dlpack.h>
-#include "gcc_engine.h"
+#include "json_engine.h"

-extern "C" void gcc_1_(float* gcc_input4, float* gcc_input5,
-                       float* gcc_input6, float* gcc_input7, float* out) {
+extern "C" void json_1_(float* json_input4, float* json_input5,
+                        float* json_input6, float* json_input7, float* out) {
   std::string graph =
     "add_2d,10,10\n"
...
@@ -140,28 +133,22 @@ def generate_engine_module():
     "mul_2d,10,10\n";
   Engine engine;
-  engine.run(graph, {gcc_input4, gcc_input5, gcc_input6, gcc_input7}, out);
+  engine.run(graph, {json_input4, json_input5, json_input6, json_input7}, out);
 }

-extern "C" int json_rt_1(TVMValue* value, int* type_code, int nargs) {
-  if (nargs != 5) {
-    printf("Expect 5 args, but get %d", nargs);
-    return 1;
-  }
-  DLTensor* arg0 = static_cast<DLTensor*>(value[0].v_handle);
-  DLTensor* arg1 = static_cast<DLTensor*>(value[1].v_handle);
-  DLTensor* arg2 = static_cast<DLTensor*>(value[2].v_handle);
-  DLTensor* arg3 = static_cast<DLTensor*>(value[3].v_handle);
-  DLTensor* out = static_cast<DLTensor*>(value[4].v_handle);
-  gcc_1_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),
+extern "C" int json_wrapper_1_(DLTensor* arg0, DLTensor* arg1,
+                               DLTensor* arg2, DLTensor* arg3,
+                               DLTensor* out) {
+  json_1_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),
          static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),
          static_cast<float*>(out->data));
   return 0;
 }
+TVM_DLL_EXPORT_TYPED_FUNC(json_rt_1, json_wrapper_1_);

-extern "C" void gcc_0_(float* gcc_input0, float* gcc_input1,
-                       float* gcc_input2, float* gcc_input3, float* out) {
+extern "C" void json_0_(float* json_input0, float* json_input1,
+                        float* json_input2, float* json_input3, float* out) {
   std::string graph =
     "add_2d,10,10\n"
...
@@ -169,40 +156,36 @@ def generate_engine_module():
     "mul_2d,10,10\n";
   Engine engine;
-  engine.run(graph, {gcc_input0, gcc_input1, gcc_input2, gcc_input3}, out);
+  engine.run(graph, {json_input0, json_input1, json_input2, json_input3}, out);
 }

-extern "C" int json_rt_0(TVMValue* value, int* type_code, int nargs) {
-  if (nargs != 5) {
-    printf("Expect 5 args, but get %d", nargs);
-    return 1;
-  }
-  DLTensor* arg0 = static_cast<DLTensor*>(value[0].v_handle);
-  DLTensor* arg1 = static_cast<DLTensor*>(value[1].v_handle);
-  DLTensor* arg2 = static_cast<DLTensor*>(value[2].v_handle);
-  DLTensor* arg3 = static_cast<DLTensor*>(value[3].v_handle);
-  DLTensor* out = static_cast<DLTensor*>(value[4].v_handle);
-  gcc_0_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),
+extern "C" int json_wrapper_0_(DLTensor* arg0, DLTensor* arg1,
+                               DLTensor* arg2, DLTensor* arg3,
+                               DLTensor* out) {
+  json_0_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),
          static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),
          static_cast<float*>(out->data));
   return 0;
 }
+TVM_DLL_EXPORT_TYPED_FUNC(json_rt_0, json_wrapper_0_);
 '''

-    gen_gcc_engine()
+    gen_json_engine()
     csource_module = _tvm_module.csource_module_create(code, "cc")
     return csource_module


-def gen_gcc_engine():
+def gen_json_engine():
     """An example of external backend runtime engine. This is supposed to be provided
     by third-party vendors and included when building the generated external kernel code.
     """
     code = r'''
-#ifndef _GCC_ENGINE_H_
-#define _GCC_ENGINE_H_
+#ifndef _JSON_ENGINE_H_
+#define _JSON_ENGINE_H_
 #include <cstdint>
 #include <string>
 #include <sstream>
...
@@ -298,9 +281,9 @@ def gen_gcc_engine():
   std::vector<float*> buffers;
 };

-#endif
+#endif  // _JSON_ENGINE_H_
 '''
-    header_file = tmp_path.relpath("gcc_engine.h")
+    header_file = tmp_path.relpath("json_engine.h")
     with open(header_file, 'w') as f:
         f.write(code)
...
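Note that the exported packed-func names in the test fixtures (json_rt_0, json_rt_1) are unchanged; only the wrapper bodies behind them move to the typed-function macro, and the gcc_*-prefixed helpers in generate_engine_module are renamed to json_*. A hedged usage sketch (not part of the test) of how such an exported entry is looked up once the generated source has been compiled into a shared library, assuming an illustrative library name of external.so:

#include <dlpack/dlpack.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/packed_func.h>

void invoke(DLTensor* a, DLTensor* b, DLTensor* c, DLTensor* d, DLTensor* out) {
  // Load the compiled external module and fetch the exported entry point.
  tvm::runtime::Module mod = tvm::runtime::Module::LoadFromFile("external.so");
  tvm::runtime::PackedFunc f = mod.GetFunction("json_rt_0");
  // TVM_DLL_EXPORT_TYPED_FUNC forwards the DLTensor arguments to json_wrapper_0_.
  f(a, b, c, d, out);
}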