Unverified commit 06bb17ec by Samuel, committed by GitHub

TensorFlow script upgrade from 1.13.1 to 2.0.0, so that it can run in both versions (#4963)

parent 11ee1a0e
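The one pattern repeated across these files is a guarded import: bind tf to the compat.v1 shim where it exists, and fall back to the plain module on older releases. A minimal sketch of the idiom, grounded in the hunks below:

try:
    # Under TF 2.x this restores the 1.x-style API (placeholders,
    # sessions, GraphDef); on 1.x releases without compat.v1 the
    # fallback leaves behavior unchanged.
    import tensorflow.compat.v1 as tf
except ImportError:
    import tensorflow as tf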
......@@ -1259,7 +1259,7 @@ def _broadcast(name):
def _impl(inputs, attr, params):
return AttrCvt(
op_name=name,
ignores=['name', 'Tidx']
ignores=['name', 'incompatible_shape_error', 'Tidx']
)(inputs, attr)
return _impl
......
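Background for the new 'incompatible_shape_error' entry: TF 2.0's comparison ops (Equal, NotEqual) carry an extra attribute that Relay does not need, so AttrCvt now drops it. An illustrative check, assuming TF >= 2.0:

# Illustrative only: trace tf.equal and list the attributes on the
# resulting Equal node; 'incompatible_shape_error' appears alongside T.
import tensorflow as tf

@tf.function
def eq(a, b):
    return tf.equal(a, b)

gdef = eq.get_concrete_function(
    tf.TensorSpec([2], tf.float32),
    tf.TensorSpec([2], tf.float32)).graph.as_graph_def()
for node in gdef.node:
    if node.op == 'Equal':
        print(list(node.attr.keys()))  # contains 'incompatible_shape_error'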
......@@ -73,7 +73,7 @@ class TFParser(object):
def _get_output_names(self):
"""Return the concatenated output names"""
try:
import tensorflow as tf
import tensorflow.compat.v1 as tf
except ImportError:
raise ImportError(
"InputConfiguration: Unable to import tensorflow which is "
......
......@@ -219,9 +219,9 @@ def get_workload(model_path, model_sub_path=None):
# Creates graph from saved graph_def.pb.
with tf_compat_v1.gfile.FastGFile(path_model, 'rb') as f:
graph_def = tf.GraphDef()
graph_def = tf_compat_v1.GraphDef()
graph_def.ParseFromString(f.read())
graph = tf.import_graph_def(graph_def, name='')
graph = tf_compat_v1.import_graph_def(graph_def, name='')
return graph_def
#######################################################################
......
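The same compat surface covers frozen-graph loading. A self-contained sketch of the pattern above, with 'model.pb' as a hypothetical path:

# Hedged sketch: GraphDef and import_graph_def stay available under
# tensorflow.compat.v1 on TF 2.x, so frozen 1.x models still load.
import tensorflow.compat.v1 as tf_compat_v1

with tf_compat_v1.gfile.FastGFile('model.pb', 'rb') as f:  # hypothetical path
    graph_def = tf_compat_v1.GraphDef()
    graph_def.ParseFromString(f.read())
graph = tf_compat_v1.import_graph_def(graph_def, name='')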
......@@ -22,11 +22,16 @@ from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
import keras
import tensorflow as tf
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow import keras as tf_keras
from packaging import version as package_version
# prevent Keras from using up all gpu memory
if tf.executing_eagerly():
gpus = tf.config.list_physical_devices('GPU')
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
else:
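For the non-eager branch (its body is collapsed above), the customary TF 1.x way to keep Keras from grabbing all GPU memory is a session config. The sketch below illustrates that pattern only; it is not the elided diff content:

# Illustrative TF 1.x-style fallback (not the collapsed lines).
import tensorflow.compat.v1 as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.keras.backend.set_session(tf.Session(config=config))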
......@@ -363,7 +368,7 @@ class TestKeras:
keras.layers.SimpleRNN(units=16, return_state=False,
activation='tanh'),
keras.layers.GRU(units=16, return_state=False,
recurrent_activation='sigmoid', activation='tanh')]
recurrent_activation='sigmoid', activation='tanh', reset_after=False)]
for rnn_func in rnn_funcs:
x = rnn_func(data)
keras_model = keras.models.Model(data, x)
......
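Note on the added reset_after=False: newer Keras releases default GRU to reset_after=True (the CuDNN-compatible variant), and the test pins the original gating order, presumably the formulation the Relay Keras frontend converts. Sketched:

# Sketch, assuming standalone keras >= 2.3 where the default flipped:
# reset_after=False selects the original GRU formulation.
import keras

gru = keras.layers.GRU(units=16, return_state=False,
                       recurrent_activation='sigmoid',
                       activation='tanh', reset_after=False)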
......@@ -16,7 +16,11 @@
# under the License.
"""Unit tests for converting TensorFlow control flow op to Relay."""
import pytest
import tensorflow as tf
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
import numpy as np
from tvm import nd
from tvm import relay
......
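Why the control-flow tests call disable_v2_behavior(): with v2 behavior off, control flow lowers to the classic dataflow ops the Relay frontend parses, rather than a single functional While/If node. An illustrative check, assuming TF 2.x:

# With v2 behavior disabled, tf.while_loop emits Enter/Merge/Switch/Exit
# nodes instead of one functional 'While' op.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.Graph().as_default() as g:
    i = tf.constant(0)
    tf.while_loop(lambda i: i < 10, lambda i: i + 1, [i])
    print({n.op for n in g.as_graph_def().node}
          & {'Enter', 'Merge', 'Switch', 'Exit'})  # non-empty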
......@@ -15,7 +15,11 @@
# specific language governing permissions and limitations
# under the License.
"""Unit tests for converting TensorFlow debugging ops to Relay."""
import tensorflow as tf
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
import numpy as np
from tvm import relay
from tvm.relay.frontend.tensorflow import from_tensorflow
......
......@@ -23,7 +23,10 @@ This article is a test script to test tensorflow operator with Relay.
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
......@@ -38,6 +41,7 @@ import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from packaging import version as package_version
#######################################################################
# Generic run functions for TVM & tensorflow
......@@ -1040,6 +1044,7 @@ def _test_variable(data):
""" One iteration of a variable """
tf.reset_default_graph()
with tf.Graph().as_default():
input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
input_tensor = array_ops.reshape(input_op, data.shape)
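This is the commit's most repeated change: each test body now builds inside an explicit with tf.Graph().as_default(): scope. Entering a Graph context also suspends eager execution, which is what lets compat.v1 placeholders work on TF 2.x without disabling v2 behavior globally. The shape of the pattern, as a minimal sketch:

# Minimal sketch of the recurring edit: reset, then build the test graph
# in a fresh Graph context so no state leaks between tests.
import numpy as np
import tensorflow.compat.v1 as tf

data = np.ones((2, 3), dtype=np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
    in_data = tf.placeholder(tf.float32, data.shape, name="in_data")
    tf.identity(in_data, name="identity")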
......@@ -1187,6 +1192,7 @@ def _test_stridedslice(ip_shape, begin, end, stride, dtype,
""" One iteration of a Stridedslice """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.strided_slice(in_data, begin, end, stride, begin_mask=begin_mask,
end_mask=end_mask, new_axis_mask=new_axis_mask,
......@@ -1255,6 +1261,7 @@ def _test_forward_divide(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
denominator = tf.placeholder(dtype, ip_shape, name="denomin")
tf.math.divide(numerator, denominator, name='RealDiv')
......@@ -1265,6 +1272,7 @@ def _test_forward_divide(ip_shape, dtype):
def _test_forward_floordiv(ip_shape, dtype):
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name='FloorDiv')
compare_tf_with_tvm([np_numer], ['numer:0'], 'FloorDiv:0')
......@@ -1284,6 +1292,7 @@ def _test_forward_floormod(in_shape, if_shape, dtype):
np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype)
np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, in_shape, name="numer")
factor = tf.placeholder(dtype, if_shape, name="factor")
tf.floormod(numerator, factor, name='FloorMod')
......@@ -1304,6 +1313,7 @@ def _test_forward_truncatemod(ip_shape, dtype):
np_data_1 = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
tf.truncatemod(in_data_1, in_data_2, name='truncatemod')
......@@ -1324,6 +1334,7 @@ def _test_gather(ip_shape, indice_shape, indice_value, axis, dtype):
""" One iteration of a GatherV2 """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
indices = tf.placeholder("int32", indice_shape, name="indices")
out = tf.gather(in_data, indices, axis=axis)
......@@ -1360,6 +1371,7 @@ def test_forward_gather_nd():
"""test operator GatherNd"""
np_data = np.random.uniform(1, 100, size=(2, 2)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 2), name="in_data")
tf.gather_nd(in_data, indices=[[1, 0], [0, 1]], name="gather_nd")
compare_tf_with_tvm([np_data], ['in_data:0'], 'gather_nd:0')
......@@ -1374,6 +1386,7 @@ def test_forward_bias_add():
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shpae).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
......@@ -1393,6 +1406,7 @@ def _test_split(in_shape, axis, num_or_size_splits, dtype):
""" One iteration of a Split """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
num_split = len(num_or_size_splits) if isinstance(num_or_size_splits, list)\
else num_or_size_splits
......@@ -1403,11 +1417,11 @@ def _test_split(in_shape, axis, num_or_size_splits, dtype):
# and now test together with concat
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
splitted = tf.split(in_data, num_or_size_splits, axis=axis)
tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], 'in_data:0', 'concat:0')
concat = tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], 'in_data:0', concat.name)
def test_forward_split():
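The switch from the hard-coded 'concat:0' to concat.name guards against op-name uniquification: a second tf.concat built in the same graph would be named 'concat_1', so reading the tensor's actual name is the robust spelling. Sketch:

# Op names are uniquified per graph; .name stays correct regardless of
# how many concat ops were built before this one.
import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
    a = tf.placeholder(tf.float32, [2, 2], name="a")
    parts = tf.split(a, 2, axis=0)
    concat = tf.concat(parts, 0)
    print(concat.name)  # 'concat:0' here; 'concat_1:0' for a second concat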
......@@ -1445,6 +1459,7 @@ def test_forward_split():
def _test_forward_top_k_v2(in_shape, k):
np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder("float32", in_shape, name="in_data")
tf.math.top_k(in_data, k, name='TopK')
compare_tf_with_tvm([np_data], ['in_data:0'], 'TopK:0')
......@@ -1465,12 +1480,14 @@ def _test_unstack(ip_shape, axis, dtype):
np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
unstack = tf.unstack(in_data, axis=axis)
compare_tf_with_tvm([np_data], ['in_data:0'], [n.name for n in unstack])
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
......@@ -1493,6 +1510,7 @@ def test_forward_unstack():
def _test_tile(in_shape, multiples, dtype):
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.tile(in_data, multiples=multiples, name="tile")
compare_tf_with_tvm([np_data], ['in_data:0'], 'tile:0')
......@@ -1511,6 +1529,7 @@ def test_forward_tile():
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.clip_by_value(in_data, clip_value_min,
clip_value_max, name="ClipByValue")
......@@ -1759,7 +1778,8 @@ def _test_forward_crop_and_resize(img_shape, boxes, box_idx, crop_size,
extrapolation_value=0.0, method='bilinear', dtype="float32"):
image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
tf.reset_default_graph()
in_data = tf.placeholder(dtype, image.shape, name="in_data")
with tf.Graph().as_default():
in_data = array_ops.placeholder(dtype, image.shape, name="in_data")
tf.image.crop_and_resize(in_data, boxes=boxes, box_ind=box_idx,
crop_size=crop_size, method=method,
extrapolation_value=extrapolation_value,
......@@ -1821,8 +1841,8 @@ def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
m1 = array_ops.zeros([batch_size, num_hidden])
x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype)
g, ((out_m0, out_m1)) = \
tf.contrib.rnn.LSTMBlockCell(num_hidden,
forget_bias=forget_bias)(x, ((m0, m1)))
tensorflow.contrib.rnn.LSTMBlockCell(num_hidden,
forget_bias=forget_bias)(x, (m0, m1))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
......@@ -1888,6 +1908,7 @@ def _test_forward_unpack(in_shape, axis, dtype):
"""test operator Unpack"""
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.unstack(in_data, axis=axis, name="Unpack")
compare_tf_with_tvm([np_data], ['in_data:0'], 'Unpack:0')
......@@ -1906,11 +1927,13 @@ def test_forward_unpack():
def test_forward_range():
"""test operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 18, 3, name="range")
compare_tf_with_tvm([], [], 'range:0')
"""test type assignment for operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 256 + 1, 1, dtype=tf.float32)
compare_tf_with_tvm([], [], 'range:0')
......@@ -2173,8 +2196,11 @@ def test_forward_placeholder():
#######################################################################
# PTB
# ---
dir(tf.contrib)
try:
    # Load contrib to run the PTB model on TF versions before 2.0
    import tensorflow.contrib
except ImportError:
    pass
def test_forward_ptb():
'''test ptb model'''
......@@ -2480,6 +2506,7 @@ def test_forward_softmax():
def check_softmax(in_shape, axis, dtype):
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.nn.softmax(in_data, axis=axis, name="Softmax")
compare_tf_with_tvm([np_data], ['in_data:0'], 'Softmax:0')
......@@ -2495,6 +2522,7 @@ def test_forward_round():
"""test Round"""
np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
tf.round(in_data, name="round")
compare_tf_with_tvm([np_data], ['in_data:0'], 'round:0')
......@@ -2504,6 +2532,7 @@ def test_forward_abs():
"""test operator Abs"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.abs(in_data, name="abs")
compare_tf_with_tvm([np_data], ['in_data:0'], 'abs:0')
......@@ -2512,6 +2541,7 @@ def test_forward_abs():
def _test_forward_zeros_like(in_shape, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.zeros_like(in_data, name="zeros_like")
compare_tf_with_tvm([np_data], ['in_data:0'], 'zeros_like:0')
......@@ -2552,6 +2582,7 @@ def test_forward_squared_difference():
def _test_forward_reverse_v2(in_shape, axis, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.reverse(in_data, axis=[axis], name="reverse")
compare_tf_with_tvm([np_data], ['in_data:0'], 'reverse:0')
......@@ -2570,6 +2601,7 @@ def test_forward_sign():
"""test Sign"""
np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sign(in_data, name="sign")
compare_tf_with_tvm([np_data], ['in_data:0'], 'sign:0')
......@@ -2579,6 +2611,7 @@ def test_forward_square():
"""test operator Square """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.square(in_data, name="square")
compare_tf_with_tvm([np_data], ['in_data:0'], 'square:0')
......@@ -2589,6 +2622,7 @@ def test_forward_pow_exp():
np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
out1 = tf.pow(in1, in2, name="pow")
......@@ -2601,6 +2635,7 @@ def test_forward_log():
"""test operator Log """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.log(in_data, name="log")
compare_tf_with_tvm([np_data], ['in_data:0'], 'log:0')
......@@ -2610,6 +2645,7 @@ def test_forward_log1p():
"""test operator Log1p """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.log1p(in_data, name="log1p")
compare_tf_with_tvm([np_data], ['in_data:0'], 'log1p:0')
......@@ -2619,6 +2655,7 @@ def test_forward_cos():
"""test operator cos """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.cos(in_data, name="cos")
compare_tf_with_tvm([np_data], ['in_data:0'], 'cos:0')
......@@ -2637,6 +2674,7 @@ def test_forward_sin():
"""test operator sin """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.sin(in_data, name="sin")
compare_tf_with_tvm([np_data], ['in_data:0'], 'sin:0')
......@@ -2647,6 +2685,7 @@ def test_forward_negative():
np_data = np.random.uniform(-100, 255,
size=(224, 224, 3)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
tf.negative(in_data, name="negative")
compare_tf_with_tvm([np_data], ['in_data:0'], 'negative:0')
......@@ -2656,6 +2695,7 @@ def test_forward_log_softmax():
"""test operator LogSoftmax"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.log_softmax(in_data, name="LogSoftmax")
compare_tf_with_tvm([np_data], ['in_data:0'], 'LogSoftmax:0')
......@@ -2665,6 +2705,7 @@ def test_forward_softplus():
"""test operator Softplus"""
np_data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.nn.softplus(in_data, name="softplus")
compare_tf_with_tvm([np_data], ['in_data:0'], 'softplus:0')
......@@ -2674,6 +2715,7 @@ def test_forward_rsqrt():
"""test Rsqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.rsqrt(in_data, name="rsqrt")
compare_tf_with_tvm([np_data], ['in_data:0'], 'rsqrt:0')
......@@ -2683,6 +2725,7 @@ def test_forward_sqrt():
"""test Sqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sqrt(in_data, name="sqrt")
compare_tf_with_tvm([np_data], ['in_data:0'], 'sqrt:0')
......@@ -2693,6 +2736,7 @@ def _test_forward_right_shift(in_shape, dtype):
lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
......@@ -2710,6 +2754,7 @@ def _test_forward_left_shift(in_shape, dtype):
lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
......@@ -2769,6 +2814,7 @@ def test_forward_reduce_all():
"""Test the All operator."""
np_data = np.random.choice([True, False], size=(5, 7, 11))
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.bool, (5, 7, 11), name="in_data")
tf.reduce_all(in_data, name="all")
compare_tf_with_tvm([np_data], ['in_data:0'], 'all:0')
......@@ -2777,6 +2823,7 @@ def test_forward_reduce_any():
"""Test the Any operator."""
np_data = np.random.choice([True, False], size=(5, 7, 11))
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.bool, (5, 7, 11), name="in_data")
tf.reduce_any(in_data, name="any")
compare_tf_with_tvm([np_data], ['in_data:0'], 'any:0')
......@@ -2785,6 +2832,7 @@ def test_forward_reduce_max():
def check_max(ishape, axis, keepdims, dtype):
tf.reset_default_graph()
np_data = np.random.uniform(size=ishape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
tf.math.reduce_max(in_data, axis=axis,
keepdims=keepdims, name="reduce_max")
......@@ -2799,6 +2847,7 @@ def test_forward_reduce_min():
def check_min(ishape, axis, keepdims, dtype):
tf.reset_default_graph()
np_data = np.random.uniform(size=ishape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
tf.math.reduce_min(in_data, axis=axis,
keepdims=keepdims, name="reduce_max")
......@@ -2840,6 +2889,7 @@ def test_forward_rel_ops():
def _test_forward_expand_dims(data, axis):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name='in1')
out = tf.expand_dims(in1, axis)
compare_tf_with_tvm([data], [in1.name], out.name)
......@@ -2883,6 +2933,7 @@ def test_forward_maximum():
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.maximum(lft_data, rgt_data, name="maximum")
......@@ -2899,6 +2950,7 @@ def test_forward_minimum():
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.minimum(lft_data, rgt_data, name="minimum")
......@@ -3089,6 +3141,8 @@ if __name__ == '__main__':
test_forward_ptb()
# RNN
if package_version.parse(tf.VERSION) < package_version.parse('2.0.0'):
# In 2.0, tf.contrib.rnn.LSTMBlockCell is removed
test_forward_lstm()
# Elementwise
......
......@@ -15,7 +15,10 @@
# specific language governing permissions and limitations
# under the License.
"""Unit tests for converting TensorFlow debugging ops to Relay."""
import tensorflow as tf
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
import numpy as np
from tvm import relay
from tvm.relay.frontend.tensorflow import from_tensorflow
......
......@@ -26,7 +26,10 @@ import numpy as np
import tvm
from tvm import te
from tvm import relay
import tensorflow as tf
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
......@@ -156,7 +159,7 @@ def compare_tflite_with_tvm(in_data, in_name, input_tensors,
if init_global_variables:
sess.run(variables.global_variables_initializer())
# convert to tflite model
converter = interpreter_wrapper.TFLiteConverter.from_session(
converter = tf.lite.TFLiteConverter.from_session(
sess, input_tensors, output_tensors)
if quantized:
......
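tf.lite.TFLiteConverter is the stable spelling on both sides: since tf is bound to tensorflow.compat.v1 above, from_session resolves on TF 1.13+ and TF 2.x alike, whereas the old interpreter_wrapper path was a private module. A minimal end-to-end sketch, assuming tf is tensorflow.compat.v1:

# Hedged sketch of the converter call on a tiny throwaway graph.
import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [1, 4], name="x")
    y = tf.nn.relu(x, name="y")
    with tf.Session() as sess:  # picks up the current default graph
        converter = tf.lite.TFLiteConverter.from_session(sess, [x], [y])
        tflite_model = converter.convert()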
......@@ -99,8 +99,12 @@ tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224.tflite")
tflite_model_buf = open(tflite_model_file, "rb").read()
# Get TFLite model from buffer
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
######################################################################
# Load a test image
......
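The same buffer-loading fork, factored as a helper (load_tflite_model is a hypothetical name) for reuse: newer releases of the tflite package expose GetRootAsModel on the Model module directly, while older ones nest it one level deeper, and the AttributeError fallback covers both.

def load_tflite_model(buf):
    try:
        import tflite
        return tflite.Model.GetRootAsModel(buf, 0)
    except AttributeError:
        # Older tflite packages: the class lives at tflite.Model.Model.
        import tflite.Model
        return tflite.Model.Model.GetRootAsModel(buf, 0)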