Commit a711f38e authored by SWu, committed by Wuwei Lin

Improve numerical gradient check (#3856)

parent 2ebf1bd1
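For context: the old checker compared a one-sided finite-difference estimate of a directional derivative, built entirely as relay expressions, against the reverse-mode result; the new one compares per-element analytic gradients against a two-sided (central-difference) approximation computed in numpy. A minimal standalone sketch of that approximation follows; the function name is illustrative and not part of the commit.

import numpy as np

def central_diff_grad(f, x, eps=1e-6):
    # Two-sided approximation: perturb one element at a time and use the
    # symmetric difference quotient, which has O(eps**2) truncation error
    # versus O(eps) for the one-sided version the old checker used.
    grad = np.zeros_like(x)
    for i in np.ndindex(*x.shape):
        x_i = x[i]
        x[i] = x_i + eps
        f_plus = f(x)
        x[i] = x_i - eps
        f_minus = f(x)
        x[i] = x_i  # restore the original value
        grad[i] = np.sum(f_plus - f_minus) / (2 * eps)
    return grad

Summing over the outputs mirrors what the new check_grad below does: for a function with tensor output, the numeric side effectively differentiates the sum of all outputs with respect to each input element.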
@@ -56,72 +56,71 @@ def run_infer_type(expr):
     return run_opt_pass(expr, transform.InferType())
 
-def rand_from_type(t):
-    return relay.Constant(rand(t.dtype, *[int(d) for d in t.shape]))
-
-CHECK_GRAD_COUNTER = 0
-
-def check_grad(func, mod=None):
-    """
-    Test that directional gradient calculated by reverse mode
-    is close to the one calculated by finite difference.
-    """
-    global CHECK_GRAD_COUNTER
-    if mod is None:
-        mod = relay.Module()
-    def make(name):
-        return GlobalVar(name + str(CHECK_GRAD_COUNTER))
-    func_name = make("func_")
-    back_func_name = make("back_func_")
-    finite_difference_func_name = make("finite_difference_")
-    reverse_mode_func_name = make("reverse_mode_")
-    check_func_name = make("check_func_")
-    CHECK_GRAD_COUNTER = CHECK_GRAD_COUNTER + 1
-    epsilon = relay.const(0.01)
-    mod[func_name] = func
-    mod[back_func_name] = gradient(mod[func_name], mod=mod)
-    params = mod[func_name].params
-    directions = [rand_from_type(x.checked_type) for x in params]
-    ft = TensorType(())
-    sb = ScopeBuilder()
-    def get_reverse_mode_result(e, d, t):
-        assert isinstance(t, TensorType)
-        return op.cast(e * d, 'float32')
-    bf = sb.let("bf", TupleGetItem(back_func_name(*params), 1))
-    reverse_mode_results = [get_reverse_mode_result(TupleGetItem(bf, i),
-                                                    directions[i],
-                                                    x.checked_type)
-                            for i, x in enumerate(params)]
-    reverse_mode_result = relay.const(0.0)
-    for x in reverse_mode_results:
-        reverse_mode_result = reverse_mode_result + op.reduce.sum(x)
-    sb.ret(reverse_mode_result)
-    reverse_mode_result = sb.get()
-    mod[reverse_mode_func_name] = Function(params,
-                                           reverse_mode_result,
-                                           ft,
-                                           mod[func_name].type_params,
-                                           mod[func_name].attrs)
-    finite_difference_result = op.reduce.sum((func_name(*[x + epsilon * y for x, y in
-                                                          zip(params, directions)]) -
-                                              func_name(*params)) /
-                                             epsilon)
-    mod[finite_difference_func_name] = Function(params,
-                                                finite_difference_result,
-                                                ft,
-                                                mod[func_name].type_params,
-                                                mod[func_name].attrs)
-    check_func_result = op.abs(reverse_mode_func_name(*params) -
-                               finite_difference_func_name(*params))
-    mod[check_func_name] = Function(params,
-                                    check_func_result,
-                                    ft,
-                                    mod[func_name].type_params,
-                                    mod[func_name].attrs)
-    ex = create_executor(mod=mod)
-    res = ex.evaluate(check_func_name(*[rand_from_type(x.checked_type) for x in params]))
-    assert res.data.asnumpy() < 0.001
+def _np_randn_from_type(t, scale=1):
+    return (scale * np.random.randn(*(int(d) for d in t.shape))).astype(t.dtype)
+
+
+def check_grad(func, inputs=None, eps=1e-6, atol=1e-5, rtol=1e-3):
+    """Perform numerical gradient checking given a relay function.
+
+    Compare analytical gradients to numerical gradients derived from two-sided approximation. Note
+    that this test may fail if your function input types are not of high enough precision.
+
+    Parameters
+    ----------
+    func : tvm.relay.Function
+        The relay function to test.
+
+    inputs: List[np.array]
+        Optional user-provided input parameters to use. If not given, will generate random normal
+        inputs scaled to be close to the chosen epsilon value to avoid numerical precision loss.
+
+    eps: float
+        The epsilon value to use for computing numerical gradient approximation.
+
+    atol: float
+        The absolute tolerance on difference between numerical and analytical gradients. Note that
+        this needs to be scaled appropriately relative to the chosen eps and inputs.
+
+    rtol: float
+        The relative tolerance on difference between numerical and analytical gradients. Note that
+        this needs to be scaled appropriately relative to the chosen eps.
+    """
+    fwd_func = run_infer_type(func)
+    bwd_func = run_infer_type(gradient(fwd_func))
+
+    if inputs is None:
+        params = fwd_func.params
+        # Generate random inputs on the same scale as epsilon to avoid numerical precision loss.
+        inputs = [_np_randn_from_type(x.checked_type, scale=(10 * eps)) for x in params]
+
+    for target, ctx in ctx_list():
+        intrp = relay.create_executor(ctx=ctx, target=target)
+
+        # Get analytic gradients.
+        _, grads = intrp.evaluate(bwd_func)(*inputs)
+        grads = [grad.asnumpy().astype("float64") for grad in grads]
+
+        # Get numeric gradients for each dimension of each param, using two-sided approximation.
+        approx_grads = []
+        for x in inputs:
+            approx_grad = np.zeros(x.shape)
+            for i in np.ndindex(*x.shape):
+                x_i = x[i]
+                x[i] = x_i + eps
+                fwd_plus = intrp.evaluate(fwd_func)(*inputs).asnumpy().astype("float64")
+                x[i] = x_i - eps
+                fwd_minus = intrp.evaluate(fwd_func)(*inputs).asnumpy().astype("float64")
+                x[i] = x_i
+                approx_grad[i] = np.sum((fwd_plus - fwd_minus) / (2 * eps))
+            approx_grads.append(approx_grad)
+
+        # Compare gradients by checking that relative difference is below tolerance.
+        for grad, approx_grad in zip(grads, approx_grads):
+            np.testing.assert_allclose(grad, approx_grad, atol=atol, rtol=rtol)
 
 
 def rand(dtype, *shape):
     return tvm.nd.array(np.random.rand(*shape).astype(dtype))
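A hypothetical test using the new helper might look like the sketch below; the test name, op, and shapes are illustrative and not part of this commit, and check_grad is assumed to be in scope. float64 inputs are used because, as the docstring warns, low-precision input types can make the check fail.

from tvm import relay

def test_exp_grad():
    # Build a single-op relay function and compare its analytic gradient
    # against the two-sided numerical approximation on every backend in
    # ctx_list(), using the default eps=1e-6, atol=1e-5, rtol=1e-3.
    x = relay.var("x", shape=(3, 3), dtype="float64")
    func = relay.Function([x], relay.exp(x))
    check_grad(func)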