diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py
index aafc301be555..59b9185106f3 100644
--- a/python/tvm/relay/frontend/onnx.py
+++ b/python/tvm/relay/frontend/onnx.py
@@ -34,6 +34,7 @@
 from .. import qnn as _qnn
 from .. import ty as _ty
 from .. import vision as _vision
+from .. import random as _random
 from .common import (
     AttrCvt,
     Renamer,
@@ -3329,6 +3330,30 @@ def _impl_v11(cls, inputs, attr, params):
         return _expr.TupleWrapper(_expr.Tuple([unique_vals, indices, inverse_indices, counts]), 4)
 
 
+class RandomUniform(OnnxOpConverter):
+    """Operator converter for random_uniform"""
+
+    @classmethod
+    def _impl_v1(cls, inputs, attr, params):
+        dtype = get_type(attr.get("dtype", 1))
+        high = attr.get("high", 1.0)
+        low = attr.get("low", 0.0)
+        seed = attr.get("seed", None)
+        shape = attr["shape"]
+
+        assert dtype in [
+            "float32",
+            "float64",
+        ], "Only float random value generation is currently supported."
+
+        if seed is None:
+            seed = np.random.randint(1e6)
+        key = _random.threefry_key(seed)
+        output = _op.random.uniform(key, shape, dtype=dtype, low=low, high=high)
+        _, vals = _expr.TupleWrapper(output, 2)
+        return vals
+
+
 # compatible operators that do NOT require any conversion.
 _identity_list = []
 
@@ -3507,6 +3532,8 @@ def _get_convert_map(opset):
         "QLinearConv": QLinearConv.get_converter(opset),
         "QLinearAdd": QLinearAdd.get_converter(opset),
         "ConvInteger": ConvInteger.get_converter(opset),
+        # Random number generation.
+        "RandomUniform": RandomUniform.get_converter(opset),
     }
 
 
diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py
index 049ca1e0cfe0..f7bae5da79e1 100644
--- a/tests/python/frontend/onnx/test_forward.py
+++ b/tests/python/frontend/onnx/test_forward.py
@@ -4870,6 +4870,66 @@ def test_qlinearadd():
     verify_qlinearadd([5, 1, 7], [2, 7], [5, 2, 7])
 
 
+def get_random_uniform(shape, dtype="float32", high=1.0, low=0.0, seed=None, target="llvm"):
+    ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
+    node = helper.make_node(
+        "RandomUniform", [], ["out"], shape=shape, dtype=ONNX_DTYPE, high=high, low=low
+    )
+    if seed is not None:
+        seed_attr = helper.make_attribute("seed", seed)
+        node.attribute.append(seed_attr)
+
+    graph = helper.make_graph(
+        [node],
+        "random_uniform_test",
+        inputs=[],
+        outputs=[helper.make_tensor_value_info("out", ONNX_DTYPE, shape)],
+    )
+    model = helper.make_model(graph, producer_name="random_uniform_test")
+    return get_tvm_output_with_vm(model, [], target=target, device=tvm.device(target, 0))
+
+
+def test_random_uniform():
+    targets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]
+    for target in targets:
+        # Check that function runs and produces proper shape.
+        vals = get_random_uniform([10], dtype="float32", target=target)
+        assert list(vals.shape) == [10]
+        assert vals.dtype == "float32"
+
+        # Test N-D tensor generation.
+        vals = get_random_uniform([1, 3, 100, 100], dtype="float32", target=target)
+        assert list(vals.shape) == [1, 3, 100, 100]
+
+        # Check that bounds aren't exceeded.
+        vals = get_random_uniform(shape=[100], high=100, low=-100)
+        assert list(vals.shape) == [100]
+        assert all(vals >= -100) and all(vals <= 100)
+
+        # Check that a fixed seed produces the same values when run twice.
+        vals_1 = get_random_uniform(shape=[10], seed=1)
+        vals_2 = get_random_uniform(shape=[10], seed=1)
+        assert all(vals_1 == vals_2)
+
+        # Test against an expected output with a fixed seed.
+        real = get_random_uniform(shape=[10], seed=5)
+        expected = np.asarray(
+            [
+                0.8614111,
+                0.46572232,
+                0.6007328,
+                0.21619737,
+                0.6361222,
+                0.7298056,
+                0.13094282,
+                0.03556716,
+                0.32997167,
+                0.2977605,
+            ]
+        )
+        tvm.testing.assert_allclose(real, expected, rtol=1e-5)
+
+
 def verify_convinteger(
     x_shape,
     w_shape,
@@ -5108,5 +5168,6 @@ def repeat(N, D):
     test_reverse_sequence()
     test_eyelike()
     test_qlinearconv()
+    test_random_uniform()
     test_convinteger()
     test_batch_matmul()