From d0ce50ddb41618d04459f70b394d3dba43f1dbe8 Mon Sep 17 00:00:00 2001
From: Sudarsan Mansingh
Date: Wed, 8 Feb 2023 01:00:37 +0530
Subject: [PATCH] Replace true_div with true_divide

---
 README.rst                              |  2 +-
 aesara/scalar/basic.py                  |  6 +++---
 aesara/scalar/math.py                   |  4 +-
 aesara/tensor/elemwise.py               |  2 +-
 aesara/tensor/math.py                   | 14 +++---
 aesara/tensor/nnet/basic.py             |  6 +--
 aesara/tensor/nnet/batchnorm.py         |  4 +-
 aesara/tensor/rewriting/math.py         | 66 +++++++++++++------------
 aesara/tensor/rewriting/special.py      |  6 +--
 aesara/tensor/var.py                    |  4 ++--
 doc/extending/graph_rewriting.rst       | 32 ++++++-------
 doc/tutorial/printing_drawing.rst       |  2 +-
 tests/scalar/test_basic.py              | 12 ++---
 tests/tensor/rewriting/test_basic.py    |  8 ++--
 tests/tensor/rewriting/test_elemwise.py |  6 +--
 tests/tensor/rewriting/test_math.py     | 14 +++---
 tests/tensor/rewriting/test_special.py  |  6 +++---
 tests/tensor/test_extra_ops.py          |  2 +-
 tests/tensor/test_math.py               |  8 ++--
 19 files changed, 103 insertions(+), 101 deletions(-)

diff --git a/README.rst b/README.rst
index c3eac88a4c..9b7455d017 100644
--- a/README.rst
+++ b/README.rst
@@ -60,7 +60,7 @@ Getting started
     aesara.dprint(d)
     # Elemwise{add,no_inplace} [id A] ''
     #  |InplaceDimShuffle{x} [id B] ''
-    #  | |Elemwise{true_div,no_inplace} [id C] ''
+    #  | |Elemwise{true_divide,no_inplace} [id C] ''
     #  | |a [id D]
     #  | |a [id D]
     #  |dot [id E] ''
diff --git a/aesara/scalar/basic.py b/aesara/scalar/basic.py
index 8c89f81ce7..310d378fa2 100644
--- a/aesara/scalar/basic.py
+++ b/aesara/scalar/basic.py
@@ -793,8 +793,8 @@ def __sub__(self, other):
     def __mul__(self, other):
         return mul(self, other)
 
     def __truediv__(self, other):
-        return true_div(self, other)
+        return true_divide(self, other)
 
     def __floordiv__(self, other):
         return int_div(self, other)
@@ -2035,7 +2035,7 @@ def grad(self, inputs, gout):
         return first_part, second_part
 
 
-true_div = TrueDiv(upcast_out, name="true_div")
+true_divide = TrueDiv(upcast_out, name="true_divide")
 
 
 class IntDiv(BinaryScalarOp):
@@ -2869,7 +2869,7 @@ def c_code(self, node, name, inputs, outputs, sub):
 pprint.assign(mul, printing.OperatorPrinter("*", -1, "either"))
 pprint.assign(sub, printing.OperatorPrinter("-", -2, "left"))
 pprint.assign(neg, printing.OperatorPrinter("-", 0, "either"))
-pprint.assign(true_div, printing.OperatorPrinter("/", -1, "left"))
+pprint.assign(true_divide, printing.OperatorPrinter("/", -1, "left"))
 pprint.assign(int_div, printing.OperatorPrinter("//", -1, "left"))
 pprint.assign(pow, printing.OperatorPrinter("**", 1, "right"))
 pprint.assign(mod, printing.OperatorPrinter("%", -1, "left"))
diff --git a/aesara/scalar/math.py b/aesara/scalar/math.py
index c62196adc1..b7d467fa93 100644
--- a/aesara/scalar/math.py
+++ b/aesara/scalar/math.py
@@ -28,7 +28,7 @@
     log,
     log1p,
     switch,
-    true_div,
+    true_divide,
     upcast,
     upgrade_to_float,
     upgrade_to_float64,
@@ -1241,7 +1241,7 @@ def impl(self, x):
     def grad(self, inp, grads):
         (x,) = inp
         (gz,) = grads
-        res = true_div(-1.0, expm1(-x))
+        res = true_divide(-1.0, expm1(-x))
         # Correct gradient at 0.0 to be -inf
         res = switch(isinf(res), -np.inf, res)
         return [gz * res]
diff --git a/aesara/tensor/elemwise.py b/aesara/tensor/elemwise.py
index 2b3fd11748..d86629ca1b 100644
--- a/aesara/tensor/elemwise.py
+++ b/aesara/tensor/elemwise.py
@@ -325,7 +325,7 @@ class Elemwise(OpenMPOp):
     -``Elemwise(add, {0 : 1})``: represents ``+=`` on the second argument ``y += x``
     -``Elemwise(mul)(np.random.random((10, 5)), np.random.random((1, 5)))``: the second input is
       completed along the first dimension to match the first input
-    -``Elemwise(true_div)(np.random.random(10, 5), np.random.random(10, 1))``: same but along the
+    -``Elemwise(true_divide)(np.random.random((10, 5)), np.random.random((10, 1)))``: same but along the
       second dimension
     -``Elemwise(int_div)(np.random.random((1, 5)), np.random.random((10, 1)))``: the output has
       size ``(10, 5)``.
diff --git a/aesara/tensor/math.py b/aesara/tensor/math.py
index c602d4d6b1..d5d2d6c6a8 100644
--- a/aesara/tensor/math.py
+++ b/aesara/tensor/math.py
@@ -1608,7 +1608,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None)
 
     # Cast shp into a float type
     # TODO Once we have a consistent casting policy, we could simply
-    # use true_div.
+    # use true_divide.
     if s.dtype in ("float16", "float32", "complex64"):
         shp = cast(shp, "float32")
     else:
@@ -1625,7 +1625,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None)
 
     # This sequential division will possibly be optimized by Aesara:
     for i in axis:
-        s = true_div(s, shp[i])
+        s = true_divide(s, shp[i])
 
     # This can happen when axis is an empty list/tuple
     if s.dtype != shp.dtype and s.dtype in discrete_dtypes:
@@ -1697,7 +1697,7 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
         shp = shape(input) - ddof
     v = sum((centered_input**two), axis=axis, keepdims=keepdims)
     for i in axis:
-        v = true_div(v, shp[i])
+        v = true_divide(v, shp[i])
 
     # use 'corrected_two_pass' algorithm
     if corrected:
@@ -1708,7 +1708,7 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
         shp_inp = shape(input)
         error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2
         for i in axis:
-            error = true_div(error, shp[i] * shp_inp[i])
+            error = true_divide(error, shp[i] * shp_inp[i])
         v = v - error
 
     v.name = "var"
@@ -1794,7 +1794,7 @@ def mul(a, *other_terms):
 
 
 @scalar_elemwise
-def true_div(a, b):
+def true_divide(a, b):
     """elementwise [true] division (inverse of multiplication)"""
     # see decorator for function body
 
@@ -1876,7 +1876,7 @@ def clip(x, min, max):
 pprint.assign(mul, printing.OperatorPrinter("*", -1, "either"))
 pprint.assign(sub, printing.OperatorPrinter("-", -2, "left"))
 pprint.assign(neg, printing.OperatorPrinter("-", 0, "either"))
-pprint.assign(true_div, printing.OperatorPrinter("/", -1, "left"))
+pprint.assign(true_divide, printing.OperatorPrinter("/", -1, "left"))
 pprint.assign(int_div, printing.OperatorPrinter("//", -1, "left"))
 pprint.assign(pow, printing.OperatorPrinter("**", 1, "right"))
 
@@ -3121,7 +3121,7 @@ def matmul(x1: "ArrayLike", x2: "ArrayLike", dtype: Optional["DTypeLike"] = None
     "add",
     "sub",
     "mul",
-    "true_div",
+    "true_divide",
     "int_div",
     "floor_div",
     "ceil_intdiv",
diff --git a/aesara/tensor/nnet/basic.py b/aesara/tensor/nnet/basic.py
index 61eaf4584c..2e345398ea 100644
--- a/aesara/tensor/nnet/basic.py
+++ b/aesara/tensor/nnet/basic.py
@@ -40,7 +40,7 @@
     softplus,
 )
 from aesara.tensor.math import sum as at_sum
-from aesara.tensor.math import tanh, tensordot, true_div
+from aesara.tensor.math import tanh, tensordot, true_divide
 from aesara.tensor.nnet.blocksparse import sparse_block_dot
 from aesara.tensor.rewriting.basic import (
     register_canonicalize,
@@ -1342,7 +1342,7 @@ def local_advanced_indexing_crossentropy_onehot_grad(fgraph, node):
                 out_grad = -out_grad
                 incr = incr.owner.inputs[0]
 
-            if incr.owner and incr.owner.op == true_div:
+            if incr.owner and incr.owner.op == true_divide:
                 num, denom = incr.owner.inputs
 
                 # set out_grad according to the numerator, it may be divided later
@@ -1406,7 +1406,7 @@ def local_advanced_indexing_crossentropy_onehot_grad(fgraph, node):
         # it was really case 1.
 
         # Second case
-        elif d_sm.owner and d_sm.owner.op == true_div:
+        elif d_sm.owner and d_sm.owner.op == true_divide:
             # we're looking for
             # AdvIncSubtensor(zeros, grad_nll, arange(len(y)), y) / softmax
             try:
diff --git a/aesara/tensor/nnet/batchnorm.py b/aesara/tensor/nnet/batchnorm.py
index 2edf3675e4..2a30986771 100644
--- a/aesara/tensor/nnet/batchnorm.py
+++ b/aesara/tensor/nnet/batchnorm.py
@@ -5,7 +5,7 @@
 from aesara.graph.basic import Apply
 from aesara.graph.op import Op
 from aesara.graph.rewriting.basic import copy_stack_trace, node_rewriter
-from aesara.scalar import Composite, add, as_common_dtype, mul, sub, true_div
+from aesara.scalar import Composite, add, as_common_dtype, mul, sub, true_divide
 from aesara.tensor import basic as at
 from aesara.tensor.basic import as_tensor_variable
 from aesara.tensor.elemwise import Elemwise
@@ -27,7 +27,7 @@ def __init__(self, dtype):
         std = aesara.scalar.ScalarType(dtype=dtype).make_variable()
         gamma = aesara.scalar.ScalarType(dtype=dtype).make_variable()
         beta = aesara.scalar.ScalarType(dtype=dtype).make_variable()
-        o = add(mul(true_div(sub(x, mean), std), gamma), beta)
+        o = add(mul(true_divide(sub(x, mean), std), gamma), beta)
         inputs = [x, mean, std, gamma, beta]
         outputs = [o]
         super().__init__(inputs, outputs)
diff --git a/aesara/tensor/rewriting/math.py b/aesara/tensor/rewriting/math.py
index 48df59e461..746b76f6b5 100644
--- a/aesara/tensor/rewriting/math.py
+++ b/aesara/tensor/rewriting/math.py
@@ -70,7 +70,7 @@
 from aesara.tensor.math import pow as at_pow
 from aesara.tensor.math import prod, reciprocal, sgn, sigmoid, softplus, sqr, sqrt, sub
 from aesara.tensor.math import sum as at_sum
-from aesara.tensor.math import true_div
+from aesara.tensor.math import true_divide
 from aesara.tensor.rewriting.basic import (
     broadcast_like,
     encompasses_broadcastable,
@@ -568,7 +568,7 @@ def local_mul_switch_sink(fgraph, node):
 
 
 @register_canonicalize
-@node_rewriter([true_div, int_div])
+@node_rewriter([true_divide, int_div])
 def local_div_switch_sink(fgraph, node):
     """
     This rewrite makes the following changes in the graph:
@@ -584,7 +584,7 @@ def local_div_switch_sink(fgraph, node):
 
     See `local_mul_switch_sink` for more details.
     """
-    if node.op != true_div and node.op != int_div:
+    if node.op != true_divide and node.op != int_div:
         return False
     op = node.op
     if node.inputs[0].owner and node.inputs[0].owner.op == switch:
@@ -660,7 +660,7 @@ class AlgebraicCanonizer(NodeRewriter):
         mul
     inverse
         An `Op` class such that ``inverse(main(x, y), y) == x``
-        (e.g. `sub` or `true_div`).
+        (e.g. `sub` or `true_divide`).
     reciprocal
         A function such that ``main(x, reciprocal(y)) == inverse(x, y)``
         (e.g. `neg` or `reciprocal`).
@@ -679,7 +679,7 @@ class AlgebraicCanonizer(NodeRewriter):
     >>> from aesara.tensor.rewriting.math import AlgebraicCanonizer
     >>> add_canonizer = AlgebraicCanonizer(add, sub, neg, \\
     ...                                    lambda n, d: sum(n) - sum(d))
-    >>> mul_canonizer = AlgebraicCanonizer(mul, true_div, inv, \\
+    >>> mul_canonizer = AlgebraicCanonizer(mul, true_divide, inv, \\
     ...                                    lambda n, d: prod(n) / prod(d))
 
     Examples of rewrites `mul_canonizer` can perform:
@@ -1104,7 +1104,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
 
 
 local_mul_canonizer = AlgebraicCanonizer(
-    mul, true_div, reciprocal, mul_calculate, False
+    mul, true_divide, reciprocal, mul_calculate, False
 )
 register_canonicalize(local_mul_canonizer, name="local_mul_canonizer")
 
@@ -1459,7 +1459,7 @@ def local_sum_prod_div_dimshuffle(fgraph, node):
         if axis is None:
             axis = list(range(node.inputs[0].ndim))
         node_input = node.inputs[0]
-        if node_input.owner and node_input.owner.op == true_div:
+        if node_input.owner and node_input.owner.op == true_divide:
             numerator, denominator = node_input.owner.inputs
 
             if denominator.owner and isinstance(denominator.owner.op, DimShuffle):
@@ -1505,13 +1505,13 @@ def local_sum_prod_div_dimshuffle(fgraph, node):
 
                 if isinstance(node.op, Sum):
                     op_on_compatible_dims = at_sum(numerator, axis=compatible_dims)
-                    rval = true_div(op_on_compatible_dims, optimized_dimshuffle)
+                    rval = true_divide(op_on_compatible_dims, optimized_dimshuffle)
                     if len(reordered_incompatible_dims) > 0:
                         rval = at_sum(rval, axis=reordered_incompatible_dims)
                 elif isinstance(node.op, Prod):
                     op_on_compatible_dims = prod(numerator, axis=compatible_dims)
                     dtype = numerator.dtype
-                    rval = true_div(
+                    rval = true_divide(
                         op_on_compatible_dims,
                         (
                             optimized_dimshuffle
@@ -1803,18 +1803,18 @@ def local_neg_div_neg(fgraph, node):
 
     """
     if node.op == neg:
-        if node.inputs[0].owner and node.inputs[0].owner.op == true_div:
+        if node.inputs[0].owner and node.inputs[0].owner.op == true_divide:
             frac = node.inputs[0]
             num, denom = frac.owner.inputs
             if num.owner and num.owner.op == neg:
                 if len(fgraph.clients[frac]) == 1:
                     # No other clients of the original division
                     new_num = num.owner.inputs[0]
-                    return [true_div(new_num, denom)]
+                    return [true_divide(new_num, denom)]
             elif all(num.broadcastable) and isinstance(num, Constant):
                 if len(fgraph.clients[frac]) == 1:
                     new_num = -num.data
-                    return [true_div(new_num, denom)]
+                    return [true_divide(new_num, denom)]
 
 
 @register_canonicalize
@@ -1888,9 +1888,9 @@ def local_mul_zero(fgraph, node):
 
 
 # TODO: Add this to the canonicalization to reduce redundancy.
 @register_specialize
-@node_rewriter([true_div])
+@node_rewriter([true_divide])
 def local_div_to_reciprocal(fgraph, node):
-    if node.op == true_div and np.all(get_constant(node.inputs[0]) == 1.0):
+    if node.op == true_divide and np.all(get_constant(node.inputs[0]) == 1.0):
         out = node.outputs[0]
         new_out = reciprocal(local_mul_canonizer.merge_num_denum(node.inputs[1:], []))
         # The ones could have forced upcasting
@@ -1951,7 +1951,7 @@ def local_intdiv_by_one(fgraph, node):
 
 @register_canonicalize
 @register_specialize
-@node_rewriter([int_div, true_div])
+@node_rewriter([int_div, true_divide])
 def local_zero_div(fgraph, node):
     """0 / x -> 0"""
     if isinstance(node.op, Elemwise) and isinstance(
@@ -2238,13 +2238,13 @@ def local_abs_lift(fgraph, node):
     assert node.nin == 1
     if node.inputs[0].owner.op == mul:
         return [mul(*[at_abs(i) for i in node.inputs[0].owner.inputs])]
-    if node.inputs[0].owner.op == true_div:
+    if node.inputs[0].owner.op == true_divide:
         i = node.inputs[0].owner.inputs
-        return [true_div(at_abs(i[0]), at_abs(i[1]))]
+        return [true_divide(at_abs(i[0]), at_abs(i[1]))]
 
 
 @register_specialize
-@node_rewriter([mul, true_div])
+@node_rewriter([mul, true_divide])
 def local_abs_merge(fgraph, node):
     """
-    Merge abs generated by local_abs_lift when the canonizer don't
+    Merge abs generated by local_abs_lift when the canonizer doesn't
@@ -2268,12 +2268,14 @@ def local_abs_merge(fgraph, node):
                 return False
         return [at_abs(mul(*inputs))]
     if (
-        node.op == true_div
+        node.op == true_divide
         and sum(i.owner.op == at_abs for i in node.inputs if i.owner) == 2
     ):
         return [
             at_abs(
-                true_div(node.inputs[0].owner.inputs[0], node.inputs[1].owner.inputs[0])
+                true_divide(
+                    node.inputs[0].owner.inputs[0], node.inputs[1].owner.inputs[0]
+                )
             )
         ]
 
@@ -2516,7 +2518,7 @@ def attempt_distribution(factor, num, denum, out_type):
 
 @register_canonicalize
 @register_stabilize
-@node_rewriter([mul, true_div, reciprocal])
+@node_rewriter([mul, true_divide, reciprocal])
 def local_greedy_distributor(fgraph, node):
     """Reduce the number of multiplications and/or divisions.
@@ -2529,7 +2531,7 @@ def local_greedy_distributor(fgraph, node):
     The following expressions are simplified:
         1. ``((a/x + b/y) * x * y) -> a*y + b*x``
         2. ``((a/x + b) * x) -> a + b*x``
-        3. There are other forms too where node is a true_div.
+        3. There are other forms too where node is a true_divide.
 
     The following expressions are not simplified:
         4. ``((a + b) * x) /> a*x + b*x``
@@ -2708,7 +2710,7 @@ def local_log_erfc(fgraph, node):
 
 @register_stabilize
 @register_specialize
-@node_rewriter([true_div])
+@node_rewriter([true_divide])
 def local_grad_log_erfc_neg(fgraph, node):
     """Stability rewrite for the grad of ``log(erfc(x))``.
 
@@ -2729,7 +2731,7 @@ def local_grad_log_erfc_neg(fgraph, node):
         Make it so that the test does not generate an error in that case!
 
     """
-    if node.op != true_div:
+    if node.op != true_divide:
         return False
     if not node.inputs[1].owner or node.inputs[1].owner.op != erfc:
         return False
@@ -2865,7 +2867,7 @@ def check_input(inputs):
         return None
 
     # we move the y outside the div.
-    true_div_no_mul = true_div(exp_in, erfc_in)
+    true_div_no_mul = true_divide(exp_in, erfc_in)
     true_div_no_mul.owner.tag.local_grad_log_erfc_neg = True
 
     # aaron value
@@ -3153,7 +3155,7 @@ def is_neg(var):
 
 
 @register_stabilize
-@node_rewriter([true_div])
+@node_rewriter([true_divide])
 def local_exp_over_1_plus_exp(fgraph, node):
     """
 
@@ -3163,7 +3165,7 @@ def local_exp_over_1_plus_exp(fgraph, node):
     """
     # This rewrite should be done for numerical stability
    # so we don't care to check client counts
-    if node.op == true_div:
+    if node.op == true_divide:
         # find all the exp() terms in the numerator
         num, denom = node.inputs
 
@@ -3540,7 +3542,7 @@ def local_reciprocal_1_plus_exp(fgraph, node):
     """``reciprocal(1+exp(x)) -> sigm(-x)``
 
-    TODO: This is redundant; we can just decided on *one* canonical form
-    for division (e.g. either `true_div` or `reciprocal`) and have this
+    TODO: This is redundant; we can just decide on *one* canonical form
+    for division (e.g. either `true_divide` or `reciprocal`) and have this
     taken care of with existing rewrites.
     """
     # This Rewrite should be done for numerical stability
@@ -3592,7 +3594,7 @@ def local_reciprocal_1_plus_exp(fgraph, node):
 # log(sigmoid(x) / (1 - sigmoid(x))) -> x
 # i.e logit(sigmoid(x)) -> x
 local_logit_sigmoid = PatternNodeRewriter(
-    (log, (true_div, (sigmoid, "x"), (sub, 1, (sigmoid, "x")))),
+    (log, (true_divide, (sigmoid, "x"), (sub, 1, (sigmoid, "x")))),
     "x",
     tracks=[sigmoid],
     get_nodes=get_clients_at_depth2,
@@ -3606,7 +3608,7 @@ def local_reciprocal_1_plus_exp(fgraph, node):
 # sigmoid(log(x / (1-x)) -> x
 # i.e., sigmoid(logit(x)) -> x
 local_sigmoid_logit = PatternNodeRewriter(
-    (sigmoid, (log, (true_div, "x", (sub, 1, "x")))),
+    (sigmoid, (log, (true_divide, "x", (sub, 1, "x")))),
     "x",
     allow_multiple_clients=True,
     name="local_sigmoid_logit",
diff --git a/aesara/tensor/rewriting/special.py b/aesara/tensor/rewriting/special.py
index 747f2ce726..167585b4bb 100644
--- a/aesara/tensor/rewriting/special.py
+++ b/aesara/tensor/rewriting/special.py
@@ -3,7 +3,7 @@
 from aesara.tensor.elemwise import DimShuffle, Elemwise
 from aesara.tensor.math import Sum, exp
 from aesara.tensor.math import sum as at_sum
-from aesara.tensor.math import true_div
+from aesara.tensor.math import true_divide
 from aesara.tensor.rewriting.basic import register_specialize
 from aesara.tensor.rewriting.math import local_mul_canonizer
 from aesara.tensor.special import LogSoftmax, Softmax, SoftmaxGrad
@@ -50,7 +50,7 @@ def local_logsoftmax_grad(fgraph, node):
         isinstance(node.op, SoftmaxGrad)
         and len(node.inputs) == 2
         and node.inputs[0].owner is not None
-        and node.inputs[0].owner.op == true_div
+        and node.inputs[0].owner.op == true_divide
         and len(node.inputs[0].owner.inputs) >= 2
         and node.inputs[0].owner.inputs[1].owner is not None
         and isinstance(node.inputs[0].owner.inputs[1].owner.op, Softmax)
@@ -58,7 +58,7 @@ def local_logsoftmax_grad(fgraph, node):
         and not (
             # skip if it will be optimized by
             # local_advanced_indexing_crossentropy_onehot_grad
-            node.inputs[0].owner.op == true_div
+            node.inputs[0].owner.op == true_divide
             and node.inputs[0].owner.inputs[0].owner is not None
             and isinstance(
                 node.inputs[0].owner.inputs[0].owner.op, AdvancedIncSubtensor
diff --git a/aesara/tensor/var.py b/aesara/tensor/var.py
index d23f9b5ec6..249f63c002 100644
--- a/aesara/tensor/var.py
+++ b/aesara/tensor/var.py
@@ -169,14 +169,14 @@ def __mod__(self, other):
     def __divmod__(self, other):
         return at.math.divmod(self, other)
 
     def __truediv__(self, other):
-        return at.math.true_div(self, other)
+        return at.math.true_divide(self, other)
 
     def __floordiv__(self, other):
         return at.math.floor_div(self, other)
 
     def __rtruediv__(self, other):
-        return at.math.true_div(other, self)
+        return at.math.true_divide(other, self)
 
     def __rfloordiv__(self, other):
         return at.math.floor_div(other, self)
diff --git a/doc/extending/graph_rewriting.rst b/doc/extending/graph_rewriting.rst
index 9eb8d282ec..0e0a94f541 100644
--- a/doc/extending/graph_rewriting.rst
+++ b/doc/extending/graph_rewriting.rst
@@ -111,7 +111,7 @@ simplification described above:
 
         def apply(self, fgraph):
             for node in fgraph.toposort():
-                if node.op == true_div:
+                if node.op == true_divide:
                     x, y = node.inputs
                     z = node.outputs[0]
                     if x.owner and x.owner.op == mul:
@@ -152,17 +152,17 @@
 nothing.
 
 Now, we test the rewriter:
 
->>> from aesara.scalar import float64, add, mul, true_div
+>>> from aesara.scalar import float64, add, mul, true_divide
 >>> x = float64('x')
 >>> y = float64('y')
 >>> z = float64('z')
->>> a = add(z, mul(true_div(mul(y, x), y), true_div(z, x)))
+>>> a = add(z, mul(true_divide(mul(y, x), y), true_divide(z, x)))
 >>> e = aesara.graph.fg.FunctionGraph([x, y, z], [a])
 >>> e
-FunctionGraph(add(z, mul(true_div(mul(y, x), y), true_div(z, x))))
+FunctionGraph(add(z, mul(true_divide(mul(y, x), y), true_divide(z, x))))
 >>> simplify.rewrite(e)
 >>> e
-FunctionGraph(add(z, mul(x, true_div(z, x))))
+FunctionGraph(add(z, mul(x, true_divide(z, x))))
 
 You can check what happens if you put many instances of :math:`\frac{xy}{y}`
 in the graph. Note that it sometimes
@@ -172,13 +172,13 @@ rewrite you wrote. For example, consider the following:
 >>> x = float64('x')
 >>> y = float64('y')
 >>> z = float64('z')
->>> a = true_div(mul(add(y, z), x), add(y, z))
+>>> a = true_divide(mul(add(y, z), x), add(y, z))
 >>> e = aesara.graph.fg.FunctionGraph([x, y, z], [a])
 >>> e
-FunctionGraph(true_div(mul(add(y, z), x), add(y, z)))
+FunctionGraph(true_divide(mul(add(y, z), x), add(y, z)))
 >>> simplify.rewrite(e)
 >>> e
-FunctionGraph(true_div(mul(add(y, z), x), add(y, z)))
+FunctionGraph(true_divide(mul(add(y, z), x), add(y, z)))
 
 Nothing happened here. The reason is: ``add(y, z) != add(y, z)``.
 That is the case for efficiency reasons. To fix this problem we
@@ -190,7 +190,7 @@ computation, using the :class:`MergeOptimizer` defined in
 >>> MergeOptimizer().rewrite(e)  # doctest: +ELLIPSIS
 (0, ..., None, None, {}, 1, 0)
 >>> e
-FunctionGraph(true_div(mul(*1 -> add(y, z), x), *1))
+FunctionGraph(true_divide(mul(*1 -> add(y, z), x), *1))
 >>> simplify.rewrite(e)
 >>> e
 FunctionGraph(x)
@@ -224,7 +224,7 @@ The local version of the above code would be the following:
 
     class LocalSimplify(NodeRewriter):
         def transform(self, fgraph, node):
-            if node.op == true_div:
+            if node.op == true_divide:
                 x, y = node.inputs
                 if x.owner and x.owner.op == mul:
                     a, b = x.owner.inputs
@@ -237,7 +237,7 @@ The local version of the above code would be the following:
         def tracks(self):
             # This tells certain navigators to only apply this `NodeRewriter`
             # on these kinds of `Op`s
-            return [true_div]
+            return [true_divide]
 
     local_simplify = LocalSimplify()
 
@@ -261,15 +261,15 @@ subset of them) and applies one or several node rewriters.
 >>> x = float64('x')
 >>> y = float64('y')
 >>> z = float64('z')
->>> a = add(z, mul(true_div(mul(y, x), y), true_div(z, x)))
+>>> a = add(z, mul(true_divide(mul(y, x), y), true_divide(z, x)))
 >>> e = aesara.graph.fg.FunctionGraph([x, y, z], [a])
 >>> e
-FunctionGraph(add(z, mul(true_div(mul(y, x), y), true_div(z, x))))
+FunctionGraph(add(z, mul(true_divide(mul(y, x), y), true_divide(z, x))))
 >>> simplify = aesara.graph.rewriting.basic.WalkingGraphRewriter(local_simplify)
 >>> simplify.rewrite(e)
 (<aesara.graph.rewriting.basic.WalkingGraphRewriter object at 0x...>, 1, 5, 3, ..., ..., ...)
 >>> e
-FunctionGraph(add(z, mul(x, true_div(z, x))))
+FunctionGraph(add(z, mul(x, true_divide(z, x))))
 
 :class:`SubstitutionNodeRewriter`, :class:`RemovalNodeRewriter`, :class:`PatternNodeRewriter`
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -309,8 +309,8 @@ Aesara defines some shortcuts to make :class:`NodeRewriter`\s:
     # The "simplify" operation we've been defining in the past few
     # sections. Note that we need two patterns to account for the
     # permutations of the arguments to `mul`.
-    local_simplify_1 = PatternNodeRewriter((true_div, (mul, 'x', 'y'), 'y'), 'x')
-    local_simplify_2 = PatternNodeRewriter((true_div, (mul, 'x', 'y'), 'x'), 'y')
+    local_simplify_1 = PatternNodeRewriter((true_divide, (mul, 'x', 'y'), 'y'), 'x')
+    local_simplify_2 = PatternNodeRewriter((true_divide, (mul, 'x', 'y'), 'x'), 'y')
 
 .. note::
diff --git a/doc/tutorial/printing_drawing.rst b/doc/tutorial/printing_drawing.rst
index 2323ee0560..22d5846a31 100644
--- a/doc/tutorial/printing_drawing.rst
+++ b/doc/tutorial/printing_drawing.rst
@@ -67,7 +67,7 @@ The pre-compilation graph:
 
 >>> aesara.printing.debugprint(prediction)  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
 Elemwise{gt,no_inplace} [id A] ''
- |Elemwise{true_div,no_inplace} [id B] ''
+ |Elemwise{true_divide,no_inplace} [id B] ''
 | |InplaceDimShuffle{x} [id C] ''
 | | |TensorConstant{1} [id D]
 | |Elemwise{add,no_inplace} [id E] ''
diff --git a/tests/scalar/test_basic.py b/tests/scalar/test_basic.py
index 744e100601..bab60d1b4c 100644
--- a/tests/scalar/test_basic.py
+++ b/tests/scalar/test_basic.py
@@ -53,7 +53,7 @@
     switch,
     tan,
     tanh,
-    true_div,
+    true_divide,
     uint8,
 )
 from aesara.tensor.type import fscalar, imatrix, iscalar, matrix
@@ -62,7 +62,7 @@
 
 def test_mul_add_true():
     x, y, z = floats("xyz")
-    e = mul(add(x, y), true_div(x, y))
+    e = mul(add(x, y), true_divide(x, y))
     g = FunctionGraph([x, y], [e])
     fn = make_function(DualLinker().accept(g))
     assert fn(1.0, 2.0) == 1.5
@@ -108,7 +108,7 @@ def has_f16(comp):
 
     def test_straightforward(self):
         x, y, z = floats("xyz")
-        e = mul(add(x, y), true_div(x, y))
+        e = mul(add(x, y), true_divide(x, y))
         C = Composite([x, y], [e])
         c = C.make_node(x, y)
         # print c.c_code(['x', 'y'], ['z'], dict(id = 0))
@@ -130,7 +130,7 @@ def test_flatten(self):
 
     def test_with_constants(self):
         x, y, z = floats("xyz")
-        e = mul(add(70.0, y), true_div(x, y))
+        e = mul(add(70.0, y), true_divide(x, y))
         comp_op = Composite([x, y], [e])
         comp_node = comp_op.make_node(x, y)
@@ -367,10 +367,10 @@ def test_true_div(self):
         xf = ScalarType(aesara.config.floatX)("xf")
         yf = ScalarType(aesara.config.floatX)("yf")
 
-        ei = true_div(xi, yi)
+        ei = true_divide(xi, yi)
         fi = aesara.function([xi, yi], ei)
 
-        ef = true_div(xf, yf)
+        ef = true_divide(xf, yf)
         ff = aesara.function([xf, yf], ef)
 
         for x_val in x_range:
diff --git a/tests/tensor/rewriting/test_basic.py b/tests/tensor/rewriting/test_basic.py
index 9bc2caad5a..76a5ed3f2d 100644
--- a/tests/tensor/rewriting/test_basic.py
+++ b/tests/tensor/rewriting/test_basic.py
@@ -52,7 +52,7 @@
 from aesara.tensor.math import pow as at_pow
 from aesara.tensor.math import softplus, sqrt, sub
 from aesara.tensor.math import sum as at_sum
-from aesara.tensor.math import true_div
+from aesara.tensor.math import true_divide
 from aesara.tensor.rewriting.basic import (
     assert_op,
     local_alloc_sink_dimshuffle,
@@ -898,7 +898,7 @@ def test_local_div_switch_sink(self):
             (dvector("x"), self.xv),
             (dscalar("x"), self.xs),
         ]:
-            y = true_div(
+            y = true_divide(
                 at.switch(condition[0] > 0, 1.0 * x[0], 0.0 * x[0]),
                 at.switch(condition[0] > 0, 1.0 * x[0], log(c) * x[0]),
             )
@@ -1083,7 +1083,7 @@ class TestLocalMergeSwitchSameCond:
             add,
             sub,
             mul,
-            true_div,
+            true_divide,
             int_div,
             floor_div,
             minimum,
@@ -1447,7 +1447,7 @@ def test_local_upcast_elemwise_constant_inputs():
     # This tests a corner case for which the rewrite should not be applied.
     with config.change_flags(floatX="float32"):
         v = lvector()
-        function([v], true_div(v, 2))
+        function([v], true_divide(v, 2))
 
 
 def test_assert_op_gradient():
diff --git a/tests/tensor/rewriting/test_elemwise.py b/tests/tensor/rewriting/test_elemwise.py
index d4d5c58cdf..8b86eb37d1 100644
--- a/tests/tensor/rewriting/test_elemwise.py
+++ b/tests/tensor/rewriting/test_elemwise.py
@@ -43,7 +43,7 @@
 from aesara.tensor.math import round as at_round
 from aesara.tensor.math import sin, sinh, sqr, sqrt
 from aesara.tensor.math import sum as at_sum
-from aesara.tensor.math import tan, tanh, true_div, xor
+from aesara.tensor.math import tan, tanh, true_divide, xor
 from aesara.tensor.rewriting.elemwise import local_dimshuffle_lift
 from aesara.tensor.rewriting.shape import local_useless_dimshuffle_in_reshape
 from aesara.tensor.shape import reshape
@@ -611,7 +611,7 @@ def my_init(dtype="float64", num=0):
                 "float32",
             ),
             (
-                fx - true_div(fy, 2),
+                fx - true_divide(fy, 2),
                 (fx, fy),
                 (fxv, fyv),
                 1,
@@ -619,7 +619,7 @@ def my_init(dtype="float64", num=0):
                 "float32",
             ),
             (
-                fx - true_div(fy, fz),
+                fx - true_divide(fy, fz),
                 (fx, fy, fz),
                 (fxv, fyv, fzv),
                 1,
diff --git a/tests/tensor/rewriting/test_math.py b/tests/tensor/rewriting/test_math.py
index 80e7ea5c45..ebe412395c 100644
--- a/tests/tensor/rewriting/test_math.py
+++ b/tests/tensor/rewriting/test_math.py
@@ -78,7 +78,7 @@
 from aesara.tensor.math import round as at_round
 from aesara.tensor.math import sgn, sigmoid, sin, sinh, softplus, sqr, sqrt, sub
 from aesara.tensor.math import sum as at_sum
-from aesara.tensor.math import tan, tanh, true_div, xor
+from aesara.tensor.math import tan, tanh, true_divide, xor
 from aesara.tensor.rewriting.elemwise import local_dimshuffle_lift
 from aesara.tensor.rewriting.math import (
     compute_mul,
@@ -594,7 +594,7 @@ def test_mul_div_cases(self):
             assert out_dtype == out.dtype
             utt.assert_allclose(out, val_inputs[1])
             topo = f.maker.fgraph.toposort()
-            assert not any(node.op == at.true_div for node in topo)
+            assert not any(node.op == at.true_divide for node in topo)
 
         # test x / y / x -> 1 / y
         for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate(
@@ -1431,7 +1431,7 @@ def my_init(shp, dtype="float64", num=0):
                 "float32",
             ),
             (
-                fx - true_div(fy, 2),
+                fx - true_divide(fy, 2),
                 (fx, fy),
                 (fxv, fyv),
                 1,
@@ -1439,7 +1439,7 @@ def my_init(shp, dtype="float64", num=0):
                 "float32",
             ),
             (
-                fx - true_div(fy, fz),
+                fx - true_divide(fy, fz),
                 (fx, fy, fz),
                 (fxv, fyv, fzv),
                 1,
@@ -2796,7 +2796,7 @@ def test_local_div_switch_sink(self):
             (dvector("x"), self.xv),
             (dscalar("x"), self.xs),
         ]:
-            y = true_div(
+            y = true_divide(
                 at.switch(condition[0] > 0, 1.0 * x[0], 0.0 * x[0]),
                 at.switch(condition[0] > 0, 1.0 * x[0], log(c) * x[0]),
             )
@@ -3132,7 +3132,7 @@ def test_elemwise(self):
             add,
             sub,
             mul,
-            true_div,
+            true_divide,
             int_div,
             floor_div,
             minimum,
@@ -3951,7 +3951,7 @@ def test3(self):
 
 
 @pytest.mark.parametrize("t", [scalar, ivector, ftensor4])
-@pytest.mark.parametrize("op", [int_div, true_div])
+@pytest.mark.parametrize("op", [int_div, true_divide])
 def test_local_zero_div(t, op):
     """Test the canonicalization ``0/x -> 0``."""
     x = t("x")
diff --git a/tests/tensor/rewriting/test_special.py b/tests/tensor/rewriting/test_special.py
index 7da3b2a953..79bffeb2ae 100644
--- a/tests/tensor/rewriting/test_special.py
+++ b/tests/tensor/rewriting/test_special.py
@@ -9,7 +9,7 @@
 from aesara.graph.fg import FunctionGraph
 from aesara.graph.rewriting.basic import check_stack_trace
 from aesara.graph.rewriting.db import RewriteDatabaseQuery
-from aesara.tensor.math import add, exp, log, true_div
+from aesara.tensor.math import add, exp, log, true_divide
 from aesara.tensor.special import LogSoftmax, Softmax, SoftmaxGrad, softmax
 from aesara.tensor.type import matrix
 from tests import unittest_tools as utt
@@ -78,7 +78,7 @@ def test_logsoftmax_grad_true_div_elemwise(self):
         softmax_grad_node = g.owner
         assert softmax_grad_node.op == SoftmaxGrad(axis=-1)
         true_div_node = softmax_grad_node.inputs[0].owner
-        assert true_div_node.op == true_div
+        assert true_div_node.op == true_divide
 
-        # We replace thk elemwise true_div op by an elemwise add.
+        # We replace the elemwise true_divide op by an elemwise add.
         new_g = SoftmaxGrad(axis=-1)(
diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py
index f56e5b0358..554094ca0c 100644
--- a/tests/tensor/test_extra_ops.py
+++ b/tests/tensor/test_extra_ops.py
@@ -1210,7 +1210,7 @@ def test_broadcast_shape_symbolic_one_symbolic():
     three_at = at.as_tensor(3, dtype=np.int64)
 
     int_div = one_at / one_at
-    assert int_div.owner.op == at.true_div
+    assert int_div.owner.op == at.true_divide
 
     index_shapes = [
         (one_at, one_at, three_at),
diff --git a/tests/tensor/test_math.py b/tests/tensor/test_math.py
index 3b7d4c2447..3bb13dd912 100644
--- a/tests/tensor/test_math.py
+++ b/tests/tensor/test_math.py
@@ -109,7 +109,7 @@
     sub,
 )
 from aesara.tensor.math import sum as at_sum
-from aesara.tensor.math import tan, tanh, tensordot, true_div, trunc, var
+from aesara.tensor.math import tan, tanh, tensordot, true_divide, trunc, var
 from aesara.tensor.type import (
     TensorType,
     complex_dtypes,
@@ -327,7 +327,7 @@ def test_maximum_minimum_grad():
 # fmt: on
 
 TestTrueDivBroadcast = makeBroadcastTester(
-    op=true_div,
+    op=true_divide,
     expected=_numpy_true_div,
     good=_good_broadcast_div_mod_normal_float_no_complex,
     grad=_grad_broadcast_div_mod_normal,
@@ -1832,8 +1832,8 @@ def test_impls(self):
         assert np.allclose(function([i, f], f / i)(5, 11.0), (11.0 / 5.0))
         assert np.allclose(function([i, ii], i // ii)(5, 3), (5 // 3))
         assert np.allclose(function([i, ii], ii // i)(5, 3), (3 // 5))
-        assert np.allclose(function([i, ii], true_div(i, ii))(5, 3), (5.0 / 3.0))
-        assert np.allclose(function([i, ii], true_div(ii, i))(5, 3), (3.0 / 5.0))
+        assert np.allclose(function([i, ii], true_divide(i, ii))(5, 3), (5.0 / 3.0))
+        assert np.allclose(function([i, ii], true_divide(ii, i))(5, 3), (3.0 / 5.0))
         assert np.allclose(function([i, c], i / c)(5, complex(5, 3)), (5.0 / (5 + 3j)))
        assert np.allclose(function([i, c], c / i)(5, complex(5, 3)), ((5 + 3j) / 5.0))
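
Note on scope: Python's ``/`` operator still dispatches through
``TensorVariable.__truediv__`` and ``__rtruediv__``; those special-method
names are fixed by the Python data model and are left untouched. Only the
Aesara ``Op`` and its helper function are respelled ``true_divide``, matching
``np.true_divide``, so graphs built with the ``/`` operator are unaffected.
A minimal doctest-style sketch of the renamed API (not part of the diff
above; assumes a build with this patch applied):

>>> import aesara
>>> import aesara.tensor as at
>>> x = at.dscalar("x")
>>> y = at.dscalar("y")
>>> z = x / y  # still dispatches through TensorVariable.__truediv__
>>> z.owner.op == at.true_divide  # the Op is now spelled true_divide
True
>>> f = aesara.function([x, y], z)
>>> float(f(3.0, 2.0))
1.5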