diff --git a/doc/conf.py b/doc/conf.py index 4809038e..28dadebc 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -28,7 +28,7 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ["sphinx.ext.autodoc", "sphinx_copybutton"] +extensions = ["sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx_copybutton"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/doc/index.rst b/doc/index.rst index 496c4d49..920de4b9 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -27,10 +27,8 @@ quick taste of how to use :mod:`uncertainties`: The :mod:`uncertainties` library calculates uncertainties using linear `error propagation theory`_ by automatically :ref:`calculating derivatives ` and analytically propagating these to the results. Correlations -between variables are automatically handled. This library can also yield the -derivatives of any expression with respect to the variables that have uncertain -values. For other approaches, see soerp_ (using higher-order terms) and mcerp_ -(using a Monte-Carlo approach). +between variables are automatically handled. For other approaches, see soerp_ (using +higher-order terms) and mcerp_ (using a Monte-Carlo approach). The `source code`_ for the uncertainties package is licensed under the `Revised BSD License`_. This documentation is licensed under the `CC-SA-3 License`_. diff --git a/doc/make.bat b/doc/make.bat new file mode 100644 index 00000000..207019d5 --- /dev/null +++ b/doc/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/tech_guide.rst b/doc/tech_guide.rst index f3dc5284..c59918c0 100644 --- a/doc/tech_guide.rst +++ b/doc/tech_guide.rst @@ -27,7 +27,7 @@ user-supplied function. .. autofunction:: ufloat_fromstr -.. autoclass:: Variable +.. autoclass:: UFloat .. autofunction:: wrap diff --git a/doc/user_guide.rst b/doc/user_guide.rst index 5ba5d465..ac9081ec 100644 --- a/doc/user_guide.rst +++ b/doc/user_guide.rst @@ -5,28 +5,31 @@ User Guide ========== - -Basic usage -=========== - -Basic mathematical operations involving numbers with uncertainties requires -importing the :func:`ufloat` function which creates a :class:`Variable`: -number with both a nominal value and an uncertainty. - - >>> from uncertainties import ufloat - >>> x = ufloat(2.7, 0.01) # a Variable with a value 2.7+/-0.01 - -The :mod:`uncertainties` module contains sub-modules for :ref:`advanced -mathematical functions `, and :doc:`arrays and -matrices `, which can be accessed as well. +The :mod:`uncertainties` package is built around the :class:`UFloat` class. +:class:`UFloat` objects model statistical random variables. +A :class:`UFloat` object has a nominal value which models the mean of a random variable +and an uncertainty which can be used to calculate the standard deviation of the random +variable and correlations between one random variable and another. +It is possible to construct new random variables by applying mathematical operations to +and between existing random variables. 
+Likewise, it is possible to apply mathematical operations such as arithmetic or +trigonometric functions to :class:`UFloat` objects to get new :class:`UFloat` objects. +The nominal values pass through the mathematical operations as if they were regular +:class:`float` objects, but the uncertainties propagate according to the approximate +rules of linear error propagation based on local derivatives of the mathematical +operations. + +In addition to the :class:`UFloat` class, the :mod:`uncertainties` package also provides +sub-modules for :ref:`advanced mathematical functions `, and +:doc:`arrays and matrix ` operations. .. index:: - pair: number with uncertainty; creation + pair: number with uncertainty; -Creating Variables: numbers with uncertainties -================================================ +Creating UFloat objects: numbers with uncertainties +=================================================== -To create a number with uncertainties or *Variable*, use the :func:`ufloat` +To create a number with uncertainties or :class:`UFloat`, use the :func:`ufloat` function, which takes a *nominal value* (which can be interpreted as the most likely value, or the mean or central value of the distribution of values), a *standard error* (the standard deviation or :math:`1-\sigma` uncertainty), and @@ -39,8 +42,8 @@ an optional *tag*: pair: nominal value; scalar pair: uncertainty; scalar -You can access the nominal value and standard deviation for any Variable with -the `nominal_value` and `std_dev` attributes: +You can access the nominal value and standard deviation for any :class:`UFloat` object +with the `nominal_value` and `std_dev` attributes: >>> print(x.nominal_value, x.std_dev) 2.7 0.01 @@ -52,9 +55,9 @@ abbreviated as `n` and `std_dev` as `s`: >>> print(x.n, x.s) 2.7 0.01 -uncertainties Variables can also be created from one of many string -representations. 
The following forms will all create Variables with the same -value: +uncertainties :class:`UFloat` objects can also be created from one of many string +representations. The following forms will all create :class:`UFloat` objects with +the same values: >>> from uncertainties import ufloat_fromstr >>> x = ufloat(0.2, 0.01) @@ -68,12 +71,13 @@ value: More details on the :func:`ufloat` and :func:`ufloat_from_str` can be found in :ref:`api_funcs`. -Basic math with uncertain Variables -========================================= +Basic math with uncertain UFloat objects +======================================== -Uncertainties variables created in :func:`ufloat` or :func:`ufloat_fromstr` can -be used in basic mathematical calculations (``+``, ``-``, ``*``, ``/``, ``**``) -as with other Python numbers and variables. +Uncertainties :class:`UFloat` objects created using :func:`ufloat` or +:func:`ufloat_fromstr` can be used in basic mathematical calculations +(``+``, ``-``, ``*``, ``/``, ``**``) +as with other Python numbers. >>> t = ufloat(0.2, 0.01) >>> double = 2.0*t @@ -83,44 +87,45 @@ as with other Python numbers and variables. >>> print(square) 0.040+/-0.004 -When adding two Variables, the uncertainty in the result is the quadrature sum -(square-root of the sum of squares) of the uncertainties of the two Variables: +When adding two uncorrelated :class:`UFloat` objects, the standard deviation in the +resulting :class:`UFloat` object is the quadrature sum (square-root of the sum of +squares) of the standard deviations of the two input :class:`UFloat` objects. >>> x = ufloat(20, 4) >>> y = ufloat(12, 3) >>> print(x+y) 32.0+/-5.0 -We can check that error propagation when adding two independent variables -(using the abbreviation `.s` for the standard error): +Note that calls :func:`ufloat` create :class:`UFloat` objects which have no correlation +to any previously created :class:`UFloat` objects. 
+We can check the correctness of the error propagation for adding two uncorrelated +:class:`UFloat` objects: >>> from math import sqrt >>> (x+y).s == sqrt(x.s**2 + y.s**2) True - -Multiplying two Variables will properly propagate those -uncertainties too: +We can also multiply two :class:`UFloat` objects: >>> print(x*y) 240.0+/-76.83749084919418 >>> (x*y).s == (x*y).n * sqrt((x.s/x.n)**2 + (y.s/y.n)**2 ) True -But note that adding a Variable to itself does not add its uncertainties in -quadrature, but are simply scaled: +However, consider the behavior when we add a :class:`UFloat` object to itself: >>> print(x+x) 40.0+/-8.0 >>> print(3*x + 10) 70.0+/-12.0 - -It is important to understand that calculations done with Variable know about -the correlation between the Variables. Variables created with :func:`ufloat` -(and :func:`ufloat_fromstr`) are completely uncorrelated with each other, but -are known to be completely correlated with themselves. This means that - +In this case the resulting standard deviation is not the quadrature sum of the +standard deviation on the inputs. +This is because the :class:`UFloat` ``x`` is correlated with itself so the error +propagation must be handled accordingly. +This begins to demonstrate that calculations performed using :class:`UFloat` objects are +aware of correlations between :class:`UFloat` objects. +This is demonstrated in these two simple examples: >>> x = ufloat(5, 0.5) >>> y = ufloat(5, 0.5) @@ -129,20 +134,15 @@ are known to be completely correlated with themselves. This means that >>> x - x 0.0+/-0 -For two *different* Variables, uncorrelated uncertainties will be propagated. -But when doing a calculation with a single Variable, the uncertainties are -correlated, and calculations will reflect that. - - .. index:: mathematical operation; on a scalar, umath .. 
_advanced math operations: -Mathematical operations with uncertain Variables +Mathematical operations with uncertain UFloat objects ===================================================== -Besides being able to apply basic mathematical operations to uncertainties -Variables, this package provides generalized versions of 40 of the the +Besides being able to apply basic arithmetic operations to uncertainties +:class:`UFloat` objects, this package provides generalized versions of 40 of the functions from the standard :mod:`math` *module*. These mathematical functions are found in the :mod:`uncertainties.umath` module: @@ -171,19 +171,17 @@ The functions in the :mod:`uncertainties.umath` module include: Comparison operators ==================== -Comparison operators (``==``, ``!=``, ``>``, ``<``, ``>=``, and ``<=``) for Variables with -uncertainties are somewhat complicated, and need special attention. As we -hinted at above, and will explore in more detail below and in the +Comparison operators (``==``, ``!=``, ``>``, ``<``, ``>=``, and ``<=``) for +:class:`UFloat` objects with uncertainties are somewhat complicated, and need special +attention. As we hinted at above, and will explore in more detail below and in the :ref:`Technical Guide `, this relates to the correlation -between Variables. - - +between :class:`UFloat` objects. Equality and inequality comparisons ------------------------------------ -If we compare the equality of two Variables with the same nominal value and -uncertainty, we see +If we compare the equality of two :class:`UFloat` objects with the same nominal value +and standard deviation, we see >>> x = ufloat(5, 0.5) >>> y = ufloat(5, 0.5) @@ -192,20 +190,20 @@ True >>> x == y False -The difference here is that although the two Python objects have the same -nominal value and uncertainty, these are independent, uncorrelated values.
It -is not exactly true that the difference is based on identity, note that +The difference here is that although the two :class:`UFloat` objects have the same +nominal value and standard deviation, they model two *uncorrelated* random variables. +This can be demonstrated with the :meth:`UFloat.covariance` method. ->>> x == (1.0*x) -True ->>> x is (1.0*x) -False +>>> print(x.covariance(x)) +0.25 +>>> print(x.covariance(y)) +0 In order for the result of two calculations with uncertainties to be considered equal, the :mod:`uncertainties` package does not test whether the nominal value -and the uncertainty have the same value. Instead it checks whether the -difference of the two calculations has a nominal value of 0 *and* an -uncertainty of 0. +and the standard deviation have the same value. Instead it checks whether the +difference of the two calculations has a nominal value of 0 *and* a standard deviation +of 0. >>> (x -x) 0.0+/-0 @@ -214,13 +212,14 @@ uncertainty of 0. Comparisons of magnitude ------------------------------------- +------------------------ The concept of comparing the magnitude of values with uncertainties is a bit -complicated. That is, a Variable with a value of 25 +/- 10 might be greater -than a Variable with a value of 24 +/- 8 most of the time, but *sometimes* it -might be less than it. The :mod:`uncertainties` package takes the simple -approach of comparing nominal values. That is +complicated. That is, a random variable with a mean of 25 and standard deviation of 1 +might be greater than a random variable with a mean of 24 and standard deviation of 0.8 +most of the time, but *sometimes* it might be less than it. +The :mod:`uncertainties` package takes the simple approach of comparing nominal values. +That is >>> a = ufloat(25, 10) >>> b = ufloat(24, 8) @@ -228,7 +227,7 @@ approach of comparing nominal values. 
That is True Note that combining this comparison and the above discussion of `==` and `!=` -can lead to a result that maybe somewhat surprising: +can lead to a somewhat surprising result: >>> a = ufloat(25, 10) @@ -258,10 +257,10 @@ Variable. As is always the case, care must be exercised when handling NaN values. While :func:`math.isnan` and :func:`numpy.isnan` will raise `TypeError` -exceptions for uncertainties Variables (because an uncertainties Variable is -not a float), the function :func:`umath.isnan` will return whether the nominal -value of a Variable is NaN. Similarly, :func:`umath.isinf` will return whether -the nominal value of a Variable is infinite. +exceptions for uncertainties :class:`UFloat` objects (because an uncertainties +:class:`UFloat` object is not a float), the function :func:`umath.isnan` will return +whether the nominal value of a Variable is NaN. Similarly, :func:`umath.isinf` will +return whether the nominal value of a Variable is infinite. To check whether the uncertainty is NaN or Inf, use one of :func:`math.isnan`, :func:`math.isinf`, :func:`nupmy.isnan`, or , :func:`nupmy.isinf` on the @@ -274,8 +273,8 @@ To check whether the uncertainty is NaN or Inf, use one of :func:`math.isnan`, Automatic correlations ====================== -Correlations between variables are **automatically handled** whatever -the number of variables involved, and whatever the complexity of the +Correlations between :class:`UFloat` objects are **automatically handled** whatever +the number of :class:`UFloat` objects involved, and whatever the complexity of the calculation. For example, when :data:`x` is the number with uncertainty defined above, @@ -302,51 +301,128 @@ transparently. 
-Access to the individual sources of uncertainty -=============================================== - -The various contributions to an uncertainty can be obtained through the -:func:`error_components` method, which maps the **independent variables -a quantity depends on** to their **contribution to the total -uncertainty**. According to :ref:`linear error propagation theory -` (which is the method followed by :mod:`uncertainties`), -the sum of the squares of these contributions is the squared -uncertainty. - -The individual contributions to the uncertainty are more easily usable -when the variables are **tagged**: - ->>> u = ufloat(1, 0.1, "u variable") # Tag ->>> v = ufloat(10, 0.1, "v variable") ->>> sum_value = u+2*v ->>> sum_value -21.0+/-0.223606797749979 ->>> for (var, error) in sum_value.error_components().items(): -... print("{}: {}".format(var.tag, error)) -... -u variable: 0.1 -v variable: 0.2 - -The variance (i.e. squared uncertainty) of the result -(:data:`sum_value`) is the quadratic sum of these independent -uncertainties, as it should be (``0.1**2 + 0.2**2``). - -The tags *do not have to be distinct*. For instance, *multiple* random -variables can be tagged as ``"systematic"``, and their contribution to -the total uncertainty of :data:`result` can simply be obtained as: +UAtoms: How Uncertainty and Correlations are Tracked +==================================================== + +The basic, indivisible, unit of uncertainty in the :mod:`uncertainties` package is the +:class:`UAtom`. +A :class:`UAtom` models a random variable with zero mean and unity standard deviation. +Every :class:`UAtom` is unique and uncorrelated with every other :class:`UAtom`. +The uncertainty of a :class:`UFloat` object is a :class:`UCombo` object which models a +:class:`float`-weighted linear combination of :class:`UAtom` objects. +A :class:`UFloat` object can be thought of as a sum of a fixed nominal value together +with a zero-mean :class:`UCombo` object. 
+ +The standard deviation of a single :class:`UFloat` object is calculated by taking the +square root of the sum of squares of the weights for all the :class:`UAtom` objects that make up the +corresponding :attr:`uncertainty` attribute for that :class:`UFloat` object. +The covariance between two :class:`UFloat` objects is calculated by taking the sum +of products of the weights of shared :class:`UAtom` objects between the two +:class:`UFloat` :attr:`uncertainty` attributes. + +Every time a new :class:`UFloat` is instantiated via the :func:`ufloat` function a +single new independent :class:`UAtom` is also instantiated (and given the optional tag +passed into :func:`ufloat`) and paired with the new :class:`UFloat`. +When :class:`UFloat` objects are combined together using mathematical operations the +resulting :class:`UFloat` objects inherit dependence on the :class:`UAtom` objects +on which the input :class:`UFloat` objects depend in accordance with +:ref:`linear error propagation theory `. +In this way, the correlation between :class:`UFloat` objects can be tracked. + +We can get access to the :class:`UAtom` objects on which a given :class:`UFloat` +depends, and their corresponding weights using the :attr:`UFloat.error_components` +attribute: + + +.. testsetup:: uuid + + from uncertainties import ufloat + from unittest.mock import patch + import uuid + import random + + class FakeUUID4: + def __init__(self): + self.seed = 0 + self.rnd = random.Random() + + def __call__(self): + self.rnd.seed(self.seed) + fake_uuid = uuid.UUID(int=self.rnd.getrandbits(128), version=4) + self.seed += 1 + return fake_uuid + fake_uuid4 = FakeUUID4() + + p = patch('uncertainties.ucombo.uuid.uuid4', fake_uuid4) + p.start() + +.. 
doctest:: uuid + + >>> x = ufloat(1, 0.1) + >>> y = ufloat(2, 0.3) + >>> z = x * y + >>> print(x.error_components) + {UAtom(e3e70682-c209-4cac-a29f-6fbed82c07cd): 0.1} + >>> print(y.error_components) + {UAtom(cd613e30-d8f1-4adf-91b7-584a2265b1f5): 0.3} + >>> print(z.error_components) + {UAtom(cd613e30-d8f1-4adf-91b7-584a2265b1f5): 0.3, UAtom(e3e70682-c209-4cac-a29f-6fbed82c07cd): 0.2} + +The standard deviation of each :class:`UFloat` is given by the square root of the sum of squares of the +weights for all the :class:`UAtom` objects on which that :class:`UFloat` depends. + +.. doctest:: uuid + + >>> print(x.std_dev) + 0.1 + >>> print(y.std_dev) + 0.3 + >>> print(z.std_dev) + 0.36055512754639896 + +The :func:`ufloat` function accepts a ``tag`` argument. +If a string is passed in as the ``tag`` then this ``tag`` gets added to the new +:class:`UAtom` object that is instantiated together with the new :class:`UFloat`. +Note that :class:`UFloat` objects do not carry tags, only the underlying :class:`UAtom` +objects do. +The tags on :class:`UAtom` objects can be used to keep track of the statistical +relationships in a more human-readable way: + +.. doctest:: uuid + + >>> x = ufloat(1, 0.1, tag='x') + >>> y = ufloat(2, 0.3, tag='y') + >>> z = x * y + >>> + >>> for uatom, weight in z.error_components.items(): + ... if uatom.tag is not None: + ... label = uatom.tag + ... else: + ... label = uatom.uuid + ... print(f"{label}: {weight}") + y: 0.3 + x: 0.2 + + +.. testcleanup:: uuid + + p.stop() + +The tags *do not have to be distinct*. For instance, *multiple* :class:`UFloat` objects +can be tagged as ``"systematic"``, and their contribution to the total uncertainty of +:data:`result` can simply be obtained as: >>> syst_error = math.sqrt(sum( # Error from *all* systematic errors ... error**2 -... for (var, error) in result.error_components().items() -... if var.tag == "systematic")) +... for (uatom, error) in result.error_components().items() +... 
if uatom.tag == "systematic")) The remaining contribution to the uncertainty is: >>> other_error = math.sqrt(result.std_dev**2 - syst_error**2) -The variance of :data:`result` is in fact simply the quadratic sum of -these two errors, since the variables from -:func:`result.error_components` are independent. +The variance of :data:`result` is in fact simply the quadratic sum of these two errors, +since the :class:`UAtom` objects from :func:`result.error_components` are independent. .. index:: comparison operators diff --git a/tests/cases/double_inputs.json b/tests/cases/double_inputs.json new file mode 100644 index 00000000..a9faef88 --- /dev/null +++ b/tests/cases/double_inputs.json @@ -0,0 +1,86 @@ +{ + "real": [ + [ + -2.9249265792461614, + -55.85012579473991 + ], + [ + -80.47715772186896, + -95.52360204965058 + ], + [ + -46.66317330650271, + -54.246799586133164 + ], + [ + 76.83020774845227, + -65.30072771420942 + ], + [ + -99.30113055934278, + 50.251117687128925 + ], + [ + 4.490698628165916, + -53.589861183978435 + ], + [ + 78.91935395791347, + -15.236501815329234 + ], + [ + -41.52820441856526, + -36.1797093396341 + ], + [ + -39.0871976088637, + -16.61234220198655 + ], + [ + -19.463566425184382, + -7.86900097049228 + ] + ], + "positive": [ + [ + 85.53554359280284, + 5.2077314883582915 + ], + [ + 71.05349046418226, + 59.57350881505704 + ], + [ + 94.503286127752, + 5.991036642740532 + ], + [ + 18.27907078802746, + 6.567889719237552 + ], + [ + 8.280279561927417, + 67.82173626757626 + ], + [ + 45.78565106549721, + 78.79955748827399 + ], + [ + 55.8027580912824, + 22.9918264565118 + ], + [ + 9.057003520483697, + 25.932550147696997 + ], + [ + 16.75232871348641, + 62.41335089010337 + ], + [ + 38.6284075569242, + 65.17474144156256 + ] + ] +} diff --git a/tests/cases/generate_math_cases.py b/tests/cases/generate_math_cases.py new file mode 100644 index 00000000..569ed68f --- /dev/null +++ b/tests/cases/generate_math_cases.py @@ -0,0 +1,34 @@ +import json +import random + 
+ +valid_inputs_dict = {} + + +def main(): + num_reps = 10 + + single_inputs_dict = { + "real": [random.uniform(-100, 100) for _ in range(num_reps)], + "positive": [random.uniform(0, 100) for _ in range(num_reps)], + "minus_one_to_plus_one": [random.uniform(-1, +1) for _ in range(num_reps)], + "greater_than_one": [random.uniform(+1, 100) for _ in range(num_reps)], + } + with open("single_inputs.json", "w+") as f: + json.dump(single_inputs_dict, f, indent=True) + + double_inputs_dict = { + "real": [ + [random.uniform(-100, 100), random.uniform(-100, +100)] + for _ in range(num_reps) + ], + "positive": [ + [random.uniform(0, 100), random.uniform(0, +100)] for _ in range(num_reps) + ], + } + with open("double_inputs.json", "w+") as f: + json.dump(double_inputs_dict, f, indent=True) + + +if __name__ == "__main__": + main() diff --git a/tests/cases/single_inputs.json b/tests/cases/single_inputs.json new file mode 100644 index 00000000..95c4fade --- /dev/null +++ b/tests/cases/single_inputs.json @@ -0,0 +1,50 @@ +{ + "real": [ + 69.82891420951046, + 8.744849762337111, + 9.501568500140195, + -0.2697150283348435, + -85.39870720878719, + 61.19125397363706, + -83.8244247280742, + 18.175708127431946, + 89.59079964302475, + -7.696969156332287 + ], + "positive": [ + 89.27720557828748, + 31.22932658045743, + 39.91471622647749, + 76.33848530340148, + 9.676420060716396, + 26.249479197071125, + 78.52001380075208, + 42.43263280403463, + 11.746554290944955, + 49.031136773444885 + ], + "minus_one_to_plus_one": [ + 0.580566969475276, + 0.14866137712403948, + 0.16898539840736726, + -0.5004965202207128, + 0.7509607357841148, + 0.8848172839099921, + 0.5100407276275787, + 0.06547584726766464, + -0.1806707468903621, + -0.09163094454513665 + ], + "greater_than_one": [ + 27.47869311012394, + 86.15750156710183, + 34.31440803475614, + 71.90645538572487, + 4.247290853432516, + 21.366978683255986, + 52.47488483558816, + 55.67005996752599, + 71.12816870731481, + 85.93669459425669 + ] +} diff --git 
a/tests/helpers.py b/tests/helpers.py index 7dc7fcea..e9016a1d 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,8 +1,14 @@ -import random from math import isnan, isinf import uncertainties.core as uncert_core -from uncertainties.core import ufloat, AffineScalarFunc +from uncertainties.core import ufloat, UFloat + + +def get_single_uatom(num_with_uncertainty: UFloat): + error_components = num_with_uncertainty.error_components + if len(error_components) > 1: + raise ValueError("UFloat has more than one error component.") + return next(iter(error_components.keys())) def power_all_cases(op): @@ -32,60 +38,69 @@ def power_all_cases(op): non_int_larger_than_one = ufloat(3.1, 0.01) positive_smaller_than_one = ufloat(0.3, 0.01) + assert integer.uncertainty.ucombo_tuple == () + + negative_uatom = get_single_uatom(negative) + positive_uatom = get_single_uatom(positive) + positive2_uatom = get_single_uatom(positive2) + one_uatom = get_single_uatom(one) + zero_uatom = get_single_uatom(zero) + zero2_uatom = get_single_uatom(zero2) + non_int_larger_than_one_uatom = get_single_uatom(non_int_larger_than_one) + positive_smaller_than_one_uatom = get_single_uatom(positive_smaller_than_one) ## negative**integer result = op(negative, integer) - assert not isnan(result.derivatives[negative]) - assert isnan(result.derivatives[integer]) + assert not isnan(result.error_components[negative_uatom]) # Limit cases: result = op(negative, one) - assert result.derivatives[negative] == 1 - assert isnan(result.derivatives[one]) + assert result.error_components[negative_uatom] == negative.std_dev + assert isnan(result.error_components[one_uatom]) result = op(negative, zero) - assert result.derivatives[negative] == 0 - assert isnan(result.derivatives[zero]) + assert result.error_components[negative_uatom] == 0 + assert isnan(result.error_components[zero_uatom]) ## negative**non-integer ## zero**... 
result = op(zero, non_int_larger_than_one) - assert isnan(result.derivatives[zero]) - assert result.derivatives[non_int_larger_than_one] == 0 + assert isnan(result.error_components[zero_uatom]) + assert result.error_components[non_int_larger_than_one_uatom] == 0 # Special cases: result = op(zero, one) - assert result.derivatives[zero] == 1 - assert result.derivatives[one] == 0 + assert result.error_components[zero_uatom] == zero.std_dev + assert result.error_components[one_uatom] == 0 result = op(zero, 2 * one) - assert result.derivatives[zero] == 0 - assert result.derivatives[one] == 0 + assert result.error_components[zero_uatom] == 0 + assert result.error_components[one_uatom] == 0 result = op(zero, positive_smaller_than_one) - assert isnan(result.derivatives[zero]) - assert result.derivatives[positive_smaller_than_one] == 0 + assert isnan(result.error_components[zero_uatom]) + assert result.error_components[positive_smaller_than_one_uatom] == 0 result = op(zero, zero2) - assert result.derivatives[zero] == 0 - assert isnan(result.derivatives[zero2]) + assert result.error_components[zero_uatom] == 0 + assert isnan(result.error_components[zero2_uatom]) ## positive**...: this is a quite regular case where the value and - ## the derivatives are all defined. + ## the error_components are all defined. 
result = op(positive, positive2) - assert not isnan(result.derivatives[positive]) - assert not isnan(result.derivatives[positive2]) + assert not isnan(result.error_components[positive_uatom]) + assert not isnan(result.error_components[positive2_uatom]) result = op(positive, zero) - assert result.derivatives[positive] == 0 - assert not isnan(result.derivatives[zero]) + assert result.error_components[positive_uatom] == 0 + assert not isnan(result.error_components[zero_uatom]) result = op(positive, negative) - assert not isnan(result.derivatives[positive]) - assert not isnan(result.derivatives[negative]) + assert not isnan(result.error_components[positive_uatom]) + assert not isnan(result.error_components[negative_uatom]) def power_special_cases(op): @@ -211,145 +226,6 @@ class DerivativesDiffer(Exception): pass -def compare_derivatives(func, numerical_derivatives, num_args_list=None): - """ - Checks the derivatives of a function 'func' (as returned by the - wrap() wrapper), by comparing them to the - 'numerical_derivatives' functions. - - Raises a DerivativesDiffer exception in case of problem. - - These functions all take the number of arguments listed in - num_args_list. If num_args is None, it is automatically obtained. - - Tests are done on random arguments. - """ - - try: - funcname = func.name - except AttributeError: - funcname = func.__name__ - - # print "Testing", func.__name__ - - if not num_args_list: - # Detecting automatically the correct number of arguments is not - # always easy (because not all values are allowed, etc.): - - num_args_table = { - "atanh": [1], - "log": [1, 2], # Both numbers of arguments are tested - } - if funcname in num_args_table: - num_args_list = num_args_table[funcname] - else: - num_args_list = [] - - # We loop until we find reasonable function arguments: - # We get the number of arguments by trial and error: - for num_args in range(10): - try: - #! 
Giving integer arguments is good for preventing - # certain functions from failing even though num_args - # is their correct number of arguments - # (e.g. math.ldexp(x, i), where i must be an integer) - func(*(1,) * num_args) - except TypeError: - pass # Not the right number of arguments - else: # No error - # num_args is a good number of arguments for func: - num_args_list.append(num_args) - - if not num_args_list: - raise Exception( - "Can't find a reasonable number of arguments" - " for function '%s'." % funcname - ) - - for num_args in num_args_list: - # Argument numbers that will have a random integer value: - integer_arg_nums = set() - - if funcname == "ldexp": - # The second argument must be an integer: - integer_arg_nums.add(1) - - while True: - try: - # We include negative numbers, for more thorough tests: - args = [] - for arg_num in range(num_args): - if arg_num in integer_arg_nums: - args.append(random.choice(range(-10, 10))) - else: - args.append(uncert_core.Variable(random.random() * 4 - 2, 0)) - - # 'args', but as scalar values: - args_scalar = [uncert_core.nominal_value(v) for v in args] - - func_approx = func(*args) - - # Some functions yield simple Python constants, after - # wrapping in wrap(): no test has to be performed. - # Some functions also yield tuples... 
- if isinstance(func_approx, AffineScalarFunc): - # We compare all derivatives: - for arg_num, (arg, numerical_deriv) in enumerate( - zip(args, numerical_derivatives) - ): - # Some arguments might not be differentiable: - if isinstance(arg, int): - continue - - fixed_deriv_value = func_approx.derivatives[arg] - - num_deriv_value = numerical_deriv(*args_scalar) - - # This message is useful: the user can see that - # tests are really performed (instead of not being - # performed, silently): - print( - "Testing derivative #%d of %s at %s" - % (arg_num, funcname, args_scalar) - ) - - if not numbers_close(fixed_deriv_value, num_deriv_value, 1e-4): - # It is possible that the result is NaN: - if not isnan(func_approx): - raise DerivativesDiffer( - "Derivative #%d of function '%s' may be" - " wrong: at args = %s," - " value obtained = %.16f," - " while numerical approximation = %.16f." - % ( - arg_num, - funcname, - args, - fixed_deriv_value, - num_deriv_value, - ) - ) - - except ValueError as err: # Arguments out of range, or of wrong type - # Factorial(real) lands here: - if str(err).startswith("factorial"): - integer_arg_nums = set([0]) - continue # We try with different arguments - # Some arguments might have to be integers, for instance: - except TypeError as err: - if len(integer_arg_nums) == num_args: - raise Exception( - "Incorrect testing procedure: unable to " - "find correct argument values for %s: %s" % (funcname, err) - ) - - # Another argument might be forced to be an integer: - integer_arg_nums.add(random.choice(range(num_args))) - else: - # We have found reasonable arguments, and the test passed: - break - - ############################################################################### diff --git a/tests/test_formatting.py b/tests/test_formatting.py index 19106420..f1242690 100644 --- a/tests/test_formatting.py +++ b/tests/test_formatting.py @@ -38,10 +38,6 @@ def test_repr(): x = ufloat(3.14159265358979, 0) assert repr(x) == "3.14159265358979+/-0" - # 
Tagging: - x = ufloat(3, 1, "length") - assert repr(x) == "< length = 3.0+/-1.0 >" - # The way NaN is formatted with F, E and G depends on the version of Python (NAN for # Python 2.5+ at least): diff --git a/tests/test_umath.py b/tests/test_umath.py index 354e7588..9c7477ea 100644 --- a/tests/test_umath.py +++ b/tests/test_umath.py @@ -1,68 +1,162 @@ +import itertools +import json import math from math import isnan +from pathlib import Path +import pytest + +from helpers import get_single_uatom from uncertainties import ufloat import uncertainties.core as uncert_core import uncertainties.umath_core as umath_core +from uncertainties import umath from helpers import ( power_special_cases, power_all_cases, power_wrt_ref, - compare_derivatives, numbers_close, + ufloats_close, ) ############################################################################### # Unit tests -def test_fixed_derivatives_math_funcs(): - """ - Comparison between function derivatives and numerical derivatives. +single_input_data_path = Path(Path(__file__).parent, "cases", "single_inputs.json") +with open(single_input_data_path, "r") as f: + single_input_dict = json.load(f) + +double_input_data_path = Path(Path(__file__).parent, "cases", "double_inputs.json") +with open(double_input_data_path, "r") as f: + double_input_dict = json.load(f) + +real_single_input_funcs = ( + "asinh", + "atan", + "cos", + "cosh", + "degrees", + "erf", + "erfc", + "exp", + "expm1", + "fabs", + "gamma", + "lgamma", + "radians", + "sin", + "sinh", + "tan", + "tanh", +) +positive_single_input_funcs = ( + "log", + "log1p", + "log10", + "sqrt", +) +minus_one_to_plus_one_single_input_funcs = ( + "acos", + "asin", + "atanh", +) +greater_than_one_single_input_funcs = ("acosh",) - This comparison is useful for derivatives that are analytical. 
- """ +real_single_input_cases = list( + itertools.product(real_single_input_funcs, single_input_dict["real"]) +) +positive_single_input_cases = list( + itertools.product(positive_single_input_funcs, single_input_dict["positive"]) +) +minus_one_to_plus_one_single_input_cases = list( + itertools.product( + minus_one_to_plus_one_single_input_funcs, + single_input_dict["minus_one_to_plus_one"], + ) +) +greater_than_one_single_input_cases = list( + itertools.product( + greater_than_one_single_input_funcs, + single_input_dict["greater_than_one"], + ) +) +single_input_cases = ( + real_single_input_cases + + positive_single_input_cases + + minus_one_to_plus_one_single_input_cases + + greater_than_one_single_input_cases +) - for name in umath_core.many_scalars_to_scalar_funcs: - func = getattr(umath_core, name) - # Numerical derivatives of func: the nominal value of func() results - # is used as the underlying function: - numerical_derivatives = uncert_core.NumericalDerivatives( - lambda *args: func(*args) - ) - compare_derivatives(func, numerical_derivatives) - # Functions that are not in umath_core.many_scalars_to_scalar_funcs: +@pytest.mark.parametrize("func, value", single_input_cases) +def test_single_input_func_derivatives(func, value): + uval = ufloat(value, 1.0) + uatom = get_single_uatom(uval) - ## - # modf(): returns a tuple: - def frac_part_modf(x): - return umath_core.modf(x)[0] + umath_func = getattr(umath, func) + result = umath_func(uval) + analytic_deriv = result.error_components[uatom] - def int_part_modf(x): - return umath_core.modf(x)[1] + math_func = getattr(math, func) + numerical_deriv = uncert_core.partial_derivative(math_func, 0)(value) + + assert math.isclose(analytic_deriv, numerical_deriv, rel_tol=1e-6, abs_tol=1e-6) - compare_derivatives( - frac_part_modf, uncert_core.NumericalDerivatives(lambda x: frac_part_modf(x)) - ) - compare_derivatives( - int_part_modf, uncert_core.NumericalDerivatives(lambda x: int_part_modf(x)) - ) - ## - # frexp(): 
returns a tuple: - def mantissa_frexp(x): - return umath_core.frexp(x)[0] +real_double_input_funcs = ( + "atan2", + "copysign", + "fmod", + "pow", +) - def exponent_frexp(x): - return umath_core.frexp(x)[1] +positive_double_input_funcs = ( + "hypot", + "log", +) - compare_derivatives( - mantissa_frexp, uncert_core.NumericalDerivatives(lambda x: mantissa_frexp(x)) - ) - compare_derivatives( - exponent_frexp, uncert_core.NumericalDerivatives(lambda x: exponent_frexp(x)) - ) +real_double_input_cases = list( + itertools.product(real_double_input_funcs, *zip(*double_input_dict["real"])) +) +print(real_double_input_cases) +positive_double_input_cases = list( + itertools.product(positive_double_input_funcs, *zip(*double_input_dict["positive"])) +) + +double_input_cases = real_double_input_cases + positive_double_input_cases + + +@pytest.mark.parametrize("func, value_0, value_1", double_input_cases) +def test_double_input_func_derivatives(func, value_0, value_1): + if func == "pow": + value_0 = abs(value_0) + + uval_0 = ufloat(value_0, 1.0) + uval_1 = ufloat(value_1, 1.0) + + uatom_0 = get_single_uatom(uval_0) + uatom_1 = get_single_uatom(uval_1) + + math_func = getattr(math, func) + umath_func = getattr(umath, func) + + result = umath_func(uval_0, uval_1) + + analytic_deriv_0 = result.error_components[uatom_0] + analytic_deriv_1 = result.error_components[uatom_1] + + numerical_deriv_0 = uncert_core.partial_derivative( + math_func, + 0, + )(value_0, value_1) + numerical_deriv_1 = uncert_core.partial_derivative( + math_func, + 1, + )(value_0, value_1) + + assert math.isclose(analytic_deriv_0, numerical_deriv_0, rel_tol=1e-6, abs_tol=1e-6) + assert math.isclose(analytic_deriv_1, numerical_deriv_1, rel_tol=1e-6, abs_tol=1e-6) def test_compound_expression(): @@ -71,9 +165,9 @@ def test_compound_expression(): """ x = ufloat(3, 0.1) - - # Prone to numerical errors (but not much more than floats): - assert umath_core.tan(x) == umath_core.sin(x) / umath_core.cos(x) + y1 = 
umath_core.tan(x) + y2 = umath_core.sin(x) / umath_core.cos(x) + assert ufloats_close(y1, y2) def test_numerical_example(): @@ -271,11 +365,13 @@ def test_hypot(): """ x = ufloat(0, 1) y = ufloat(0, 2) + x_uatom = get_single_uatom(x) + y_uatom = get_single_uatom(y) # Derivatives that cannot be calculated simply return NaN, with no # exception being raised, normally: result = umath_core.hypot(x, y) - assert isnan(result.derivatives[x]) - assert isnan(result.derivatives[y]) + assert isnan(result.error_components[x_uatom]) + assert isnan(result.error_components[y_uatom]) def test_power_all_cases(): diff --git a/tests/test_uncertainties.py b/tests/test_uncertainties.py index 0cb81807..eb1cc813 100644 --- a/tests/test_uncertainties.py +++ b/tests/test_uncertainties.py @@ -1,24 +1,28 @@ import copy import math -import random # noqa +import gc +import pickle +import random import pytest import uncertainties.core as uncert_core -from uncertainties.core import ufloat, AffineScalarFunc, ufloat_fromstr +from uncertainties.core import ufloat, ufloat_fromstr from uncertainties import ( umath, + UFloat, correlated_values, correlated_values_norm, correlation_matrix, ) +from uncertainties.ops import partial_derivative from helpers import ( + get_single_uatom, power_special_cases, power_all_cases, power_wrt_ref, numbers_close, ufloats_close, - compare_derivatives, ) @@ -28,298 +32,247 @@ np = None -def test_value_construction(): - """ - Tests the various means of constructing a constant number with - uncertainty *without a string* (see test_ufloat_fromstr(), for this). - """ - - ## Simple construction: - x = ufloat(3, 0.14) +def test_UFloat_class_construction(): + """Test creating UFloat directly.""" + x = UFloat(3, 0.14) assert x.nominal_value == 3 assert x.std_dev == 0.14 - assert x.tag is None + assert get_single_uatom(x).tag is None - # ... 
with tag as positional argument: - x = ufloat(3, 0.14, "pi") + x = UFloat(3, 0.14, "pi") assert x.nominal_value == 3 assert x.std_dev == 0.14 - assert x.tag == "pi" + assert get_single_uatom(x).tag == "pi" - # ... with tag keyword: - x = ufloat(3, 0.14, tag="pi") + x = UFloat(3, 0.14, tag="pi") assert x.nominal_value == 3 assert x.std_dev == 0.14 - assert x.tag == "pi" + assert get_single_uatom(x).tag == "pi" - # Negative standard deviations should be caught in a nice way - # (with the right exception): - try: - x = ufloat(3, -0.1) - except uncert_core.NegativeStdDev: - pass + with pytest.raises(uncert_core.NegativeStdDev): + _ = UFloat(3, -0.1) - ## Incorrect forms should not raise any deprecation warning, but - ## raise an exception: - - try: - ufloat(1) # Form that has never been allowed - except TypeError: - pass - else: - raise Exception("An exception should be raised") - - -def test_ufloat_fromstr(): - "Input of numbers with uncertainties as a string" - - # String representation, and numerical values: - tests = { - "-1.23(3.4)": (-1.23, 3.4), # (Nominal value, error) - " -1.23(3.4) ": (-1.23, 3.4), # Spaces ignored - "-1.34(5)": (-1.34, 0.05), - "1(6)": (1, 6), - "3(4.2)": (3, 4.2), - "-9(2)": (-9, 2), - "1234567(1.2)": (1234567, 1.2), - "12.345(15)": (12.345, 0.015), - "-12.3456(78)e-6": (-12.3456e-6, 0.0078e-6), - "0.29": (0.29, 0.01), - "31.": (31, 1), - "-31.": (-31, 1), - # The following tests that the ufloat() routine does - # not consider '31' like the tuple ('3', '1'), which would - # make it expect two numbers (instead of 2 1-character - # strings): - "31": (31, 1), - "-3.1e10": (-3.1e10, 0.1e10), - "169.0(7)": (169, 0.7), - "-0.1+/-1": (-0.1, 1), - "-13e-2+/-1e2": (-13e-2, 1e2), - "-14.(15)": (-14, 15), - "-100.0(15)": (-100, 1.5), - "14.(15)": (14, 15), - # Global exponent: - "(3.141+/-0.001)E+02": (314.1, 0.1), - ## Pretty-print notation: - # ± sign, global exponent (not pretty-printed): - "(3.141±0.001)E+02": (314.1, 0.1), - # ± sign, individual 
exponent: - "3.141E+02±0.001e2": (314.1, 0.1), - # ± sign, times symbol, superscript (= full pretty-print): - "(3.141 ± 0.001) × 10²": (314.1, 0.1), - ## Others - # Forced parentheses: - "(2 +/- 0.1)": (2, 0.1), - # NaN uncertainty: - "(3.141±nan)E+02": (314.1, float("nan")), - "3.141e+02+/-nan": (314.1, float("nan")), - "3.4(nan)e10": (3.4e10, float("nan")), - # NaN value: - "nan+/-3.14e2": (float("nan"), 314), - # "Double-floats" - "(-3.1415 +/- 1e-4)e+200": (-3.1415e200, 1e196), - "(-3.1415e-10 +/- 1e-4)e+200": (-3.1415e190, 1e196), - # Special float representation: - "-3(0.)": (-3, 0), - } - - for representation, values in tests.items(): - # We test the fact that surrounding spaces are removed: - representation = " {} ".format(representation) - - # Without tag: - num = ufloat_fromstr(representation) - assert numbers_close(num.nominal_value, values[0]) - assert numbers_close(num.std_dev, values[1]) - assert num.tag is None - - # With a tag as positional argument: - num = ufloat_fromstr(representation, "test variable") - assert numbers_close(num.nominal_value, values[0]) - assert numbers_close(num.std_dev, values[1]) - assert num.tag == "test variable" - - # With a tag as keyword argument: - num = ufloat_fromstr(representation, tag="test variable") - assert numbers_close(num.nominal_value, values[0]) - assert numbers_close(num.std_dev, values[1]) - assert num.tag == "test variable" + with pytest.raises(TypeError): + UFloat(1) -############################################################################### - - -# Test of correctness of the fixed (usually analytical) derivatives: -def test_fixed_derivatives_basic_funcs(): - """ - Pre-calculated derivatives for operations on AffineScalarFunc. - """ - - def check_op(op, num_args): - """ - Makes sure that the derivatives for function '__op__' of class - AffineScalarFunc, which takes num_args arguments, are correct. - - If num_args is None, a correct value is calculated. 
- """ - - op_string = "__%s__" % op - func = getattr(AffineScalarFunc, op_string) - numerical_derivatives = uncert_core.NumericalDerivatives( - # The __neg__ etc. methods of AffineScalarFunc only apply, - # by definition, to AffineScalarFunc objects: we first map - # possible scalar arguments (used for calculating - # derivatives) to AffineScalarFunc objects: - lambda *args: func(*map(uncert_core.to_affine_scalar, args)) - ) - compare_derivatives(func, numerical_derivatives, [num_args]) +def test_ufloat_function_construction(): + """Test creating UFloat via ufloat() function.""" + x = ufloat(3, 0.14) + assert x.nominal_value == 3 + assert x.std_dev == 0.14 + assert get_single_uatom(x).tag is None - # Operators that take 1 value: - for op in uncert_core.modified_operators: - check_op(op, 1) + x = ufloat(3, 0.14, "pi") + assert x.nominal_value == 3 + assert x.std_dev == 0.14 + assert get_single_uatom(x).tag == "pi" - # Operators that take 2 values: - for op in uncert_core.modified_ops_with_reflection: - check_op(op, 2) + # ... 
with tag keyword: + x = ufloat(3, 0.14, tag="pi") + assert x.nominal_value == 3 + assert x.std_dev == 0.14 + assert get_single_uatom(x).tag == "pi" + + with pytest.raises(uncert_core.NegativeStdDev): + _ = ufloat(3, -0.1) + + with pytest.raises(TypeError): + ufloat(1) + + +ufloat_from_str_cases = [ + ("-1.23(3.4)", -1.23, 3.4), + (" -1.23(3.4) ", -1.23, 3.4), # Test that leading and trailing spaces are ignored + ("-1.34(5)", -1.34, 0.05), + ("1(6)", 1, 6), + ("3(4.2)", 3, 4.2), + ("-9(2)", -9, 2), + ("1234567(1.2)", 1234567, 1.2), + ("12.345(15)", 12.345, 0.015), + ("-12.3456(78)e-6", -12.3456e-6, 0.0078e-6), + ("0.29", 0.29, 0.01), + ("31.", 31, 1), + ("-31.", -31, 1), + # The following tests that the ufloat() routine does + # not consider '31' like the tuple ('3', '1'), which would + # make it expect two numbers (instead of 2 1-character + # strings): + ("31", 31, 1), + ("-3.1e10", -3.1e10, 0.1e10), + ("169.0(7)", 169, 0.7), + ("-0.1+/-1", -0.1, 1), + ("-13e-2+/-1e2", -13e-2, 1e2), + ("-14.(15)", -14, 15), + ("-100.0(15)", -100, 1.5), + ("14.(15)", 14, 15), + # Global exponent: + ("(3.141+/-0.001)E+02", 314.1, 0.1), + ## Pretty-print notation: + # ± sign, global exponent (not pretty-printed): + ("(3.141±0.001)E+02", 314.1, 0.1), + # ± sign, individual exponent: + ("3.141E+02±0.001e2", 314.1, 0.1), + # ± sign, times symbol, superscript (= full pretty-print): + ("(3.141 ± 0.001) × 10²", 314.1, 0.1), + ## Others + # Forced parentheses: + ("(2 +/- 0.1)", 2, 0.1), + # NaN uncertainty: + ("(3.141±nan)E+02", 314.1, float("nan")), + ("3.141e+02+/-nan", 314.1, float("nan")), + ("3.4(nan)e10", 3.4e10, float("nan")), + # NaN value: + ("nan+/-3.14e2", float("nan"), 314), + # Special float representation: + ("-3(0.)", -3, 0), +] + + +@pytest.mark.parametrize("input_str,nominal_value,std_dev", ufloat_from_str_cases) +def test_ufloat_fromstr(input_str, nominal_value, std_dev): + num = ufloat_fromstr(input_str) + assert numbers_close(num.nominal_value, nominal_value) + assert 
numbers_close(num.std_dev, std_dev) + if std_dev != 0: + assert get_single_uatom(num).tag is None + else: + assert num.uncertainty.ucombo_tuple == () + + # With a tag as positional argument: + num = ufloat_fromstr(input_str, "test variable") + assert numbers_close(num.nominal_value, nominal_value) + assert numbers_close(num.std_dev, std_dev) + if std_dev != 0: + assert get_single_uatom(num).tag == "test variable" + else: + assert num.uncertainty.ucombo_tuple == () + + # With a tag as keyword argument: + num = ufloat_fromstr(input_str, tag="test variable") + assert numbers_close(num.nominal_value, nominal_value) + assert numbers_close(num.std_dev, std_dev) + if std_dev != 0: + assert get_single_uatom(num).tag == "test variable" + else: + assert num.uncertainty.ucombo_tuple == () + + +# Randomly generated but static test values. +deriv_propagation_cases = [ + ("__abs__", (1.1964838601545966,), 0.047308407404731856), + ("__pos__", (1.5635699242286414,), 0.38219529954774223), + ("__neg__", (-0.4520304708235554,), 0.8442835926901457), + ("__trunc__", (0.4622631416873926,), 0.6540076679531033), + ("__add__", (-0.7581877519537352, 1.6579645792821753), 0.5083165826806606), + ("__radd__", (-0.976869259500134, 1.1542019729184076), 0.732839320238539), + ("__sub__", (1.0233545960703134, 0.029354693323845993), 0.7475621525040559), + ("__rsub__", (0.49861518245313663, -0.9927317702800833), 0.5421488555485847), + ("__mul__", (0.0654070362874073, 1.9216078105121919), 0.6331001122119122), + ("__rmul__", (-0.4006772142682373, 0.19628658198222926), 0.3300416314362784), + ("__truediv__", (-0.5573378968194893, 0.28646277014641486), 0.42933306560556384), + ("__rtruediv__", (1.7663869752268884, -0.1619387546963642), 0.6951025849642374), + ("__floordiv__", (0.11750026664733992, -1.0120567560937617), 0.9557126076209381), + ("__rfloordiv__", (-1.2872736512072698, -1.4416464249395973), 0.28262518984780205), + ("__pow__", (0.34371967038364515, -0.8313605840956209), 0.6267147080961244), + 
("__rpow__", (1.593375683248082, 1.9890969272006154), 0.7171353266792271), + ("__mod__", (0.7478106873313131, 1.2522332955942628), 0.5682413634363304), + ("__rmod__", (1.5227432102303133, -0.5177923078991333), 0.25752786270795935), +] + + +@pytest.mark.parametrize("func_name, args, std_dev", deriv_propagation_cases) +def test_deriv_propagation(func_name, args, std_dev): + func = getattr(UFloat, func_name) + ufloat_args = [UFloat(arg, std_dev) for arg in args] + output = func(*ufloat_args) + + for idx, _ in enumerate(ufloat_args): + deriv = partial_derivative(func, idx)(*args) + for atom, input_weight in ufloat_args[idx].error_components.items(): + output_weight = output.error_components[atom] + assert numbers_close(output_weight, deriv * input_weight) def test_copy(): - "Standard copy module integration" - import gc - + """Standard copy module integration.""" x = ufloat(3, 0.1) assert x == x + x_uatom = get_single_uatom(x) + y = copy.copy(x) - assert x != y - assert not (x == y) - assert y in y.derivatives.keys() # y must not copy the dependence on x + assert y == x + assert get_single_uatom(y) == x_uatom z = copy.deepcopy(x) - assert x != z + assert z == x + assert get_single_uatom(z) == x_uatom - # Copy tests on expressions: t = x + 2 * z - # t depends on x: - assert x in t.derivatives + assert x_uatom in t.error_components - # The relationship between the copy of an expression and the - # original variables should be preserved: t_copy = copy.copy(t) - # Shallow copy: the variables on which t depends are not copied: - assert x in t_copy.derivatives + assert x_uatom in t_copy.error_components assert uncert_core.covariance_matrix([t, z]) == uncert_core.covariance_matrix( [t_copy, z] ) - # However, the relationship between a deep copy and the original - # variables should be broken, since the deep copy created new, - # independent variables: t_deepcopy = copy.deepcopy(t) - assert x not in t_deepcopy.derivatives - assert uncert_core.covariance_matrix([t, z]) != 
uncert_core.covariance_matrix( + assert x_uatom in t_deepcopy.error_components + assert uncert_core.covariance_matrix([t, z]) == uncert_core.covariance_matrix( [t_deepcopy, z] ) - # Test of implementations with weak references: - - # Weak references: destroying a variable should never destroy the - # integrity of its copies (which would happen if the copy keeps a - # weak reference to the original, in its derivatives member: the - # weak reference to the original would become invalid): del x - gc.collect() + assert x_uatom in y.error_components - assert y in list(y.derivatives.keys()) +""" +Classes to test pickling with different types of __slots__ inheritance +""" -## Classes for the pickling tests (put at the module level, so that -## they can be unpickled): - -# Subclass without slots: -class NewVariable_dict(uncert_core.Variable): +class UFloatDict(UFloat): pass -# Subclass with slots defined by a tuple: -class NewVariable_slots_tuple(uncert_core.Variable): +class UFloatSlotsTuple(UFloat): __slots__ = ("new_attr",) -# Subclass with slots defined by a string: -class NewVariable_slots_str(uncert_core.Variable): +class UFloatSlotsStr(UFloat): __slots__ = "new_attr" def test_pickling(): - "Standard pickle module integration." 
- - import pickle - - x = ufloat(2, 0.1) - + """Standard pickle module integration.""" + x = UFloat(2, 0.1) x_unpickled = pickle.loads(pickle.dumps(x)) - assert x != x_unpickled # Pickling creates copies + assert x_unpickled == x - ## Tests with correlations and AffineScalarFunc objects: f = 2 * x - assert isinstance(f, AffineScalarFunc) - (f_unpickled, x_unpickled2) = pickle.loads(pickle.dumps((f, x))) - # Correlations must be preserved: - assert f_unpickled - x_unpickled2 - x_unpickled2 == 0 + assert isinstance(f, UFloat) - ## Tests with subclasses: + f_unpickled, x_unpickled2 = pickle.loads(pickle.dumps((f, x))) + assert f_unpickled - 2 * x_unpickled2 == 0 - for subclass in (NewVariable_dict, NewVariable_slots_tuple, NewVariable_slots_str): + for subclass in (UFloatDict, UFloatSlotsTuple, UFloatSlotsStr): x = subclass(3, 0.14) # Pickling test with possibly uninitialized slots: - pickle.loads(pickle.dumps(x)) + assert pickle.loads(pickle.dumps(x)) == x # Unpickling test: x.new_attr = "New attr value" x_unpickled = pickle.loads(pickle.dumps(x)) - # Must exist (from the slots of the parent class): - x_unpickled.nominal_value - x_unpickled.new_attr # Must exist - - ## - - # Corner case test: when an attribute is present both in __slots__ - # and in __dict__, it is first looked up from the slots - # (references: - # http://docs.python.org/2/reference/datamodel.html#invoking-descriptors, - # http://stackoverflow.com/a/15139208/42973). 
As a consequence, - # the pickling process must pickle the correct value (i.e., not - # the value from __dict__): - x = NewVariable_dict(3, 0.14) - x._nominal_value = "in slots" - # Corner case: __dict__ key which is also a slot name (it is - # shadowed by the corresponding slot, so this is very unusual, - # though): - x.__dict__["_nominal_value"] = "in dict" - # Additional __dict__ attribute: - x.dict_attr = "dict attribute" - - x_unpickled = pickle.loads(pickle.dumps(x)) - # We make sure that the data is still there and untouched: - assert x_unpickled._nominal_value == "in slots" - assert x_unpickled.__dict__ == x.__dict__ - - ## + assert x_unpickled == x + assert x_unpickled.new_attr == "New attr value" - # Corner case that should have no impact on the code but which is - # not prevented by the documentation: case of constant linear - # terms (the potential gotcha is that if the linear_combo - # attribute is empty, __getstate__()'s result could be false, and - # so __setstate__() would not be called and the original empty - # linear combination would not be set in linear_combo. - x = uncert_core.LinearCombination({}) - assert pickle.loads(pickle.dumps(x)).linear_combo == {} + x = uncert_core.UCombo(()) + assert pickle.loads(pickle.dumps(x)).ucombo_tuple == () def test_int_div(): @@ -485,50 +438,42 @@ def test_basic_access_to_data(): "Access to data from Variable and AffineScalarFunc objects." 
x = ufloat(3.14, 0.01, "x var") - assert x.tag == "x var" + assert get_single_uatom(x).tag == "x var" assert x.nominal_value == 3.14 assert x.std_dev == 0.01 # Case of AffineScalarFunc objects: y = x + 0 - assert type(y) == AffineScalarFunc + assert type(y) == UFloat assert y.nominal_value == 3.14 assert y.std_dev == 0.01 # Details on the sources of error: a = ufloat(-1, 0.001) y = 2 * x + 3 * x + 2 + a - error_sources = y.error_components() - assert len(error_sources) == 2 # 'a' and 'x' - assert error_sources[x] == 0.05 - assert error_sources[a] == 0.001 + error_sources = y.error_components + assert len(error_sources) == 2 + # 'a' and 'x' + assert y.covariance(x) == 0.05 * 0.01 + assert y.covariance(a) == 0.001 * 0.001 - # Derivative values should be available: - assert y.derivatives[x] == 5 - - # Modification of the standard deviation of variables: - x.std_dev = 1 - assert y.error_components()[x] == 5 # New error contribution! + with pytest.raises(AttributeError): + # std_dev cannot be modified + x.std_dev = 1 # Calculated values with uncertainties should not have a settable # standard deviation: y = 2 * x - try: + with pytest.raises(AttributeError): y.std_dev = 1 - except AttributeError: - pass - else: - raise Exception("std_dev should not be settable for calculated results") # Calculation of deviations in units of the standard deviations: assert 10 / x.std_dev == x.std_score(10 + x.nominal_value) # "In units of the standard deviation" is not always meaningful: - x.std_dev = 0 - try: + x = ufloat(1, 0) + with pytest.raises(ValueError): x.std_score(1) - except ValueError: - pass # Normal behavior def test_correlations(): @@ -937,6 +882,9 @@ def f(x, y, *args, **kwargs): z = ufloat(100, 0.111) t = ufloat(0.1, 0.1111) + z_uatom = get_single_uatom(z) + t_uatom = get_single_uatom(t) + assert ufloats_close( f_wrapped(x, y, z, t=t), f_auto_unc(x, y, z, t=t), tolerance=1e-5 ) @@ -955,8 +903,8 @@ def f(x, y, *args, **kwargs): # to try to confuse the code: assert ( - 
f_wrapped2(x, y, z, t=t).derivatives[y] - == f_auto_unc(x, y, z, t=t).derivatives[y] + f_wrapped2(x, y, z, t=t).error_components[z_uatom] + == f_auto_unc(x, y, z, t=t).error_components[z_uatom] ) # Derivatives supplied through the keyword-parameter dictionary of @@ -972,12 +920,12 @@ def f(x, y, *args, **kwargs): # The derivatives should be exactly the same, because they are # obtained with the exact same analytic formula: assert ( - f_wrapped3(x, y, z, t=t).derivatives[z] - == f_auto_unc(x, y, z, t=t).derivatives[z] + f_wrapped3(x, y, z, t=t).error_components[z_uatom] + == f_auto_unc(x, y, z, t=t).error_components[z_uatom] ) assert ( - f_wrapped3(x, y, z, t=t).derivatives[t] - == f_auto_unc(x, y, z, t=t).derivatives[t] + f_wrapped3(x, y, z, t=t).error_components[t_uatom] + == f_auto_unc(x, y, z, t=t).error_components[t_uatom] ) ######################################## diff --git a/tests/test_unumpy.py b/tests/test_unumpy.py index 783fd336..711b1621 100644 --- a/tests/test_unumpy.py +++ b/tests/test_unumpy.py @@ -9,7 +9,7 @@ import uncertainties.core as uncert_core from uncertainties import ufloat, unumpy from uncertainties.unumpy import core -from helpers import numbers_close, uarrays_close +from helpers import numbers_close, uarrays_close, get_single_uatom def test_numpy(): @@ -81,29 +81,13 @@ def test_matrix(): # Test of the nominal_value attribute: assert numpy.all(m_nominal_values == m.nominal_values) - assert type(m[0, 0]) == uncert_core.Variable + assert type(m[0, 0]) == uncert_core.UFloat # Test of scalar multiplication, both sides: 3 * m m * 3 -def derivatives_close(x, y): - """ - Returns True iff the AffineScalarFunc objects x and y have - derivatives that are close to each other (they must depend - on the same variables). 
- """ - - # x and y must depend on the same variables: - if set(x.derivatives) != set(y.derivatives): - return False # Not the same variables - - return all( - numbers_close(x.derivatives[var], y.derivatives[var]) for var in x.derivatives - ) - - def test_inverse(): "Tests of the matrix inverse" @@ -139,6 +123,7 @@ def test_inverse(): # Checks of the covariances between elements: x = ufloat(10, 1) + x_uatom = get_single_uatom(x) m = unumpy.matrix([[x, x], [0, 3 + 2 * x]]) m_inverse = m.I @@ -152,18 +137,16 @@ def test_inverse(): assert uarrays_close(m_double_inverse, m) - # Partial test: - assert derivatives_close(m_double_inverse[0, 0], m[0, 0]) - assert derivatives_close(m_double_inverse[1, 1], m[1, 1]) - #################### # Tests of covariances during the inversion: # There are correlations if both the next two derivatives are # not zero: - assert m_inverse[0, 0].derivatives[x] - assert m_inverse[0, 1].derivatives[x] + assert x_uatom in m_inverse[0, 0].error_components + assert m_inverse[0, 0].error_components[x_uatom] != 0 + assert x_uatom in m_inverse[0, 1].error_components + assert m_inverse[0, 1].error_components[x_uatom] != 0 # Correlations between m and m_inverse should create a perfect # inversion: @@ -179,7 +162,7 @@ def test_wrap_array_func(): # Function that works with numbers with uncertainties in mat (if # mat is an uncertainties.unumpy.matrix): def f_unc(mat, *args, **kwargs): - return mat.I + args[0] * kwargs["factor"] + return numpy.matrix(mat).I + args[0] * kwargs["factor"] # Test with optional arguments and keyword arguments: def f(mat, *args, **kwargs): @@ -189,7 +172,7 @@ def f(mat, *args, **kwargs): return f_unc(mat, *args, **kwargs) # Wrapped function: - f_wrapped = core.wrap_array_func(f) + f_wrapped = core.to_uarray_func(f) ########## # Full rank rectangular matrix: @@ -207,7 +190,7 @@ def test_pseudo_inverse(): "Tests of the pseudo-inverse" # Numerical version of the pseudo-inverse: - pinv_num = core.wrap_array_func(numpy.linalg.pinv) 
+ pinv_num = core.to_uarray_func(numpy.linalg.pinv) ########## # Full rank rectangular matrix: diff --git a/uncertainties/core.py b/uncertainties/core.py index c521a87f..8676559f 100644 --- a/uncertainties/core.py +++ b/uncertainties/core.py @@ -15,10 +15,8 @@ from __future__ import division # Many analytical derivatives depend on this from builtins import str, zip, range, object -from math import sqrt, isfinite # Optimization: no attribute look-up - -import copy -import collections +from math import isnan, sqrt # Optimization: no attribute look-up +from typing import Optional, Union from uncertainties.formatting import format_ufloat from uncertainties.parsing import str_to_number_with_uncert @@ -31,6 +29,7 @@ modified_operators, modified_ops_with_reflection, ) +from uncertainties.ucombo import UCombo, UAtom # Attributes that are always exported (some other attributes are # exported only if the NumPy module is available...): @@ -49,7 +48,6 @@ # Variable subclass), but possibly manipulated by external code # ['derivatives()' method, etc.]. 
"UFloat", - "Variable", # Wrapper for allowing non-pure-Python function to handle # quantitities with uncertainties: "wrap", @@ -188,23 +186,26 @@ def correlated_values_norm(values_with_std_dev, correlation_mat, tags=None): # Creation of new, independent variables: - # We use the fact that the eigenvectors in 'transform' are - # special: 'transform' is unitary: its inverse is its transpose: - - variables = tuple( - # The variables represent "pure" uncertainties: - Variable(0, sqrt(variance), tag) - for (variance, tag) in zip(variances, tags) + ind_vars = tuple( + UCombo(((UAtom(tag), sqrt(variance)),)) + for variance, tag in zip(variances, tags) ) # The coordinates of each new uncertainty as a function of the # new variables must include the variable scale (standard deviation): transform *= std_devs[:, numpy.newaxis] + corr_vars = [] + for sub_coords in transform: + corr_var = sum( + (ind_var * coord for ind_var, coord in zip(ind_vars, sub_coords)), + UCombo(()), + ) + corr_vars.append(corr_var) + # Representation of the initial correlated values: values_funcs = tuple( - AffineScalarFunc(value, LinearCombination(dict(zip(variables, coords)))) - for (coords, value) in zip(transform, nominal_values) + UFloat(value, corr_var) for (corr_var, value) in zip(corr_vars, nominal_values) ) return values_funcs @@ -258,310 +259,111 @@ def __getitem__(self, n): ######################################## -class LinearCombination(object): +class UFloat(object): """ - Linear combination of Variable differentials. - - The linear_combo attribute can change formally, but its value - always remains the same. Typically, the linear combination can - thus be expanded. - - The expanded form of linear_combo is a mapping from Variables to - the coefficient of their differential. + UFloat objects represent random variables. + + UFloat objects are represented by a float nominal value which gives + the mean of the abstract random variable and a UCombo uncertainty. 
+ The UCombo uncertainty is a linear combination of UAtom where each + UAtom is like a random variable with zero mean and unity variance. + + The variance and standard deviation of the UFloat random variable + can be calculated using the uncertainty UCombo. Also, if two UFloat + objects share uncertainty dependence on any shared UAtoms then those + two UFloat's may exhibit correlations which can be calculated. + + Finally, UFloat objects can pass through simple or complicated + mathematical operations such as addition or trigonometric + manipulations. The result of these operations will be new UFloat + objects whose uncertainty is calculated according to the rules of + linear error propagation. """ - # ! Invariant: linear_combo is represented internally exactly as - # the linear_combo argument to __init__(): - __slots__ = "linear_combo" - - def __init__(self, linear_combo): - """ - linear_combo can be modified by the object, during its - lifetime. This allows the object to change its internal - representation over time (for instance by expanding the linear - combination and replacing the original expression with the - expanded one). - - linear_combo -- if linear_combo is a dict, then it represents - an expanded linear combination and must map Variables to the - coefficient of their differential. Otherwise, it should be a - list of (coefficient, LinearCombination) pairs (that - represents a linear combination expression). - """ - - self.linear_combo = linear_combo - - def __bool__(self): - """ - Return True only if the linear combination is non-empty, i.e. if - the linear combination contains any term. - """ - return bool(self.linear_combo) - - def expanded(self): - """ - Return True if and only if the linear combination is expanded. - """ - return isinstance(self.linear_combo, dict) - - def expand(self): - """ - Expand the linear combination. - - The expansion is a collections.defaultdict(float). 
+ __slots__ = ("_nominal_value", "_uncertainty") - This should only be called if the linear combination is not - yet expanded. + def __init__( + self, + nominal_value: float, + uncertainty: Union[UCombo, Union[float, int]], + tag: Optional[str] = None, + ): """ + nominal_value -- (float) mean value of the random variable. - # The derivatives are built progressively by expanding each - # term of the linear combination until there is no linear - # combination to be expanded. - - # Final derivatives, constructed progressively: - derivatives = collections.defaultdict(float) - - while self.linear_combo: # The list of terms is emptied progressively - # One of the terms is expanded or, if no expansion is - # needed, simply added to the existing derivatives. - # - # Optimization note: since Python's operations are - # left-associative, a long sum of Variables can be built - # such that the last term is essentially a Variable (and - # not a NestedLinearCombination): popping from the - # remaining terms allows this term to be quickly put in - # the final result, which limits the number of terms - # remaining (and whose size can temporarily grow): - (main_factor, main_expr) = self.linear_combo.pop() - - # print "MAINS", main_factor, main_expr - - if main_expr.expanded(): - for var, factor in main_expr.linear_combo.items(): - derivatives[var] += main_factor * factor - - else: # Non-expanded form - for factor, expr in main_expr.linear_combo: - # The main_factor is applied to expr: - self.linear_combo.append((main_factor * factor, expr)) - - # print "DERIV", derivatives - - self.linear_combo = derivatives - - def __getstate__(self): - # Not false, otherwise __setstate__() will not be called: - return (self.linear_combo,) - - def __setstate__(self, state): - (self.linear_combo,) = state - - -class AffineScalarFunc(object): - """ - Affine functions that support basic mathematical operations - (addition, etc.). 
Such functions can for instance be used for - representing the local (linear) behavior of any function. - - This class can also be used to represent constants. - - The variables of affine scalar functions are Variable objects. - - AffineScalarFunc objects include facilities for calculating the - 'error' on the function, from the uncertainties on its variables. - - Main attributes and methods: - - - nominal_value, std_dev: value at the origin / nominal value, and - standard deviation. The standard deviation can be NaN or infinity. - - - n, s: abbreviations for nominal_value and std_dev. + uncertainty -- Uncertainty of the random variable. This can either be a UCombo + object which represents a linear combination of UAtoms or a simple non-negative + float. In the latter case the non-negative float uncertainty will be created to + use a single term UCombo with a new UAtom using that float as the weight for the + single new UAtom. - - error_components(): error_components()[x] is the error due to - Variable x. - - - derivatives: derivatives[x] is the (value of the) derivative - with respect to Variable x. This attribute is a Derivatives - dictionary whose keys are the Variable objects on which the - function depends. The values are the numerical values of the - derivatives. - - All the Variable objects on which the function depends are in - 'derivatives'. - - - std_score(x): position of number x with respect to the - nominal value, in units of the standard deviation. - """ - - # To save memory in large arrays: - __slots__ = ("_nominal_value", "_linear_part") - - # !! Fix for mean() in NumPy 1.8.0: - class dtype(object): - type = staticmethod(lambda value: value) - - #! The code could be modified in order to accommodate for non-float - # nominal values. 
This could for instance be done through - # the operator module: instead of delegating operations to - # float.__*__ operations, they could be delegated to - # operator.__*__ functions (while taking care of properly handling - # reverse operations: __radd__, etc.). - - def __init__(self, nominal_value, linear_part): - """ - nominal_value -- value of the function when the linear part is - zero. - - linear_part -- LinearCombination that describes the linear - part of the AffineScalarFunc. + tag -- (string) optional tag for the new UAtom used if the uncertainty is a + float such that a new UCombo and UAtom is generated. """ - - # ! A technical consistency requirement is that the - # linear_part can be nested inside a NestedLinearCombination - # (because this is how functions on AffineScalarFunc calculate - # their result: by constructing nested expressions for them). - - # Defines the value at the origin: - - # Only float-like values are handled. One reason is that it - # does not make sense for a scalar function to be affine to - # not yield float values. Another reason is that it would not - # make sense to have a complex nominal value, here (it would - # not be handled correctly at all): converting to float should - # be possible. - self._nominal_value = float(nominal_value) + if isinstance(uncertainty, (float, int)): + if not isnan(uncertainty) and uncertainty < 0: + raise NegativeStdDev("The standard deviation cannot be negative") + if uncertainty == 0: + uncertainty = UCombo(()) + else: + uncertainty = UCombo(((UAtom(tag=tag), float(uncertainty)),)) + self._uncertainty = uncertainty - # In order to have a linear execution time for long sums, the - # _linear_part is generally left as is (otherwise, each - # successive term would expand to a linearly growing sum of - # terms: efficiently handling such terms [so, without copies] - # is not obvious, when the algorithm should work for all - # functions beyond sums). 
- if not isinstance(linear_part, LinearCombination): - linear_part = LinearCombination(linear_part) - self._linear_part = linear_part - - # The following prevents the 'nominal_value' attribute from being - # modified by the user: @property def nominal_value(self): - "Nominal value of the random number." + """Nominal value of the random number.""" return self._nominal_value - # Abbreviation (for formulas, etc.): - n = nominal_value - - ############################################################ - - # Making derivatives a property gives the user a clean syntax, - # which is consistent with derivatives becoming a dictionary. @property - def derivatives(self): - """ - Return a mapping from each Variable object on which the function - (self) depends to the value of the derivative with respect to - that variable. - - This mapping should not be modified. + def n(self): + """Abbreviation for nominal_value""" + return self.nominal_value - Derivative values are always floats. - - This mapping is cached, for subsequent calls. - """ - - if not self._linear_part.expanded(): - self._linear_part.expand() - # Attempts to get the contribution of a variable that the - # function does not depend on raise a KeyError: - self._linear_part.linear_combo.default_factory = None - - return self._linear_part.linear_combo - - ######################################## + @property + def uncertainty(self): + return self._uncertainty - # Uncertainties handling: + def covariance(self, other): + return self.uncertainty.covariance(other.uncertainty) + @property def error_components(self): """ - Individual components of the standard deviation of the affine - function (in absolute value), returned as a dictionary with - Variable objects as keys. The returned variables are the - independent variables that the affine function depends on. - - This method assumes that the derivatives contained in the - object take scalar values (and are not a tuple, like what - math.frexp() returns, for instance). 
- """ - - # Calculation of the variance: - error_components = {} + The uncertainty is stored as a float-weighted linear combination of + UAtom objects. Each UAtom is unique and independent of all other UAtoms. + Each UAtom has a standard deviation of unity. - for variable, derivative in self.derivatives.items(): - # print "TYPE", type(variable), type(derivative) - - # Individual standard error due to variable: - - # 0 is returned even for a NaN derivative (in this case no - # multiplication by the derivative is performed): an exact - # variable obviously leads to no uncertainty in the - # functions that depend on it. - if variable._std_dev == 0: - # !!! Shouldn't the errors always be floats, as a - # convention of this module? - error_components[variable] = 0 - else: - error_components[variable] = abs(derivative * variable._std_dev) - - return error_components + This method returns a dictionary mapping each UAtom with which the + AffineScalarFunc is correlated to its corresponding float weight. + """ + return self.uncertainty.expanded @property def std_dev(self): - """ - Standard deviation of the affine function. - - This method assumes that the function returns scalar results. - - This returned standard deviation depends on the current - standard deviations [std_dev] of the variables (Variable - objects) involved. - """ - #! It would be possible to not allow the user to update the - # std dev of Variable objects, in which case AffineScalarFunc - # objects could have a pre-calculated or, better, cached - # std_dev value (in fact, many intermediate AffineScalarFunc do - # not need to have their std_dev calculated: only the final - # AffineScalarFunc returned to the user does). 
- return float(sqrt(sum(delta**2 for delta in self.error_components().values()))) + """Standard deviation of the affine function.""" + return self.uncertainty.std_dev - # Abbreviation (for formulas, etc.): - s = std_dev + @property + def s(self): + """Abbreviation for std_dev.""" + return self.std_dev def __repr__(self): - # Not putting spaces around "+/-" helps with arrays of - # Variable, as each value with an uncertainty is a - # block of signs (otherwise, the standard deviation can be - # mistaken for another element of the array). - - std_dev = self.std_dev # Optimization, since std_dev is calculated - - # A zero standard deviation is printed because otherwise, - # ufloat_fromstr() does not correctly parse back the value - # ("1.23" is interpreted as "1.23(1)"): - - if std_dev: - std_dev_str = repr(std_dev) - else: + if self.std_dev == 0: std_dev_str = "0" + else: + std_dev_str = repr(self.std_dev) - return "%r+/-%s" % (self.nominal_value, std_dev_str) + num_repr = f"{self.nominal_value}+/-{std_dev_str}" + return num_repr def __str__(self): - # An empty format string and str() usually return the same - # string - # (http://docs.python.org/2/library/string.html#format-specification-mini-language): return self.format("") + def __hash__(self): + return hash((self.nominal_value, self.uncertainty)) + @set_doc(format_ufloat.__doc__) def __format__(self, format_spec): return format_ufloat(self, format_spec) @@ -585,108 +387,17 @@ def std_score(self, value): Raises a ValueError exception if the standard deviation is zero. """ try: - # The ._nominal_value is a float: there is no integer division, - # here: return (value - self._nominal_value) / self.std_dev except ZeroDivisionError: raise ValueError("The standard deviation is zero:" " undefined result") - def __deepcopy__(self, memo): - """ - Hook for the standard copy module. - The returned AffineScalarFunc is a completely fresh copy, - which is fully independent of any variable defined so far. 
- New variables are specially created for the returned - AffineScalarFunc object. - """ - return AffineScalarFunc(self._nominal_value, copy.deepcopy(self._linear_part)) - - def __getstate__(self): - """ - Hook for the pickle module. - - The slot attributes of the parent classes are returned, as - well as those of the __dict__ attribute of the object (if - any). - """ - - # In general (case where this class is subclassed), data - # attribute are stored in two places: possibly in __dict_, and - # in slots. Data from both locations is returned by this - # method. - - all_attrs = {} - - # Support for subclasses that do not use __slots__ (except - # through inheritance): instances have a __dict__ - # attribute. The keys in this __dict__ are shadowed by the - # slot attribute names (reference: - # http://stackoverflow.com/questions/15139067/attribute-access-in-python-first-slots-then-dict/15139208#15139208). - # The method below not only preserves this behavior, but also - # saves the full contents of __dict__. This is robust: - # unpickling gives back the original __dict__ even if __dict__ - # contains keys that are shadowed by slot names: - - try: - all_attrs["__dict__"] = self.__dict__ - except AttributeError: - pass +ops.add_arithmetic_ops(UFloat) +ops.add_comparative_ops(UFloat) +to_affine_scalar = UFloat._to_affine_scalar - # All the slot attributes are gathered. - - # Classes that do not define __slots__ have the __slots__ of - # one of their parents (the first parent with their own - # __slots__ in MRO). This is why the slot names are first - # gathered (with repetitions removed, in general), and their - # values obtained later. 
- - all_slots = set() - - for cls in type(self).mro(): - # In the diamond inheritance pattern, some parent classes - # may not have __slots__: - slot_names = getattr(cls, "__slots__", ()) - - # Slot names can be given in various forms (string, - # sequence, iterable): - if isinstance(slot_names, str): - all_slots.add(slot_names) # Single name - else: - all_slots.update(slot_names) - - # The slot values are stored: - for name in all_slots: - try: - # !! It might happen that '__dict__' is itself a slot - # name. In this case, its value is saved - # again. Alternatively, the loop could be done on - # all_slots - {'__dict__'}: - all_attrs[name] = getattr(self, name) - except AttributeError: - pass # Undefined slot attribute - - return all_attrs - - def __setstate__(self, data_dict): - """ - Hook for the pickle module. - """ - for name, value in data_dict.items(): - # Contrary to the default __setstate__(), this does not - # necessarily save to the instance dictionary (because the - # instance might contain slots): - setattr(self, name, value) - - -ops.add_arithmetic_ops(AffineScalarFunc) -ops.add_comparative_ops(AffineScalarFunc) -to_affine_scalar = AffineScalarFunc._to_affine_scalar - -# Nicer name, for users: isinstance(ufloat(...), UFloat) is -# True. Also: isinstance(..., UFloat) is the test for "is this a -# number with uncertainties from the uncertainties package?": -UFloat = AffineScalarFunc +# Legacy alias for UFloat +AffineScalarFunc = UFloat def wrap(f, derivatives_args=None, derivatives_kwargs=None): @@ -747,130 +458,6 @@ class NegativeStdDev(Exception): pass -class Variable(AffineScalarFunc): - """ - Representation of a float-like scalar Variable with its uncertainty. - - Variables are independent from each other, but correlations between them - are handled through the AffineScalarFunc class. 
- """ - - # To save memory in large arrays: - __slots__ = ("_std_dev", "tag") - - def __init__(self, value, std_dev, tag=None): - """ - The nominal value and the standard deviation of the variable - are set. - - The value is converted to float. - - The standard deviation std_dev can be NaN. It should normally - be a float or an integer. - - 'tag' is a tag that the user can associate to the variable. This - is useful for tracing variables. - - The meaning of the nominal value is described in the main - module documentation. - """ - - #! The value, std_dev, and tag are assumed by __copy__() not to - # be copied. Either this should be guaranteed here, or __copy__ - # should be updated. - - # Only float-like values are handled. One reason is that the - # division operator on integers would not produce a - # differentiable functions: for instance, Variable(3, 0.1)/2 - # has a nominal value of 3/2 = 1, but a "shifted" value - # of 3.1/2 = 1.55. - value = float(value) - - # If the variable changes by dx, then the value of the affine - # function that gives its value changes by 1*dx: - - # ! Memory cycles are created. However, they are garbage - # collected, if possible. Using a weakref.WeakKeyDictionary - # takes much more memory. Thus, this implementation chooses - # more cycles and a smaller memory footprint instead of no - # cycles and a larger memory footprint. - super(Variable, self).__init__(value, LinearCombination({self: 1.0})) - - self.std_dev = std_dev # Assignment through a Python property - - self.tag = tag - - @property - def std_dev(self): - return self._std_dev - - # Standard deviations can be modified (this is a feature). - # AffineScalarFunc objects that depend on the Variable have their - # std_dev automatically modified (recalculated with the new - # std_dev of their Variables): - @std_dev.setter - def std_dev(self, std_dev): - # We force the error to be float-like. 
Since it is considered - # as a standard deviation, it must be either positive or NaN: - # (Note: if NaN < 0 is False, there is no need to test - # separately for NaN. But this is not guaranteed, even if it - # should work on most platforms.) - if std_dev < 0 and isfinite(std_dev): - raise NegativeStdDev("The standard deviation cannot be negative") - - self._std_dev = float(std_dev) - - # The following method is overridden so that we can represent the tag: - def __repr__(self): - num_repr = super(Variable, self).__repr__() - - if self.tag is None: - return num_repr - else: - return "< %s = %s >" % (self.tag, num_repr) - - def __hash__(self): - # All Variable objects are by definition independent - # variables, so they never compare equal; therefore, their - # id() are allowed to differ - # (http://docs.python.org/reference/datamodel.html#object.__hash__): - return id(self) - - def __copy__(self): - """ - Hook for the standard copy module. - """ - - # !!!!!! The comment below might not be valid anymore now that - # Variables do not contain derivatives anymore. - - # This copy implicitly takes care of the reference of the - # variable to itself (in self.derivatives): the new Variable - # object points to itself, not to the original Variable. - - # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html - - #! The following assumes that the arguments to Variable are - # *not* copied upon construction, since __copy__ is not supposed - # to copy "inside" information: - return Variable(self.nominal_value, self.std_dev, self.tag) - - def __deepcopy__(self, memo): - """ - Hook for the standard copy module. - - A new variable is created. - """ - - # This deep copy implicitly takes care of the reference of the - # variable to itself (in self.derivatives): the new Variable - # object points to itself, not to the original Variable. 
- - # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html - - return self.__copy__() - - ############################################################################### # Utilities @@ -908,6 +495,13 @@ def std_dev(x): return 0.0 +def uncertainty(x): + if isinstance(x, UFloat): + return x.uncertainty + else: + return UCombo(()) + + def covariance_matrix(nums_with_uncert): """ Return a matrix that contains the covariances between the given @@ -929,19 +523,19 @@ def covariance_matrix(nums_with_uncert): covariance_matrix = [] for i1, expr1 in enumerate(nums_with_uncert, 1): - derivatives1 = expr1.derivatives # Optimization - vars1 = set(derivatives1) # !! Python 2.7+: viewkeys() would work + error_components_1 = expr1.error_components + uatoms_1 = set(error_components_1) coefs_expr1 = [] for expr2 in nums_with_uncert[:i1]: - derivatives2 = expr2.derivatives # Optimization + error_components_2 = expr2.error_components + uatom_2 = set(error_components_2) coefs_expr1.append( sum( ( - (derivatives1[var] * derivatives2[var] * var._std_dev**2) - # var is a variable common to both numbers with - # uncertainties: - for var in vars1.intersection(derivatives2) + (error_components_1[uatom] * error_components_2[uatom]) + # uatom is common to both numbers with uncertainties: + for uatom in uatoms_1.intersection(uatom_2) ), # The result is always a float (sum() with no terms # returns an integer): @@ -1005,7 +599,7 @@ def ufloat_fromstr(representation, tag=None): return ufloat(nom, std, tag) -def ufloat(nominal_value, std_dev=None, tag=None): +def ufloat(nominal_value, std_dev, tag=None): """ Create an uncertainties Variable @@ -1037,4 +631,4 @@ def ufloat(nominal_value, std_dev=None, tag=None): and `tag` which match the input values. 
""" - return Variable(nominal_value, std_dev, tag=tag) + return UFloat(nominal_value, std_dev, tag=tag) diff --git a/uncertainties/ops.py b/uncertainties/ops.py index d3904599..f8e861b9 100644 --- a/uncertainties/ops.py +++ b/uncertainties/ops.py @@ -6,6 +6,8 @@ from inspect import getfullargspec import numbers +from uncertainties.ucombo import UCombo + # Some types known to not depend on Variable objects are put in # CONSTANT_TYPES. The most common types can be put in front, as this # may slightly improve the execution speed. @@ -521,17 +523,13 @@ def f_with_affine_output(*args, **kwargs): # defined by (coefficient, argument) pairs, where 'argument' # is an AffineScalarFunc (for all AffineScalarFunc found as # argument of f): - linear_part = [] + uncertainty = UCombo(()) for pos in pos_w_uncert: - linear_part.append( - ( - # Coefficient: - derivatives_args_index[pos](*args_values, **kwargs), - # Linear part of the AffineScalarFunc expression: - args[pos]._linear_part, - ) - ) + arg_uncertainty = args[pos].uncertainty + if arg_uncertainty: + derivative_val = derivatives_args_index[pos](*args_values, **kwargs) + uncertainty += derivative_val * arg_uncertainty for name in names_w_uncert: # Optimization: caching of the automatic numerical @@ -539,24 +537,19 @@ def f_with_affine_output(*args, **kwargs): # discovered. 
This gives a speedup when the original # function is called repeatedly with the same keyword # arguments: - derivative = derivatives_all_kwargs.setdefault( + if not kwargs_uncert_values[name].uncertainty: + continue + derivative_func = derivatives_all_kwargs.setdefault( name, # Derivative never needed before: partial_derivative(f, name), ) - - linear_part.append( - ( - # Coefficient: - derivative(*args_values, **kwargs), - # Linear part of the AffineScalarFunc expression: - kwargs_uncert_values[name]._linear_part, - ) - ) + derivative_val = derivative_func(*args_values, **kwargs) + uncertainty += derivative_val * kwargs_uncert_values[name].uncertainty # The function now returns the necessary linear approximation # to the function: - return cls(f_nominal_value, linear_part) + return cls(f_nominal_value, uncertainty) f_with_affine_output = set_doc( """\ @@ -741,7 +734,7 @@ def to_affine_scalar(x): if isinstance(x, CONSTANT_TYPES): # No variable => no derivative: - return cls(x, {}) + return cls(x, UCombo(())) # Case of lists, etc. 
raise NotUpcast( diff --git a/uncertainties/ucombo.py b/uncertainties/ucombo.py new file mode 100644 index 00000000..527b69b2 --- /dev/null +++ b/uncertainties/ucombo.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +from collections import defaultdict +from math import sqrt +from typing import Optional, Tuple, Union +import uuid + + +class UAtom: + __slots__ = ["uuid", "tag", "_hash"] + + def __init__(self: UAtom, tag: Optional[str] = None): + self.tag = tag + self.uuid: uuid.UUID = uuid.uuid4() + self._hash = hash(self.uuid) + + def __eq__(self: UAtom, other: UAtom) -> bool: + return hash(self) == hash(other) + + def __hash__(self: UAtom) -> int: + return self._hash + + def __lt__(self: UAtom, other: UAtom) -> bool: + return self.uuid < other.uuid + + def __str__(self: UAtom) -> str: + uuid_abbrev = f"{str(self.uuid)[0:2]}..{str(self.uuid)[-2:]}" + if self.tag is not None: + label = f"{self.tag}, {uuid_abbrev}" + else: + label = uuid_abbrev + return f"{self.__class__.__name__}({label})" + + def __repr__(self: UAtom) -> str: + return f"{self.__class__.__name__}({self.uuid})" + + +class UCombo: + __slots__ = ["ucombo_tuple", "is_expanded", "_std_dev", "_expanded_dict", "_hash"] + + def __init__( + self: UCombo, + ucombo_tuple: Tuple[Tuple[Union[UAtom, UCombo], float], ...], + ): + self.ucombo_tuple = ucombo_tuple + self.is_expanded = False + self._std_dev = None + self._expanded_dict = None + self._hash = None + + @property + def expanded(self: UCombo) -> dict[UAtom, float]: + if self._expanded_dict is None: + term_list = list(self.ucombo_tuple) + self._expanded_dict = defaultdict(float) + while term_list: + term, weight = term_list.pop() + if isinstance(term, UAtom): + self._expanded_dict[term] += weight + elif term.is_expanded: + for sub_term, sub_weight in term.expanded.items(): + self._expanded_dict[sub_term] += weight * sub_weight + else: + for sub_term, sub_weight in term.ucombo_tuple: + term_list.append((sub_term, weight * sub_weight)) + 
self._expanded_dict = dict(self._expanded_dict) + self.is_expanded = True + return self._expanded_dict + + @property + def std_dev(self: UCombo) -> float: + if self._std_dev is None: + self._std_dev = sqrt(sum(weight**2 for weight in self.expanded.values())) + return self._std_dev + + def covariance(self: UCombo, other: UCombo): + # TODO: pull out to module function and cache + self_uatoms = set(self.expanded.keys()) + other_uatoms = set(other.expanded.keys()) + shared_uatoms = self_uatoms.intersection(other_uatoms) + covariance = 0 + for uatom in shared_uatoms: + covariance += self.expanded[uatom] * other.expanded[uatom] + return covariance + + def __add__(self: UCombo, other) -> UCombo: + if not isinstance(other, UCombo): + return NotImplemented + return UCombo(self.ucombo_tuple + other.ucombo_tuple) + + def __radd__(self: UCombo, other: UCombo) -> UCombo: + return self.__add__(other) + + def __mul__(self: UCombo, scalar: Union[float, int]) -> UCombo: + if not isinstance(scalar, (float, int)): + return NotImplemented + return UCombo(((self, float(scalar)),)) + + def __rmul__(self: UCombo, scalar: Union[float, int]) -> UCombo: + return self.__mul__(scalar) + + def __iter__(self: UCombo): + return iter(self.ucombo_tuple) + + def __bool__(self: UCombo) -> bool: + return bool(self.ucombo_tuple) + + def __str__(self: UCombo) -> str: + ret_str = "" + first = True + for term, weight in self: + if not first: + ret_str += " + " + else: + first = False + + if isinstance(term, UAtom): + ret_str += f"{weight}×{str(term)}" + else: + ret_str += f"{weight}×({str(term)})" + return ret_str + + def __hash__(self: UCombo) -> int: + if self._hash is None: + self._hash = hash( + tuple(sorted((key, value) for key, value in self.expanded.items())) + ) + return self._hash + + def __eq__(self: UCombo, other: UCombo) -> bool: + return hash(self) == hash(other) diff --git a/uncertainties/umath_core.py b/uncertainties/umath_core.py index 373f71fb..3038a7f0 100644 --- 
a/uncertainties/umath_core.py +++ b/uncertainties/umath_core.py @@ -19,7 +19,7 @@ # Local modules import uncertainties.core as uncert_core -from uncertainties.core import to_affine_scalar, AffineScalarFunc, LinearCombination +from uncertainties.core import to_affine_scalar, AffineScalarFunc ############################################################################### @@ -366,13 +366,14 @@ def ldexp(x, i): # Another approach would be to add an additional argument to # uncert_core.wrap() so that some arguments are automatically # considered as constants. + # TODO: This function is untested and probably broken right now aff_func = to_affine_scalar(x) # y must be an integer, for math.ldexp if aff_func._linear_part: return AffineScalarFunc( math.ldexp(aff_func.nominal_value, i), - LinearCombination([(2**i, aff_func._linear_part)]), + aff_func._linear_part * 2**i, ) else: # This function was not called with an AffineScalarFunc @@ -395,7 +396,7 @@ def frexp(x): Version of frexp that works for numbers with uncertainty, and also for regular numbers. """ - + # TODO: This function is untested and probably broken right now # The code below is inspired by uncert_core.wrap(). It is # simpler because only 1 argument is given, and there is no # delegation to other functions involved (as for __mul__, etc.). @@ -410,7 +411,7 @@ def frexp(x): # With frexp(x) = (m, e), x = m*2**e, so m = x*2**-e # and therefore dm/dx = 2**-e (as e in an integer that # does not vary when x changes): - LinearCombination([2**-exponent, aff_func._linear_part]), + aff_func._linear_part * 2**-exponent, ), # The exponent is an integer and is supposed to be # continuous (errors must be small): diff --git a/uncertainties/unumpy/core.py b/uncertainties/unumpy/core.py index 763f57d9..e6f2f83c 100644 --- a/uncertainties/unumpy/core.py +++ b/uncertainties/unumpy/core.py @@ -10,11 +10,11 @@ # user. 
# Standard modules: -from builtins import next from builtins import zip -from builtins import range +from functools import wraps import sys -import inspect +from typing import Callable, Union +from numbers import Real # 3rd-party modules: import numpy @@ -22,6 +22,9 @@ # Local modules: import uncertainties.umath_core as umath_core import uncertainties.core as uncert_core +from uncertainties import UFloat +from uncertainties.ucombo import UCombo + __all__ = [ # Factory functions: @@ -34,6 +37,182 @@ "matrix", ] + +def inject_to_args_kwargs(param, injected_arg, *args, **kwargs): + if isinstance(param, int): + new_kwargs = kwargs + new_args = [] + for idx, arg in enumerate(args): + if idx == param: + new_args.append(injected_arg) + else: + new_args.append(arg) + elif isinstance(param, str): + new_args = args + new_kwargs = kwargs + new_kwargs[param] = injected_arg + else: + raise TypeError(f"{param} must be an int or str, not {type(param)}.") + return new_args, new_kwargs + + +def get_args_kwargs_list(*args, **kwargs): + args_kwargs_list = [] + for idx, arg in enumerate(args): + args_kwargs_list.append((idx, arg)) + for key, arg in kwargs.items(): + args_kwargs_list.append((key, arg)) + return args_kwargs_list + + +SQRT_EPS = numpy.sqrt(sys.float_info.epsilon) + + +def array_numerical_partial_derivative( + f: Callable[..., Real], + target_param: Union[str, int], + array_multi_index: tuple = None, + *args, + **kwargs, +) -> float: + """ + Numerically calculate the partial derivative of a function f with respect to the + target_param (string name or position number of the float parameter to f to be + varied) holding all other arguments, *args and **kwargs, constant. + """ + if isinstance(target_param, int): + x = args[target_param] + else: + x = kwargs[target_param] + + if array_multi_index is None: + dx = abs(x) * SQRT_EPS # Numerical Recipes 3rd Edition, eq. 
5.7.5 + x_lower = x - dx + x_upper = x + dx + else: + dx = numpy.mean(numpy.abs(x)) * SQRT_EPS + x_lower = numpy.copy(x) + x_upper = numpy.copy(x) + x_lower[array_multi_index] -= dx + x_upper[array_multi_index] += dx + + lower_args, lower_kwargs = inject_to_args_kwargs( + target_param, + x_lower, + *args, + **kwargs, + ) + upper_args, upper_kwargs = inject_to_args_kwargs( + target_param, + x_upper, + *args, + **kwargs, + ) + + lower_y = f(*lower_args, **lower_kwargs) + upper_y = f(*upper_args, **upper_kwargs) + + derivative = (upper_y - lower_y) / (2 * dx) + return derivative + + +def to_uarray_func(func, derivs=None): + if derivs is None: + derivs = {} + + @wraps(func) + def wrapped(*args, **kwargs): + return_uarray = False + return_matrix = False + + float_args = [] + for arg in args: + if isinstance(arg, UFloat): + float_args.append(arg.nominal_value) + return_uarray = True + elif isinstance(arg, numpy.ndarray): + if isinstance(arg, numpy.matrix): + return_matrix = True + if isinstance(arg.flat[0], UFloat): + float_args.append(to_nominal_values(arg)) + return_uarray = True + else: + float_args.append(arg) + else: + float_args.append(arg) + + float_kwargs = {} + for key, arg in kwargs.items(): + if isinstance(arg, UFloat): + float_kwargs[key] = arg.nominal_value + return_uarray = True + elif isinstance(arg, numpy.ndarray): + if isinstance(arg, numpy.matrix): + return_matrix = True + if isinstance(arg.flat[0], UFloat): + float_kwargs[key] = to_nominal_values(arg) + return_uarray = True + else: + float_kwargs[key] = arg + else: + float_kwargs[key] = arg + + new_nominal_array = func(*float_args, **float_kwargs) + if not return_uarray: + return new_nominal_array + + args_kwargs_list = get_args_kwargs_list(*args, **kwargs) + + ucombo_array = numpy.full(new_nominal_array.shape, UCombo(())) + + for label, arg in args_kwargs_list: + if isinstance(arg, UFloat): + if label in derivs and derivs[label] is not None: + deriv_func = derivs[label] + deriv_arr = deriv_func( + 
label, + None, + *float_args, + **float_kwargs, + ) + else: + deriv_arr = array_numerical_partial_derivative( + func, label, None, *float_args, **float_kwargs + ) + ucombo_array += deriv_arr * arg.uncertainty + elif isinstance(arg, numpy.ndarray): + if isinstance(arg.flat[0], UFloat): + it = numpy.nditer(arg, flags=["multi_index", "refs_ok"]) + for sub_arg in it: + u_num = sub_arg.item() + if isinstance(u_num, UFloat): + multi_index = it.multi_index + if label in derivs and derivs[label] is not None: + deriv_func = derivs[label] + deriv_arr = deriv_func( + label, + multi_index, + *float_args, + **float_kwargs, + ) + else: + deriv_arr = array_numerical_partial_derivative( + func, + label, + multi_index, + *float_args, + **float_kwargs, + ) + ucombo_array += numpy.array(deriv_arr) * u_num.uncertainty + + u_array = numpy.vectorize(UFloat)(new_nominal_array, ucombo_array) + if return_matrix: + u_array = numpy.matrix(u_array) + return u_array + + return wrapped + + ############################################################################### # Utilities: @@ -70,6 +249,11 @@ ), ) +to_uncertainties = numpy.vectorize( + uncert_core.uncertainty, + otypes=[object], +) + def unumpy_to_numpy_matrix(arr): """ @@ -117,170 +301,6 @@ class from this module) are passed through untouched (because a return unumpy_to_numpy_matrix(to_std_devs(arr)) -############################################################################### - - -def derivative(u, var): - """ - Return the derivative of u along var, if u is an - uncert_core.AffineScalarFunc instance, and if var is one of the - variables on which it depends. Otherwise, return 0. - """ - if isinstance(u, uncert_core.AffineScalarFunc): - try: - return u.derivatives[var] - except KeyError: - return 0.0 - else: - return 0.0 - - -def wrap_array_func(func): - # !!! This function is not used in the code, except in the tests. - # - # !!! 
The implementation seems superficially similar to - # uncertainties.core.wrap(): is there code/logic duplication - # (which should be removed)? - """ - Return a version of the function func() that works even when - func() is given a NumPy array that contains numbers with - uncertainties, as first argument. - - This wrapper is similar to uncertainties.core.wrap(), except that - it handles an array argument instead of float arguments, and that - the result can be an array. - - However, the returned function is more restricted: the array - argument cannot be given as a keyword argument with the name in - the original function (it is not a drop-in replacement). - - func -- function whose first argument is a single NumPy array, - and which returns a NumPy array. - """ - - @uncert_core.set_doc( - """\ - Version of %s(...) that works even when its first argument is a NumPy - array that contains numbers with uncertainties. - - Warning: elements of the first argument array that are not - AffineScalarFunc objects must not depend on uncert_core.Variable - objects in any way. Otherwise, the dependence of the result in - uncert_core.Variable objects will be incorrect. - - Original documentation: - %s""" - % (func.__name__, func.__doc__) - ) - def wrapped_func(arr, *args, **kwargs): - # Nominal value: - arr_nominal_value = nominal_values(arr) - func_nominal_value = func(arr_nominal_value, *args, **kwargs) - - # The algorithm consists in numerically calculating the derivatives - # of func: - - # Variables on which the array depends are collected: - variables = set() - for element in arr.flat: - # floats, etc. might be present - if isinstance(element, uncert_core.AffineScalarFunc): - # !!!! The following forces an evaluation of the - # derivatives!? Isn't this very slow, when - # working with a large number of arrays? - # - # !! 
set() is only needed for Python 2 compatibility: - variables |= set(element.derivatives.keys()) - - # If the matrix has no variables, then the function value can be - # directly returned: - if not variables: - return func_nominal_value - - # Calculation of the derivatives of each element with respect - # to the variables. Each element must be independent of the - # others. The derivatives have the same shape as the output - # array (which might differ from the shape of the input array, - # in the case of the pseudo-inverse). - derivatives = numpy.vectorize(lambda _: {})(func_nominal_value) - for var in variables: - # A basic assumption of this package is that the user - # guarantees that uncertainties cover a zone where - # evaluated functions are linear enough. Thus, numerical - # estimates of the derivative should be good over the - # standard deviation interval. This is true for the - # common case of a non-zero standard deviation of var. If - # the standard deviation of var is zero, then var has no - # impact on the uncertainty of the function func being - # calculated: an incorrect derivative has no impact. One - # scenario can give incorrect results, however, but it - # should be extremely uncommon: the user defines a - # variable x with 0 standard deviation, sets y = func(x) - # through this routine, changes the standard deviation of - # x, and prints y; in this case, the uncertainty on y - # might be incorrect, because this program had no idea of - # the scale on which func() is linear, when it calculated - # the numerical derivative. - - # The standard deviation might be numerically too small - # for the evaluation of the derivative, though: we set the - # minimum variable shift. - - shift_var = max(var._std_dev / 1e5, 1e-8 * abs(var._nominal_value)) - # An exceptional case is that of var being exactly zero. - # In this case, an arbitrary shift is used for the - # numerical calculation of the derivative. 
The resulting - # derivative value might be quite incorrect, but this does - # not matter as long as the uncertainty of var remains 0, - # since it is, in this case, a constant. - if not shift_var: - shift_var = 1e-8 - - # Shift of all the elements of arr when var changes by shift_var: - shift_arr = array_derivative(arr, var) * shift_var - - # Origin value of array arr when var is shifted by shift_var: - shifted_arr_values = arr_nominal_value + shift_arr - func_shifted = func(shifted_arr_values, *args, **kwargs) - numerical_deriv = (func_shifted - func_nominal_value) / shift_var - - # Update of the list of variables and associated - # derivatives, for each element: - for derivative_dict, derivative_value in zip( - derivatives.flat, numerical_deriv.flat - ): - if derivative_value: - derivative_dict[var] = derivative_value - - # numbers with uncertainties are built from the result: - return numpy.vectorize(uncert_core.AffineScalarFunc)( - func_nominal_value, - numpy.vectorize(uncert_core.LinearCombination)(derivatives), - ) - - wrapped_func = uncert_core.set_doc( - """\ - Version of %s(...) that works even when its first argument is a NumPy - array that contains numbers with uncertainties. - - Warning: elements of the first argument array that are not - AffineScalarFunc objects must not depend on uncert_core.Variable - objects in any way. Otherwise, the dependence of the result in - uncert_core.Variable objects will be incorrect. - - Original documentation: - %s""" - % (func.__name__, func.__doc__) - )(wrapped_func) - - # It is easier to work with wrapped_func, which represents a - # wrapped version of 'func', when it bears the same name as - # 'func' (the name is used by repr(wrapped_func)). - wrapped_func.__name__ = func.__name__ - - return wrapped_func - - ############################################################################### # Arrays @@ -307,7 +327,7 @@ def uarray(nominal_values, std_devs=None): # ! 
Looking up uncert_core.Variable beforehand through # '_Variable = uncert_core.Variable' does not result in a # significant speed up: - lambda v, s: uncert_core.Variable(v, s), + lambda v, s: uncert_core.UFloat(v, s), otypes=[object], )(nominal_values, std_devs) @@ -315,191 +335,25 @@ def uarray(nominal_values, std_devs=None): ############################################################################### -def array_derivative(array_like, var): - """ - Return the derivative of the given array with respect to the - given variable. - - The returned derivative is a NumPy ndarray of the same shape as - array_like, that contains floats. - - array_like -- array-like object (list, etc.) that contains - scalars or numbers with uncertainties. - - var -- Variable object. - """ - return numpy.vectorize( - lambda u: derivative(u, var), - # The type is set because an - # integer derivative should not - # set the output type of the - # array: - otypes=[float], - )(array_like) - - -def func_with_deriv_to_uncert_func(func_with_derivatives): - # This function is used for instance for the calculation of the - # inverse and pseudo-inverse of a matrix with uncertainties. - """ - Return a function that can be applied to array-like objects that - contain numbers with uncertainties (lists, lists of lists, NumPy - arrays, etc.). - - func_with_derivatives -- defines a function that takes an - array-like object containing scalars and returns an array. Both - the value and the derivatives of this function with respect to - multiple scalar parameters are calculated by this - func_with_derivatives() argument. - - func_with_derivatives(arr, input_type, derivatives, *args, - **kwargs) must return an iterator. The first element returned by - this iterator is the value of the function at the n-dimensional - array-like 'arr' (with the correct type). The following elements - are arrays that represent the derivative of the function for each - derivative array from the iterator 'derivatives'. 
- - func_with_derivatives() takes the following arguments: - - arr -- NumPy ndarray of scalars where the function must be - evaluated. - - input_type -- data type of the input array-like object. This - type is used for determining the type that the function should - return. - - derivatives -- iterator that returns the derivatives of the - argument of the function with respect to multiple scalar - variables. func_with_derivatives() returns the derivatives of - the defined function with respect to these variables. - - args -- additional arguments that define the result (example: - for the pseudo-inverse numpy.linalg.pinv: numerical cutoff). - - Examples of func_with_derivatives: inv_with_derivatives(). - """ - - def wrapped_func(array_like, *args, **kwargs): - """ - array_like -- n-dimensional array-like object that contains - numbers with uncertainties (list, NumPy ndarray or matrix, - etc.). - - args -- additional arguments that are passed directly to - func_with_derivatives. - """ - - # The calculation below is not lazy, contrary to the linear - # error propagation done in AffineScalarFunc. Making it lazy - # in the same way would be quite a specific task: basically - # this would amount to generalizing scalar coefficients in - # core.LinearCombination to more general matrix - # multiplications, and to replace Variable differentials by - # full matrices of coefficients. This does not look very - # efficient, as matrices are quite big, and since caching the - # result of a few matrix functions that are not typically - # stringed one after the other (unlike a big sum of numbers) - # should not be needed. - - # So that .flat works even if array_like is a list: - array_version = numpy.asanyarray(array_like) - - # Variables on which the array depends are collected: - variables = set() - for element in array_version.flat: - # floats, etc. might be present - if isinstance(element, uncert_core.AffineScalarFunc): - # !!! 
set() is only needed for Python 2 compatibility: - variables |= set(element.derivatives.keys()) - - array_nominal = nominal_values(array_version) - # Function value, then derivatives at array_nominal (the - # derivatives are with respect to the variables contained in - # array_like): - func_then_derivs = func_with_derivatives( - array_nominal, - type(array_like), - (array_derivative(array_version, var) for var in variables), - *args, - **kwargs, - ) - - func_nominal_value = next(func_then_derivs) - - if not variables: - return func_nominal_value - - # The result is built progressively, with the contribution of - # each variable added in turn: - - # Calculation of the derivatives of the result with respect to - # the variables. - derivatives = numpy.array( - [{} for _ in range(func_nominal_value.size)], dtype=object - ).reshape(func_nominal_value.shape) - - # Memory-efficient approach. A memory-hungry approach would - # be to calculate the matrix derivatives will respect to all - # variables and then combine them into a matrix of - # AffineScalarFunc objects. The approach followed here is to - # progressively build the matrix of derivatives, by - # progressively adding the derivatives with respect to - # successive variables. 
- for var, deriv_wrt_var in zip(variables, func_then_derivs): - # Update of the list of variables and associated - # derivatives, for each element: - for derivative_dict, derivative_value in zip( - derivatives.flat, deriv_wrt_var.flat - ): - if derivative_value: - derivative_dict[var] = derivative_value - - # An array of numbers with uncertainties is built from the - # result: - result = numpy.vectorize(uncert_core.AffineScalarFunc)( - func_nominal_value, - numpy.vectorize(uncert_core.LinearCombination)(derivatives), - ) - - # NumPy matrices that contain numbers with uncertainties are - # better as unumpy matrices: - if isinstance(result, numpy.matrix): - result = result.view(matrix) - - return result - - return wrapped_func - - ########## Matrix inverse -def inv_with_derivatives(arr, input_type, derivatives): - """ - Defines the matrix inverse and its derivatives. - - See the definition of func_with_deriv_to_uncert_func() for its - detailed semantics. - """ - - inverse = numpy.linalg.inv(arr) - # The inverse of a numpy.matrix is a numpy.matrix. 
It is assumed
-    # that numpy.linalg.inv is such that other types yield
-    # numpy.ndarrays:
-    if issubclass(input_type, numpy.matrix):
-        inverse = inverse.view(numpy.matrix)
-    yield inverse
+def inv_deriv(label, multi_index, *args, **kwargs):
+    if isinstance(label, int):
+        arr = args[label]
+    elif isinstance(label, str):
+        arr = kwargs[label]
+    else:
+        raise ValueError

-    # It is mathematically convenient to work with matrices:
-    inverse_mat = numpy.asmatrix(inverse)
+    deriv_arr = numpy.zeros_like(arr)
+    deriv_arr[multi_index] = 1

-    # Successive derivatives of the inverse:
-    for derivative in derivatives:
-        derivative_mat = numpy.asmatrix(derivative)
-        yield -inverse_mat * derivative_mat * inverse_mat
+    inv_arr = numpy.linalg.inv(arr)
+    return -inv_arr @ deriv_arr @ inv_arr


-inv = func_with_deriv_to_uncert_func(inv_with_derivatives)
+inv = to_uarray_func(numpy.linalg.inv, derivs={0: inv_deriv, "a": inv_deriv})
 inv.__doc__ = (
     """\
 Version of numpy.linalg.inv that works with array-like objects
@@ -519,75 +373,51 @@ def inv_with_derivatives(arr, input_type, derivatives):
 ########## Matrix pseudo-inverse


-def pinv_with_derivatives(arr, input_type, derivatives, rcond):
+def pinv_deriv(label, multi_index, *args, **kwargs):
+    if isinstance(label, int):
+        A = args[label]
+    elif isinstance(label, str):
+        A = kwargs[label]
+    else:
+        raise ValueError
+
     """
-    Defines the matrix pseudo-inverse and its derivatives.
+    Analytical calculation of the derivative of the Pseudo-Inverse of matrix A with
+    respect to an element of matrix A. This calculation is done according to
+    formula (4.12) from

-    Works with real or complex matrices.
+    G. H. Golub, V. Pereyra, "The Differentiation of Pseudo-Inverses and Nonlinear Least
+    Squares Problems Whose Variables Separate", SIAM Journal on Numerical Analysis,
+    Vol. 10, No. 2 (Apr., 1973), pp. 413-432

-    See the definition of func_with_deriv_to_uncert_func() for its
-    detailed semantics. 
+ See also + http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse """ - inverse = numpy.linalg.pinv(arr, rcond) - # The pseudo-inverse of a numpy.matrix is a numpy.matrix. It is - # assumed that numpy.linalg.pinv is such that other types yield - # numpy.ndarrays: - if issubclass(input_type, numpy.matrix): - inverse = inverse.view(numpy.matrix) - yield inverse - - # It is mathematically convenient to work with matrices: - inverse_mat = numpy.asmatrix(inverse) - - # Formula (4.12) from The Differentiation of Pseudo-Inverses and - # Nonlinear Least Squares Problems Whose Variables - # Separate. Author(s): G. H. Golub and V. Pereyra. Source: SIAM - # Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973), - # pp. 413-432 - - # See also - # http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse - - # Shortcuts. All the following factors should be numpy.matrix objects: - PA = arr * inverse_mat - AP = inverse_mat * arr - factor21 = inverse_mat * inverse_mat.H - factor22 = numpy.eye(arr.shape[0]) - PA - factor31 = numpy.eye(arr.shape[1]) - AP - factor32 = inverse_mat.H * inverse_mat - - # Successive derivatives of the inverse: - for derivative in derivatives: - derivative_mat = numpy.asmatrix(derivative) - term1 = -inverse_mat * derivative_mat * inverse_mat - derivative_mat_H = derivative_mat.H - term2 = factor21 * derivative_mat_H * factor22 - term3 = factor31 * derivative_mat_H * factor32 - yield term1 + term2 + term3 - - -# Default rcond argument for the generalization of numpy.linalg.pinv: -# -# Most common modern case first: -try: - pinv_default = inspect.signature(numpy.linalg.pinv).parameters["rcond"].default -except AttributeError: # No inspect.signature() before Python 3.3 - try: - # In numpy 1.17+, pinv is wrapped using a decorator which unfortunately - # results in the metadata (argument defaults) being lost. 
However, we - # can still get at the original function using the __wrapped__ - # attribute (which is what inspect.signature() does). - pinv_default = numpy.linalg.pinv.__wrapped__.__defaults__[0] - except AttributeError: # Function not wrapped in NumPy < 1.17 - pinv_default = numpy.linalg.pinv.__defaults__[0] # Python 1, 2.6+: - -pinv_with_uncert = func_with_deriv_to_uncert_func(pinv_with_derivatives) - - -def pinv(array_like, rcond=pinv_default): - return pinv_with_uncert(array_like, rcond) + Aprime = numpy.zeros_like(A) + Aprime[multi_index] = 1 + + Aplus = numpy.linalg.pinv(*args, **kwargs) + + n, m = A.shape[-2:] + eye_n = numpy.eye(n) + eye_m = numpy.eye(m) + + ndim = len(A.shape) + trans_axes = list(range(ndim)) + trans_axes[0], trans_axes[1] = trans_axes[1], trans_axes[0] + + AplusT = numpy.transpose(Aplus, axes=trans_axes) + AprimeT = numpy.transpose(Aprime, axes=trans_axes) + + return ( + -Aplus @ Aprime @ Aplus + + Aplus @ AplusT @ AprimeT @ (eye_n - A @ Aplus) + + (eye_m - Aplus @ A) @ AprimeT @ AplusT @ Aplus + ) + +pinv = to_uarray_func(numpy.linalg.pinv, derivs={0: pinv_deriv, "a": pinv_deriv}) pinv = uncert_core.set_doc( """