From 3ef988a6eb34a5e769911611bc6f4ca8fe6cb0c7 Mon Sep 17 00:00:00 2001 From: andrewgsavage Date: Fri, 12 Jul 2024 07:38:01 +0100 Subject: [PATCH] pre-commit linting (#248) --- .github/workflows/lint.yml | 17 + .pre-commit-config.yaml | 18 + 00_prepare_for_PyPI.sh | 2 +- doc/conf.py | 108 +-- tests/helpers.py | 124 +-- tests/test_ulinalg.py | 24 +- tests/test_umath.py | 124 +-- tests/test_uncertainties.py | 1247 +++++++++++++++--------------- tests/test_unumpy.py | 86 +-- uncertainties/__init__.py | 13 +- uncertainties/core.py | 204 ++--- uncertainties/formatting.py | 417 +++++----- uncertainties/ops.py | 281 +++---- uncertainties/parsing.py | 93 ++- uncertainties/umath.py | 7 +- uncertainties/umath_core.py | 157 ++-- uncertainties/unumpy/__init__.py | 8 +- uncertainties/unumpy/core.py | 213 +++-- uncertainties/unumpy/ulinalg.py | 4 +- 19 files changed, 1659 insertions(+), 1488 deletions(-) create mode 100644 .github/workflows/lint.yml create mode 100644 .pre-commit-config.yaml diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..e2d26381 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,17 @@ +name: Lint + +on: [push, pull_request] + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: 3.x + - name: Lint + uses: pre-commit/action@v2.0.0 + with: + extra_args: --all-files --show-diff-on-failure diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..4bb03745 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.3.4 + hooks: + # Run the linter. + - id: ruff + types_or: [ python, pyi, jupyter ] + args: [ --fix ] + # Run the formatter. + - id: ruff-format + types_or: [ python, pyi, jupyter ] diff --git a/00_prepare_for_PyPI.sh b/00_prepare_for_PyPI.sh index 87e32d7b..84a9f8bc 100755 --- a/00_prepare_for_PyPI.sh +++ b/00_prepare_for_PyPI.sh @@ -7,7 +7,7 @@ # Fail the script at the first failed command (HOWEVER, maybe when there are # no commits to be done during the merges, the commands fail?): -#set -e +#set -e echo "****************************************************************" echo "WARNING: if any commit fails, RESOLVE IT before running this" diff --git a/doc/conf.py b/doc/conf.py index 7acd3457..4809038e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -13,83 +13,84 @@ from datetime import date -import sys, os +import sys +import os -sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath("..")) import uncertainties # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.append(os.path.abspath('.')) +# sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx_copybutton'] +extensions = ["sphinx.ext.autodoc", "sphinx_copybutton"] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'uncertainties' -copyright = f'2010–{date.today().year}, Eric O. LEBIGOT (EOL)' +project = "uncertainties" +copyright = f"2010–{date.today().year}, Eric O. LEBIGOT (EOL)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '1' +version = "1" # The full version, including alpha/beta/rc tags. release = uncertainties.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. -exclude_trees = ['_build'] +exclude_trees = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- @@ -97,9 +98,9 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme = 'sphinxdoc' -html_theme = 'bizstyle' +html_theme = "bizstyle" # html_theme = 'cloud' -html_theme = 'python_docs_theme' +html_theme = "python_docs_theme" # html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme @@ -108,52 +109,52 @@ # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = "uncertainties" # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
-#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = '_static/favicon.ico' +html_favicon = "_static/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html', 'globaltoc.html']} +html_sidebars = {"index": ["indexsidebar.html", "searchbox.html", "globaltoc.html"]} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False @@ -161,52 +162,57 @@ # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'uncertainties' +htmlhelp_basename = "uncertainties" # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'uncertainties.tex', u'uncertainties Python package Documentation', - u'Eric O. LEBIGOT (EOL)', 'manual'), + ( + "index", + "uncertainties.tex", + "uncertainties Python package Documentation", + "Eric O. LEBIGOT (EOL)", + "manual", + ), ] -#latex_engine = "xelatex" # Not recognized by readthedocs.io as of 2018-04-08 +# latex_engine = "xelatex" # Not recognized by readthedocs.io as of 2018-04-08 # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. 
-#latex_preamble = r'\DeclareUnicodeCharacter{207B}{$^-$}' +# latex_preamble = r'\DeclareUnicodeCharacter{207B}{$^-$}' latex_elements = { # Superscript -, etc. for pdflatex (unnecessary, with xelatex): - 'preamble': r''' + "preamble": r""" \DeclareUnicodeCharacter{207B}{$^-$} \DeclareUnicodeCharacter{22C5}{$\cdot$} -''' +""" } # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True diff --git a/tests/helpers.py b/tests/helpers.py index eec53dff..7dc7fcea 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -2,10 +2,11 @@ from math import isnan, isinf import uncertainties.core as uncert_core -from uncertainties.core import ufloat, AffineScalarFunc, ufloat_fromstr +from uncertainties.core import ufloat, AffineScalarFunc + def power_all_cases(op): - ''' + """ Checks all cases for the value and derivatives of power-like operator op (op is typically the built-in pow(), or math.pow()). @@ -19,7 +20,7 @@ def power_all_cases(op): (not all combinations are distinct: for instance x > 0 gives identical formulas for all p). - ''' + """ zero = ufloat(0, 0.1) zero2 = ufloat(0, 0.1) @@ -59,7 +60,7 @@ def power_all_cases(op): assert result.derivatives[zero] == 1 assert result.derivatives[one] == 0 - result = op(zero, 2*one) + result = op(zero, 2 * one) assert result.derivatives[zero] == 0 assert result.derivatives[one] == 0 @@ -88,13 +89,13 @@ def power_all_cases(op): def power_special_cases(op): - ''' + """ Checks special cases of the uncertainty power operator op (where op is typically the built-in pow or uncertainties.umath.pow). The values x = 0, x = 1 and x = NaN are special, as are null, integral and NaN values of p. - ''' + """ zero = ufloat(0, 0) one = ufloat(1, 0) @@ -105,8 +106,8 @@ def power_special_cases(op): # The outcome of 1**nan and nan**0 was undefined before Python # 2.6 (http://docs.python.org/library/math.html#math.pow): - assert op(float('nan'), zero) == 1.0 - assert op(one, float('nan')) == 1.0 + assert op(float("nan"), zero) == 1.0 + assert op(one, float("nan")) == 1.0 # …**0 == 1.0: assert op(p, 0) == 1.0 @@ -132,17 +133,18 @@ def power_special_cases(op): assert op(one, zero) == 1.0 assert op(one, p) == 1.0 # 1**… == 1.0: - assert op(1., (-p)) == 1.0 - assert op(1., zero) == 1.0 - assert op(1., p) == 1.0 + assert op(1.0, (-p)) == 1.0 + assert op(1.0, zero) == 1.0 + assert op(1.0, p) == 1.0 + def power_wrt_ref(op, ref_op): - ''' + """ Checks special cases of the uncertainty power operator op (where op is typically the built-in pow or uncertainties.umath.pow), by comparing its results to the reference power operator ref_op (which is typically the built-in pow or math.pow). - ''' + """ # Negative numbers with uncertainty can be exponentiated to an # integral power: @@ -160,6 +162,7 @@ def power_wrt_ref(op, ref_op): # Utilities for unit testing + def numbers_close(x, y, tolerance=1e-6): """ Returns True if the given floats are close enough. @@ -182,31 +185,33 @@ def numbers_close(x, y, tolerance=1e-6): return isnan(y) else: # Symmetric form of the test: - return 2*abs(x-y)/(abs(x)+abs(y)) < tolerance + return 2 * abs(x - y) / (abs(x) + abs(y)) < tolerance else: # Either x or y is zero return abs(x or y) < tolerance + def ufloats_close(x, y, tolerance=1e-6): - ''' + """ Tests if two numbers with uncertainties are close, as random variables: this is stronger than testing whether their nominal value and standard deviation are close. 
The tolerance is applied to both the nominal value and the standard deviation of the difference between the numbers. - ''' + """ + + diff = x - y + return numbers_close(diff.nominal_value, 0, tolerance) and numbers_close( + diff.std_dev, 0, tolerance + ) - diff = x-y - return (numbers_close(diff.nominal_value, 0, tolerance) - and numbers_close(diff.std_dev, 0, tolerance)) class DerivativesDiffer(Exception): pass -def compare_derivatives(func, numerical_derivatives, - num_args_list=None): +def compare_derivatives(func, numerical_derivatives, num_args_list=None): """ Checks the derivatives of a function 'func' (as returned by the wrap() wrapper), by comparing them to the @@ -228,18 +233,16 @@ def compare_derivatives(func, numerical_derivatives, # print "Testing", func.__name__ if not num_args_list: - # Detecting automatically the correct number of arguments is not # always easy (because not all values are allowed, etc.): num_args_table = { - 'atanh': [1], - 'log': [1, 2] # Both numbers of arguments are tested - } + "atanh": [1], + "log": [1, 2], # Both numbers of arguments are tested + } if funcname in num_args_table: num_args_list = num_args_table[funcname] else: - num_args_list = [] # We loop until we find reasonable function arguments: @@ -250,7 +253,7 @@ def compare_derivatives(func, numerical_derivatives, # certain functions from failing even though num_args # is their correct number of arguments # (e.g. math.ldexp(x, i), where i must be an integer) - func(*(1,)*num_args) + func(*(1,) * num_args) except TypeError: pass # Not the right number of arguments else: # No error @@ -258,33 +261,31 @@ def compare_derivatives(func, numerical_derivatives, num_args_list.append(num_args) if not num_args_list: - raise Exception("Can't find a reasonable number of arguments" - " for function '%s'." % funcname) + raise Exception( + "Can't find a reasonable number of arguments" + " for function '%s'." % funcname + ) for num_args in num_args_list: - # Argument numbers that will have a random integer value: integer_arg_nums = set() - if funcname == 'ldexp': + if funcname == "ldexp": # The second argument must be an integer: integer_arg_nums.add(1) while True: try: - # We include negative numbers, for more thorough tests: args = [] for arg_num in range(num_args): if arg_num in integer_arg_nums: args.append(random.choice(range(-10, 10))) else: - args.append( - uncert_core.Variable(random.random()*4-2, 0)) + args.append(uncert_core.Variable(random.random() * 4 - 2, 0)) # 'args', but as scalar values: - args_scalar = [uncert_core.nominal_value(v) - for v in args] + args_scalar = [uncert_core.nominal_value(v) for v in args] func_approx = func(*args) @@ -292,11 +293,10 @@ def compare_derivatives(func, numerical_derivatives, # wrapping in wrap(): no test has to be performed. # Some functions also yield tuples... 
if isinstance(func_approx, AffineScalarFunc): - # We compare all derivatives: - for (arg_num, (arg, numerical_deriv)) in ( - enumerate(zip(args, numerical_derivatives))): - + for arg_num, (arg, numerical_deriv) in enumerate( + zip(args, numerical_derivatives) + ): # Some arguments might not be differentiable: if isinstance(arg, int): continue @@ -308,12 +308,12 @@ def compare_derivatives(func, numerical_derivatives, # This message is useful: the user can see that # tests are really performed (instead of not being # performed, silently): - print("Testing derivative #%d of %s at %s" % ( - arg_num, funcname, args_scalar)) - - if not numbers_close(fixed_deriv_value, - num_deriv_value, 1e-4): + print( + "Testing derivative #%d of %s at %s" + % (arg_num, funcname, args_scalar) + ) + if not numbers_close(fixed_deriv_value, num_deriv_value, 1e-4): # It is possible that the result is NaN: if not isnan(func_approx): raise DerivativesDiffer( @@ -321,20 +321,27 @@ def compare_derivatives(func, numerical_derivatives, " wrong: at args = %s," " value obtained = %.16f," " while numerical approximation = %.16f." - % (arg_num, funcname, args, - fixed_deriv_value, num_deriv_value)) + % ( + arg_num, + funcname, + args, + fixed_deriv_value, + num_deriv_value, + ) + ) except ValueError as err: # Arguments out of range, or of wrong type # Factorial(real) lands here: - if str(err).startswith('factorial'): + if str(err).startswith("factorial"): integer_arg_nums = set([0]) continue # We try with different arguments # Some arguments might have to be integers, for instance: except TypeError as err: if len(integer_arg_nums) == num_args: - raise Exception("Incorrect testing procedure: unable to " - "find correct argument values for %s: %s" - % (funcname, err)) + raise Exception( + "Incorrect testing procedure: unable to " + "find correct argument values for %s: %s" % (funcname, err) + ) # Another argument might be forced to be an integer: integer_arg_nums.add(random.choice(range(num_args))) @@ -342,11 +349,12 @@ def compare_derivatives(func, numerical_derivatives, # We have found reasonable arguments, and the test passed: break + ############################################################################### try: - import numpy + import numpy # noqa except ImportError: pass else: @@ -370,20 +378,16 @@ def uarrays_close(m1, m2, precision=1e-4): # work on arrays that contain numbers with uncertainties, because # of the isinf() function. 
- for (elmt1, elmt2) in zip(m1.flat, m2.flat): - + for elmt1, elmt2 in zip(m1.flat, m2.flat): # For a simpler comparison, both elements are # converted to AffineScalarFunc objects: elmt1 = uncert_core.to_affine_scalar(elmt1) elmt2 = uncert_core.to_affine_scalar(elmt2) - if not numbers_close(elmt1.nominal_value, - elmt2.nominal_value, precision): + if not numbers_close(elmt1.nominal_value, elmt2.nominal_value, precision): return False - if not numbers_close(elmt1.std_dev, - elmt2.std_dev, precision): + if not numbers_close(elmt1.std_dev, elmt2.std_dev, precision): return False - - return True + return True diff --git a/tests/test_ulinalg.py b/tests/test_ulinalg.py index c6c1de4a..7b4652d6 100644 --- a/tests/test_ulinalg.py +++ b/tests/test_ulinalg.py @@ -5,11 +5,13 @@ import numpy except ImportError: import sys + sys.exit() # There is no reason to test the interface to NumPy from uncertainties import unumpy, ufloat from helpers import uarrays_close + def test_list_inverse(): "Test of the inversion of a square matrix" @@ -22,8 +24,9 @@ def test_list_inverse(): # More type testing: mat_matrix = numpy.asmatrix(mat_list) - assert isinstance(unumpy.ulinalg.inv(mat_matrix), - type(numpy.linalg.inv(mat_matrix))) + assert isinstance( + unumpy.ulinalg.inv(mat_matrix), type(numpy.linalg.inv(mat_matrix)) + ) # unumpy.ulinalg should behave in the same way as numpy.linalg, # with respect to types: @@ -36,8 +39,8 @@ def test_list_inverse(): assert not isinstance(mat_list_inv, unumpy.matrix) # Individual element check: - assert isinstance(mat_list_inv[1,1], float) - assert mat_list_inv[1,1] == -1 + assert isinstance(mat_list_inv[1, 1], float) + assert mat_list_inv[1, 1] == -1 x = ufloat(1, 0.1) y = ufloat(2, 0.1) @@ -59,16 +62,19 @@ def test_list_pseudo_inverse(): # Internal consistency: the inverse and the pseudo-inverse yield # the same result on square matrices: assert uarrays_close(mat.I, unumpy.ulinalg.pinv(mat), 1e-4) - assert uarrays_close(unumpy.ulinalg.inv(mat), - # Support for the optional pinv argument is - # tested: - unumpy.ulinalg.pinv(mat, 1e-15), 1e-4) + assert uarrays_close( + unumpy.ulinalg.inv(mat), + # Support for the optional pinv argument is + # tested: + unumpy.ulinalg.pinv(mat, 1e-15), + 1e-4, + ) # Non-square matrices: x = ufloat(1, 0.1) y = ufloat(2, 0.1) mat1 = unumpy.matrix([[x, y]]) # "Long" matrix - mat2 = unumpy.matrix([[x, y], [1, 3+x], [y, 2*x]]) # "Tall" matrix + mat2 = unumpy.matrix([[x, y], [1, 3 + x], [y, 2 * x]]) # "Tall" matrix # Internal consistency: assert uarrays_close(mat1.I, unumpy.ulinalg.pinv(mat1, 1e-10)) diff --git a/tests/test_umath.py b/tests/test_umath.py index 573c6b6a..354e7588 100644 --- a/tests/test_umath.py +++ b/tests/test_umath.py @@ -1,15 +1,21 @@ import math -from math import isnan, isinf +from math import isnan from uncertainties import ufloat import uncertainties.core as uncert_core import uncertainties.umath_core as umath_core -from helpers import (power_special_cases, power_all_cases, power_wrt_ref, - compare_derivatives, numbers_close) +from helpers import ( + power_special_cases, + power_all_cases, + power_wrt_ref, + compare_derivatives, + numbers_close, +) ############################################################################### # Unit tests + def test_fixed_derivatives_math_funcs(): """ Comparison between function derivatives and numerical derivatives. 
@@ -22,7 +28,8 @@ def test_fixed_derivatives_math_funcs(): # Numerical derivatives of func: the nominal value of func() results # is used as the underlying function: numerical_derivatives = uncert_core.NumericalDerivatives( - lambda *args: func(*args)) + lambda *args: func(*args) + ) compare_derivatives(func, numerical_derivatives) # Functions that are not in umath_core.many_scalars_to_scalar_funcs: @@ -31,33 +38,32 @@ def test_fixed_derivatives_math_funcs(): # modf(): returns a tuple: def frac_part_modf(x): return umath_core.modf(x)[0] + def int_part_modf(x): return umath_core.modf(x)[1] compare_derivatives( - frac_part_modf, - uncert_core.NumericalDerivatives( - lambda x: frac_part_modf(x))) + frac_part_modf, uncert_core.NumericalDerivatives(lambda x: frac_part_modf(x)) + ) compare_derivatives( - int_part_modf, - uncert_core.NumericalDerivatives( - lambda x: int_part_modf(x))) + int_part_modf, uncert_core.NumericalDerivatives(lambda x: int_part_modf(x)) + ) ## # frexp(): returns a tuple: def mantissa_frexp(x): return umath_core.frexp(x)[0] + def exponent_frexp(x): return umath_core.frexp(x)[1] compare_derivatives( - mantissa_frexp, - uncert_core.NumericalDerivatives( - lambda x: mantissa_frexp(x))) + mantissa_frexp, uncert_core.NumericalDerivatives(lambda x: mantissa_frexp(x)) + ) compare_derivatives( - exponent_frexp, - uncert_core.NumericalDerivatives( - lambda x: exponent_frexp(x))) + exponent_frexp, uncert_core.NumericalDerivatives(lambda x: exponent_frexp(x)) + ) + def test_compound_expression(): """ @@ -67,7 +73,7 @@ def test_compound_expression(): x = ufloat(3, 0.1) # Prone to numerical errors (but not much more than floats): - assert umath_core.tan(x) == umath_core.sin(x)/umath_core.cos(x) + assert umath_core.tan(x) == umath_core.sin(x) / umath_core.cos(x) def test_numerical_example(): @@ -79,11 +85,14 @@ def test_numerical_example(): # for all analytical and numerical derivatives, which would make # test_fixed_derivatives_math_funcs() succeed despite incorrect # calculations: - assert ("%.6f +/- %.6f" % (result.nominal_value, result.std_dev) - == "0.001593 +/- 0.010000") + assert ( + "%.6f +/- %.6f" % (result.nominal_value, result.std_dev) + == "0.001593 +/- 0.010000" + ) # Regular calculations should still work: - assert("%.11f" % umath_core.sin(3) == "0.14112000806") + assert "%.11f" % umath_core.sin(3) == "0.14112000806" + def test_monte_carlo_comparison(): """ @@ -99,6 +108,7 @@ def test_monte_carlo_comparison(): import numpy.random except ImportError: import warnings + warnings.warn("Test not performed because NumPy is not available") return @@ -122,8 +132,9 @@ def function(x, y): nominal_value_this_module = function_result_this_module.nominal_value # Covariances "f*f", "f*x", "f*y": - covariances_this_module = numpy.array(uncert_core.covariance_matrix( - (x, y, function_result_this_module))) + covariances_this_module = numpy.array( + uncert_core.covariance_matrix((x, y, function_result_this_module)) + ) def monte_carlo_calc(n_samples): """ @@ -131,10 +142,8 @@ def monte_carlo_calc(n_samples): median, and the covariances between (x, y, function(x, y)). """ # Result of a Monte-Carlo simulation: - x_samples = numpy.random.normal(x.nominal_value, x.std_dev, - n_samples) - y_samples = numpy.random.normal(y.nominal_value, y.std_dev, - n_samples) + x_samples = numpy.random.normal(x.nominal_value, x.std_dev, n_samples) + y_samples = numpy.random.normal(y.nominal_value, y.std_dev, n_samples) # !! 
astype() is a fix for median() in NumPy 1.8.0: function_samples = function(x_samples, y_samples).astype(float) @@ -145,7 +154,6 @@ def monte_carlo_calc(n_samples): (nominal_value_samples, covariances_samples) = monte_carlo_calc(1000000) - ## Comparison between both results: # The covariance matrices must be close: @@ -159,14 +167,13 @@ def monte_carlo_calc(n_samples): # occurrences of numbers_close. assert numpy.vectorize(numbers_close)( - covariances_this_module, - covariances_samples, - 0.06).all(), ( + covariances_this_module, covariances_samples, 0.06 + ).all(), ( "The covariance matrices do not coincide between" " the Monte-Carlo simulation and the direct calculation:\n" "* Monte-Carlo:\n%s\n* Direct calculation:\n%s" % (covariances_samples, covariances_this_module) - ) + ) # The nominal values must be close: assert numbers_close( @@ -175,14 +182,16 @@ def monte_carlo_calc(n_samples): # The scale of the comparison depends on the standard # deviation: the nominal values can differ by a fraction of # the standard deviation: - math.sqrt(covariances_samples[2, 2]) - / abs(nominal_value_samples) * 0.5), ( + math.sqrt(covariances_samples[2, 2]) / abs(nominal_value_samples) * 0.5, + ), ( "The nominal value (%f) does not coincide with that of" " the Monte-Carlo simulation (%f), for a standard deviation of %f." - % (nominal_value_this_module, - nominal_value_samples, - math.sqrt(covariances_samples[2, 2])) + % ( + nominal_value_this_module, + nominal_value_samples, + math.sqrt(covariances_samples[2, 2]), ) + ) def test_math_module(): @@ -212,7 +221,6 @@ def test_math_module(): # Functions that give locally constant results are tested: they # should give the same result as their float equivalent: for name in umath_core.locally_cst_funcs: - try: func = getattr(umath_core, name) except AttributeError: @@ -221,7 +229,7 @@ def test_math_module(): assert func(x) == func(x.nominal_value) # The type should be left untouched. For example, isnan() # should always give a boolean: - assert type(func(x)) == type(func(x.nominal_value)) + assert isinstance(func(x), type(func(x.nominal_value))) # The same exceptions should be generated when numbers with uncertainties # are used: @@ -242,24 +250,25 @@ def test_math_module(): except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: - raise Exception('%s exception expected' % exception_class.__name__) + raise Exception("%s exception expected" % exception_class.__name__) try: umath_core.log(ufloat(0, 0)) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: - raise Exception('%s exception expected' % exception_class.__name__) + raise Exception("%s exception expected" % exception_class.__name__) try: umath_core.log(ufloat(0, 1)) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: - raise Exception('%s exception expected' % exception_class.__name__) + raise Exception("%s exception expected" % exception_class.__name__) + def test_hypot(): - ''' + """ Special cases where derivatives cannot be calculated: - ''' + """ x = ufloat(0, 1) y = ufloat(0, 2) # Derivatives that cannot be calculated simply return NaN, with no @@ -268,20 +277,22 @@ def test_hypot(): assert isnan(result.derivatives[x]) assert isnan(result.derivatives[y]) + def test_power_all_cases(): - ''' + """ Test special cases of umath_core.pow(). 
- ''' + """ power_all_cases(umath_core.pow) + # test_power_special_cases() is similar to # test_uncertainties.py:test_power_special_cases(), but with small # differences: the built-in pow() and math.pow() are slightly # different: def test_power_special_cases(): - ''' + """ Checks special cases of umath_core.pow(). - ''' + """ power_special_cases(umath_core.pow) @@ -299,20 +310,20 @@ def test_power_special_cases(): except Exception as err_math: # Python 3 does not make exceptions local variables: they are # restricted to their except block: - err_math_args = err_math.args - exception_class = err_math.__class__ + err_math_args = err_math.args # noqa + exception_class = err_math.__class__ # noqa # http://stackoverflow.com/questions/10282674/difference-between-the-built-in-pow-and-math-pow-for-floats-in-python try: umath_core.pow(ufloat(0, 0.1), negative) - except exception_class as err: # "as err", for Python 2.6+ + except exception_class: # "as err", for Python 2.6+ pass else: - raise Exception('%s exception expected' % exception_class.__name__) + raise Exception("%s exception expected" % exception_class.__name__) try: - result = umath_core.pow(negative, positive) + result = umath_core.pow(negative, positive) # noqa except exception_class: # Assumed: same exception as for pow(0, negative) # The reason why it should also fail in Python 3 is that the # result of Python 3 is a complex number, which uncertainties @@ -321,10 +332,11 @@ def test_power_special_cases(): # know how to calculate it. pass else: - raise Exception('%s exception expected' % exception_class.__name__) + raise Exception("%s exception expected" % exception_class.__name__) + def test_power_wrt_ref(): - ''' + """ Checks special cases of the umath_core.pow() power operator. - ''' - power_wrt_ref(umath_core.pow, math.pow) \ No newline at end of file + """ + power_wrt_ref(umath_core.pow, math.pow) diff --git a/tests/test_uncertainties.py b/tests/test_uncertainties.py index 22b134ea..54852ae8 100644 --- a/tests/test_uncertainties.py +++ b/tests/test_uncertainties.py @@ -1,22 +1,29 @@ - import copy import math import sys +import random # noqa from math import isnan import uncertainties.core as uncert_core from uncertainties.core import ufloat, AffineScalarFunc, ufloat_fromstr from uncertainties import formatting from uncertainties import umath -from helpers import (power_special_cases, power_all_cases, power_wrt_ref,numbers_close, - ufloats_close, compare_derivatives, uarrays_close) +from helpers import ( + power_special_cases, + power_all_cases, + power_wrt_ref, + numbers_close, + ufloats_close, + compare_derivatives, + uarrays_close, +) def test_value_construction(): - ''' + """ Tests the various means of constructing a constant number with uncertainty *without a string* (see test_ufloat_fromstr(), for this). - ''' + """ ## Simple construction: x = ufloat(3, 0.14) @@ -25,16 +32,16 @@ def test_value_construction(): assert x.tag is None # ... with tag as positional argument: - x = ufloat(3, 0.14, 'pi') + x = ufloat(3, 0.14, "pi") assert x.nominal_value == 3 assert x.std_dev == 0.14 - assert x.tag == 'pi' + assert x.tag == "pi" # ... 
with tag keyword: - x = ufloat(3, 0.14, tag='pi') + x = ufloat(3, 0.14, tag="pi") assert x.nominal_value == 3 assert x.std_dev == 0.14 - assert x.tag == 'pi' + assert x.tag == "pi" # Negative standard deviations should be caught in a nice way # (with the right exception): @@ -48,11 +55,12 @@ def test_value_construction(): try: ufloat(1) # Form that has never been allowed - except: + except TypeError: pass else: raise Exception("An exception should be raised") + def test_ufloat_fromstr(): "Input of numbers with uncertainties as a string" @@ -79,44 +87,37 @@ def test_ufloat_fromstr(): "169.0(7)": (169, 0.7), "-0.1+/-1": (-0.1, 1), "-13e-2+/-1e2": (-13e-2, 1e2), - '-14.(15)': (-14, 15), - '-100.0(15)': (-100, 1.5), - '14.(15)': (14, 15), + "-14.(15)": (-14, 15), + "-100.0(15)": (-100, 1.5), + "14.(15)": (14, 15), # Global exponent: - '(3.141+/-0.001)E+02': (314.1, 0.1), - - + "(3.141+/-0.001)E+02": (314.1, 0.1), ## Pretty-print notation: - # ± sign, global exponent (not pretty-printed): - u'(3.141±0.001)E+02': (314.1, 0.1), + "(3.141±0.001)E+02": (314.1, 0.1), # ± sign, individual exponent: - u'3.141E+02±0.001e2': (314.1, 0.1), + "3.141E+02±0.001e2": (314.1, 0.1), # ± sign, times symbol, superscript (= full pretty-print): - u'(3.141 ± 0.001) × 10²': (314.1, 0.1), - + "(3.141 ± 0.001) × 10²": (314.1, 0.1), ## Others - # Forced parentheses: - '(2 +/- 0.1)': (2, 0.1), - + "(2 +/- 0.1)": (2, 0.1), # NaN uncertainty: - u'(3.141±nan)E+02': (314.1, float('nan')), - '3.141e+02+/-nan': (314.1, float('nan')), - '3.4(nan)e10': (3.4e10, float('nan')), + "(3.141±nan)E+02": (314.1, float("nan")), + "3.141e+02+/-nan": (314.1, float("nan")), + "3.4(nan)e10": (3.4e10, float("nan")), # NaN value: - 'nan+/-3.14e2': (float('nan'), 314), + "nan+/-3.14e2": (float("nan"), 314), # "Double-floats" - '(-3.1415 +/- 1e-4)e+200': (-3.1415e200, 1e196), - '(-3.1415e-10 +/- 1e-4)e+200': (-3.1415e190, 1e196), + "(-3.1415 +/- 1e-4)e+200": (-3.1415e200, 1e196), + "(-3.1415e-10 +/- 1e-4)e+200": (-3.1415e190, 1e196), # Special float representation: - '-3(0.)': (-3, 0) - } - - for (representation, values) in tests.items(): + "-3(0.)": (-3, 0), + } + for representation, values in tests.items(): # We test the fact that surrounding spaces are removed: - representation = u' {} '.format(representation) + representation = " {} ".format(representation) # Without tag: num = ufloat_fromstr(representation) @@ -125,19 +126,21 @@ def test_ufloat_fromstr(): assert num.tag is None # With a tag as positional argument: - num = ufloat_fromstr(representation, 'test variable') + num = ufloat_fromstr(representation, "test variable") assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) - assert num.tag == 'test variable' + assert num.tag == "test variable" # With a tag as keyword argument: - num = ufloat_fromstr(representation, tag='test variable') + num = ufloat_fromstr(representation, tag="test variable") assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) - assert num.tag == 'test variable' + assert num.tag == "test variable" + ############################################################################### + # Test of correctness of the fixed (usually analytical) derivatives: def test_fixed_derivatives_basic_funcs(): """ @@ -159,7 +162,8 @@ def check_op(op, num_args): # by definition, to AffineScalarFunc objects: we first map # possible scalar arguments (used for calculating # derivatives) to AffineScalarFunc objects: - lambda *args: 
func(*map(uncert_core.to_affine_scalar, args))) + lambda *args: func(*map(uncert_core.to_affine_scalar, args)) + ) compare_derivatives(func, numerical_derivatives, [num_args]) # Operators that take 1 value: @@ -180,14 +184,14 @@ def test_copy(): y = copy.copy(x) assert x != y - assert not(x == y) + assert not (x == y) assert y in y.derivatives.keys() # y must not copy the dependence on x z = copy.deepcopy(x) assert x != z # Copy tests on expressions: - t = x + 2*z + t = x + 2 * z # t depends on x: assert x in t.derivatives @@ -196,16 +200,18 @@ def test_copy(): t_copy = copy.copy(t) # Shallow copy: the variables on which t depends are not copied: assert x in t_copy.derivatives - assert (uncert_core.covariance_matrix([t, z]) == - uncert_core.covariance_matrix([t_copy, z])) + assert uncert_core.covariance_matrix([t, z]) == uncert_core.covariance_matrix( + [t_copy, z] + ) # However, the relationship between a deep copy and the original # variables should be broken, since the deep copy created new, # independent variables: t_deepcopy = copy.deepcopy(t) assert x not in t_deepcopy.derivatives - assert (uncert_core.covariance_matrix([t, z]) != - uncert_core.covariance_matrix([t_deepcopy, z])) + assert uncert_core.covariance_matrix([t, z]) != uncert_core.covariance_matrix( + [t_deepcopy, z] + ) # Test of implementations with weak references: @@ -219,20 +225,25 @@ def test_copy(): assert y in list(y.derivatives.keys()) + ## Classes for the pickling tests (put at the module level, so that ## they can be unpickled): + # Subclass without slots: class NewVariable_dict(uncert_core.Variable): pass + # Subclass with slots defined by a tuple: class NewVariable_slots_tuple(uncert_core.Variable): - __slots__ = ('new_attr',) + __slots__ = ("new_attr",) + # Subclass with slots defined by a string: class NewVariable_slots_str(uncert_core.Variable): - __slots__ = 'new_attr' + __slots__ = "new_attr" + def test_pickling(): "Standard pickle module integration." 
@@ -246,7 +257,7 @@ def test_pickling(): assert x != x_unpickled # Pickling creates copies ## Tests with correlations and AffineScalarFunc objects: - f = 2*x + f = 2 * x assert isinstance(f, AffineScalarFunc) (f_unpickled, x_unpickled2) = pickle.loads(pickle.dumps((f, x))) # Correlations must be preserved: @@ -254,16 +265,14 @@ def test_pickling(): ## Tests with subclasses: - for subclass in (NewVariable_dict, NewVariable_slots_tuple, - NewVariable_slots_str): - + for subclass in (NewVariable_dict, NewVariable_slots_tuple, NewVariable_slots_str): x = subclass(3, 0.14) # Pickling test with possibly uninitialized slots: pickle.loads(pickle.dumps(x)) # Unpickling test: - x.new_attr = 'New attr value' + x.new_attr = "New attr value" x_unpickled = pickle.loads(pickle.dumps(x)) # Must exist (from the slots of the parent class): x_unpickled.nominal_value @@ -279,17 +288,17 @@ def test_pickling(): # the pickling process must pickle the correct value (i.e., not # the value from __dict__): x = NewVariable_dict(3, 0.14) - x._nominal_value = 'in slots' + x._nominal_value = "in slots" # Corner case: __dict__ key which is also a slot name (it is # shadowed by the corresponding slot, so this is very unusual, # though): - x.__dict__['_nominal_value'] = 'in dict' + x.__dict__["_nominal_value"] = "in dict" # Additional __dict__ attribute: - x.dict_attr = 'dict attribute' + x.dict_attr = "dict attribute" x_unpickled = pickle.loads(pickle.dumps(x)) # We make sure that the data is still there and untouched: - assert x_unpickled._nominal_value == 'in slots' + assert x_unpickled._nominal_value == "in slots" assert x_unpickled.__dict__ == x.__dict__ ## @@ -303,22 +312,22 @@ def test_pickling(): x = uncert_core.LinearCombination({}) assert pickle.loads(pickle.dumps(x)).linear_combo == {} + def test_int_div(): "Integer division" # We perform all operations on floats, because derivatives can # otherwise be meaningless: - x = ufloat(3.9, 2)//2 - assert x.nominal_value == 1. + x = ufloat(3.9, 2) // 2 + assert x.nominal_value == 1.0 # All errors are supposed to be small, so the ufloat() # in x violates the assumption. 
Therefore, the following is # correct: assert x.std_dev == 0.0 + def test_comparison_ops(): "Test of comparison operators" - import random - # Operations on quantities equivalent to Python numbers must still # be correct: a = ufloat(-3, 0) @@ -340,18 +349,18 @@ def test_comparison_ops(): assert x > 1 # The limit case is not obvious: - assert not(x >= 3) - assert not(x < 3) + assert not (x >= 3) + assert not (x < 3) assert x == x # Comparaison between Variable and AffineScalarFunc: assert x == x + 0 # Comparaison between 2 _different_ AffineScalarFunc objects # representing the same value: - assert x/2 == x/2 + assert x / 2 == x / 2 # With uncorrelated result that have the same behavior (value and # standard error): - assert 2*ufloat(1, 0.1) != ufloat(2, 0.2) + assert 2 * ufloat(1, 0.1) != ufloat(2, 0.2) # Comparaison between 2 _different_ Variable objects # that are uncorrelated: assert x != ufloat(3, 0.1) @@ -359,11 +368,10 @@ def test_comparison_ops(): assert x != ufloat(3, 0.2) # Comparison to other types should work: - assert x != None # Not comparable - assert x-x == 0 # Comparable, even though the types are different + assert x is not None # Not comparable + assert x - x == 0 # Comparable, even though the types are different assert x != [1, 2] - #################### # Checks of the semantics of logical operations: they return True @@ -385,21 +393,16 @@ def test_all_comparison_ops(x, y): correctly implemented comparison operators. """ - import random - def random_float(var): """ Returns a random value for Variable var, in an infinitesimal interval withing its uncertainty. The case of a zero uncertainty is special. """ - return ((random.random()-0.5) * min(var.std_dev, 1e-5) - + var.nominal_value) + return (random.random() - 0.5) * min(var.std_dev, 1e-5) + var.nominal_value # All operations are tested: - for op in ["__%s__" % name - for name in('ne', 'eq', 'lt', 'le', 'gt', 'ge')]: - + for op in ["__%s__" % name for name in ("ne", "eq", "lt", "le", "gt", "ge")]: try: float_func = getattr(float, op) except AttributeError: # Python 2.3's floats don't have __ne__ @@ -416,8 +419,7 @@ def random_float(var): sampled_results.append(float_func(x.nominal_value, y.nominal_value)) for check_num in range(50): # Many points checked - sampled_results.append(float_func(random_float(x), - random_float(y))) + sampled_results.append(float_func(random_float(x), random_float(y))) min_result = min(sampled_results) max_result = max(sampled_results) @@ -425,34 +427,35 @@ def random_float(var): if min_result == max_result: correct_result = min_result else: - # Almost all results must be True, for the final value # to be True: num_min_result = sampled_results.count(min_result) # 1 exception is considered OK: - correct_result = (num_min_result == 1) + correct_result = num_min_result == 1 try: assert correct_result == getattr(x, op)(y) except AssertionError: print("Sampling results:", sampled_results) - raise Exception("Semantic value of %s %s (%s) %s not" - " correctly reproduced." - % (x, op, y, correct_result)) + raise Exception( + "Semantic value of %s %s (%s) %s not" + " correctly reproduced." 
% (x, op, y, correct_result) + ) # With different numbers: - test_all_comparison_ops(ufloat(3, 0.1), - ufloat(-2, 0.1)) - test_all_comparison_ops(ufloat(0, 0), # Special number - ufloat(1, 1)) - test_all_comparison_ops(ufloat(0, 0), # Special number - ufloat(0, 0.1)) + test_all_comparison_ops(ufloat(3, 0.1), ufloat(-2, 0.1)) + test_all_comparison_ops( + ufloat(0, 0), # Special number + ufloat(1, 1), + ) + test_all_comparison_ops( + ufloat(0, 0), # Special number + ufloat(0, 0.1), + ) # With identical numbers: - test_all_comparison_ops(ufloat(0, 0), - ufloat(0, 0)) - test_all_comparison_ops(ufloat(1, 1), - ufloat(1, 1)) + test_all_comparison_ops(ufloat(0, 0), ufloat(0, 0)) + test_all_comparison_ops(ufloat(1, 1), ufloat(1, 1)) def test_logic(): @@ -463,10 +466,10 @@ def test_logic(): z = ufloat(0, 0.1) t = ufloat(-1, 2) - assert bool(x) == True - assert bool(y) == False - assert bool(z) == True - assert bool(t) == True # Only infinitseimal neighborhood are used + assert bool(x) + assert not bool(y) + assert bool(z) + assert bool(t) # Only infinitseimal neighborhood are used def test_basic_access_to_data(): @@ -485,7 +488,7 @@ def test_basic_access_to_data(): # Details on the sources of error: a = ufloat(-1, 0.001) - y = 2*x + 3*x + 2 + a + y = 2 * x + 3 * x + 2 + a error_sources = y.error_components() assert len(error_sources) == 2 # 'a' and 'x' assert error_sources[x] == 0.05 @@ -500,17 +503,16 @@ def test_basic_access_to_data(): # Calculated values with uncertainties should not have a settable # standard deviation: - y = 2*x + y = 2 * x try: y.std_dev = 1 except AttributeError: pass else: - raise Exception( - "std_dev should not be settable for calculated results") + raise Exception("std_dev should not be settable for calculated results") # Calculation of deviations in units of the standard deviations: - assert 10/x.std_dev == x.std_score(10 + x.nominal_value) + assert 10 / x.std_dev == x.std_score(10 + x.nominal_value) # "In units of the standard deviation" is not always meaningful: x.std_dev = 0 @@ -519,15 +521,16 @@ def test_basic_access_to_data(): except ValueError: pass # Normal behavior + def test_correlations(): "Correlations between variables" a = ufloat(1, 0) x = ufloat(4, 0.1) - y = x*2 + a + y = x * 2 + a # Correlations cancel "naive" additions of uncertainties: assert y.std_dev != 0 - normally_zero = y - (x*2 + 1) + normally_zero = y - (x * 2 + 1) assert normally_zero.nominal_value == 0 assert normally_zero.std_dev == 0 @@ -547,13 +550,14 @@ def test_no_coercion(): else: raise Exception("Conversion to float() should fail with TypeError") + def test_wrapped_func_no_args_no_kwargs(): - ''' + """ Wrap a function that takes only positional-or-keyword parameters. - ''' + """ def f_auto_unc(x, y): - return 2*x+umath.sin(y) + return 2 * x + umath.sin(y) # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y): @@ -584,8 +588,7 @@ def f(x, y): ### Explicit derivatives: ## Fully defined derivatives: - f_wrapped = uncert_core.wrap(f, [lambda x, y: 2, - lambda x, y: math.cos(y)]) + f_wrapped = uncert_core.wrap(f, [lambda x, y: 2, lambda x, y: math.cos(y)]) assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y)) @@ -599,24 +602,26 @@ def f(x, y): # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x)) + def test_wrapped_func_args_no_kwargs(): - ''' + """ Wrap a function that takes only positional-or-keyword and var-positional parameters. 
- ''' + """ def f_auto_unc(x, y, *args): - return 2*x+umath.sin(y)+3*args[1] + return 2 * x + umath.sin(y) + 3 * args[1] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, *args): - assert not any(isinstance(value, uncert_core.UFloat) - for value in [x, y] + list(args)) + assert not any( + isinstance(value, uncert_core.UFloat) for value in [x, y] + list(args) + ) return f_auto_unc(x, y, *args) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) - s = 'string arg' + s = "string arg" z = uncert_core.ufloat(100, 3) args = [s, z, s] # var-positional parameters @@ -635,10 +640,15 @@ def f(x, y, *args): ### Explicit derivatives: ## Fully defined derivatives: - f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2, - lambda x, y, *args: math.cos(y), - None, - lambda x, y, *args: 3]) + f_wrapped = uncert_core.wrap( + f, + [ + lambda x, y, *args: 2, + lambda x, y, *args: math.cos(y), + None, + lambda x, y, *args: 3, + ], + ) assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args)) @@ -648,38 +658,39 @@ def f(x, y, *args): f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2]) assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args)) + def test_wrapped_func_no_args_kwargs(): - ''' + """ Wrap a function that takes only positional-or-keyword and var-keyword parameters. - ''' + """ def f_auto_unc(x, y, **kwargs): - return 2*x+umath.sin(y)+3*kwargs['z'] + return 2 * x + umath.sin(y) + 3 * kwargs["z"] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, **kwargs): - assert not any(isinstance(value, uncert_core.UFloat) - for value in [x, y] + list(kwargs.values())) + assert not any( + isinstance(value, uncert_core.UFloat) + for value in [x, y] + list(kwargs.values()) + ) return f_auto_unc(x, y, **kwargs) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) - s = 'string arg' + s = "string arg" z = uncert_core.ufloat(100, 3) - kwargs = {'s': s, 'z': z} # Arguments not in signature + kwargs = {"s": s, "z": z} # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) - assert ufloats_close(f_auto_unc(x, y, **kwargs), - f_wrapped(x, y, **kwargs)) + assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: - assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), - f_wrapped(y=y, x=x, **kwargs)) + assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: @@ -687,33 +698,26 @@ def f(x, y, **kwargs): # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None]) - assert ufloats_close(f_auto_unc(x, y, **kwargs), - f_wrapped(x, y, **kwargs)) + assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: - assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), - f_wrapped(y=y, x=x, **kwargs)) + assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: - f_wrapped = uncert_core.wrap(f, [None], {'z': None}) - assert ufloats_close(f_auto_unc(x, y, **kwargs), - f_wrapped(x, y, **kwargs)) + f_wrapped = uncert_core.wrap(f, [None], {"z": None}) + assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # 
Call with keyword arguments: - assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), - f_wrapped(y=y, x=x, **kwargs)) + assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: - f_wrapped = uncert_core.wrap(f, [None], - {'z': lambda x, y, **kwargs: 3}) - assert ufloats_close(f_auto_unc(x, y, **kwargs), - f_wrapped(x, y, **kwargs)) + f_wrapped = uncert_core.wrap(f, [None], {"z": lambda x, y, **kwargs: 3}) + assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: - assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), - f_wrapped(y=y, x=x, **kwargs)) + assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ### Explicit derivatives: @@ -721,98 +725,114 @@ def f(x, y, **kwargs): f_wrapped = uncert_core.wrap( f, [lambda x, y, **kwargs: 2, lambda x, y, **kwargs: math.cos(y)], - {'z:': lambda x, y, **kwargs: 3}) + {"z:": lambda x, y, **kwargs: 3}, + ) - assert ufloats_close(f_auto_unc(x, y, **kwargs), - f_wrapped(x, y, **kwargs)) + assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: - assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), - f_wrapped(y=y, x=x, **kwargs)) + assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: f_wrapped = uncert_core.wrap(f, [lambda x, y, **kwargs: 2]) - assert ufloats_close(f_auto_unc(x, y, **kwargs), - f_wrapped(x, y, **kwargs)) + assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: - assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), - f_wrapped(y=y, x=x, **kwargs)) + assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) + def test_wrapped_func_args_kwargs(): - ''' + """ Wrap a function that takes positional-or-keyword, var-positional and var-keyword parameters. 
- ''' + """ def f_auto_unc(x, y, *args, **kwargs): - return 2*x+umath.sin(y)+4*args[1]+3*kwargs['z'] + return 2 * x + umath.sin(y) + 4 * args[1] + 3 * kwargs["z"] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, *args, **kwargs): - assert not any(isinstance(value, uncert_core.UFloat) - for value in [x, y]+list(args)+list(kwargs.values())) + assert not any( + isinstance(value, uncert_core.UFloat) + for value in [x, y] + list(args) + list(kwargs.values()) + ) return f_auto_unc(x, y, *args, **kwargs) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) t = uncert_core.ufloat(1000, 4) - s = 'string arg' + s = "string arg" z = uncert_core.ufloat(100, 3) args = [s, t, s] - kwargs = {'u': s, 'z': z} # Arguments not in signature + kwargs = {"u": s, "z": z} # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) - assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), - f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) + assert ufloats_close( + f_auto_unc(x, y, *args, **kwargs), + f_wrapped(x, y, *args, **kwargs), + tolerance=1e-5, + ) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: - f_wrapped = uncert_core.wrap(f, [None, None, None, - lambda x, y, *args, **kwargs: 4]) - assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), - f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) + f_wrapped = uncert_core.wrap(f, [None, None, None, lambda x, y, *args, **kwargs: 4]) + assert ufloats_close( + f_auto_unc(x, y, *args, **kwargs), + f_wrapped(x, y, *args, **kwargs), + tolerance=1e-5, + ) # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: - f_wrapped = uncert_core.wrap(f, [None], {'z': None}) - assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), - f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) + f_wrapped = uncert_core.wrap(f, [None], {"z": None}) + assert ufloats_close( + f_auto_unc(x, y, *args, **kwargs), + f_wrapped(x, y, *args, **kwargs), + tolerance=1e-5, + ) # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: - f_wrapped = uncert_core.wrap(f, [None], - {'z': lambda x, y, *args, **kwargs: 3}) - assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), - f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) + f_wrapped = uncert_core.wrap(f, [None], {"z": lambda x, y, *args, **kwargs: 3}) + assert ufloats_close( + f_auto_unc(x, y, *args, **kwargs), + f_wrapped(x, y, *args, **kwargs), + tolerance=1e-5, + ) ### Explicit derivatives: ## Fully defined derivatives: f_wrapped = uncert_core.wrap( f, - [lambda x, y, *args, **kwargs: 2, - lambda x, y, *args, **kwargs: math.cos(y)], - {'z:': lambda x, y, *args, **kwargs: 3}) + [lambda x, y, *args, **kwargs: 2, lambda x, y, *args, **kwargs: math.cos(y)], + {"z:": lambda x, y, *args, **kwargs: 3}, + ) - assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), - f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) + assert ufloats_close( + f_auto_unc(x, y, *args, **kwargs), + f_wrapped(x, y, *args, **kwargs), + tolerance=1e-5, + ) ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: f_wrapped = uncert_core.wrap(f, [lambda x, y, *args, **kwargs: 2]) - assert ufloats_close(f_auto_unc(x, y, *args, **kwargs), - f_wrapped(x, y, *args, **kwargs), tolerance=1e-5) 
+ assert ufloats_close( + f_auto_unc(x, y, *args, **kwargs), + f_wrapped(x, y, *args, **kwargs), + tolerance=1e-5, + ) def test_wrapped_func(): @@ -831,13 +851,11 @@ def f(angle, *list_var): # We make sure that this function is only ever called with # numbers with no uncertainty (since it is wrapped): assert not isinstance(angle, uncert_core.UFloat) - assert not any(isinstance(arg, uncert_core.UFloat) - for arg in list_var) + assert not any(isinstance(arg, uncert_core.UFloat) for arg in list_var) return f_auto_unc(angle, *list_var) f_wrapped = uncert_core.wrap(f) - my_list = [1, 2, 3] ######################################## @@ -845,7 +863,7 @@ def f(angle, *list_var): # function: it should obtain the exact same result: assert f_wrapped(0, *my_list) == f(0, *my_list) # 1 == 1 +/- 0, so the type must be checked too: - assert type(f_wrapped(0, *my_list)) == type(f(0, *my_list)) + assert isinstance(f_wrapped(0, *my_list), type(f(0, *my_list))) ######################################## # Call with uncertainties: @@ -855,61 +873,64 @@ def f(angle, *list_var): # The random variables must be the same (full correlation): - assert ufloats_close(f_wrapped(angle, *[1, angle]), - f_auto_unc(angle, *[1, angle])) + assert ufloats_close(f_wrapped(angle, *[1, angle]), f_auto_unc(angle, *[1, angle])) - assert ufloats_close(f_wrapped(angle, *[list_value, angle]), - f_auto_unc(angle, *[list_value, angle])) + assert ufloats_close( + f_wrapped(angle, *[list_value, angle]), f_auto_unc(angle, *[list_value, angle]) + ) ######################################## # Non-numerical arguments, and explicit and implicit derivatives: def f(x, y, z, t, u): - return x+2*z+3*t+4*u + return x + 2 * z + 3 * t + 4 * u f_wrapped = uncert_core.wrap( - f, [lambda *args: 1, None, lambda *args:2, None]) # No deriv. for u + f, [lambda *args: 1, None, lambda *args: 2, None] + ) # No deriv. for u - assert f_wrapped(10, 'string argument', 1, 0, 0) == 12 + assert f_wrapped(10, "string argument", 1, 0, 0) == 12 x = uncert_core.ufloat(10, 1) - assert numbers_close(f_wrapped(x, 'string argument', x, x, x).std_dev, - (1+2+3+4)*x.std_dev) + assert numbers_close( + f_wrapped(x, "string argument", x, x, x).std_dev, (1 + 2 + 3 + 4) * x.std_dev + ) + def test_wrap_with_kwargs(): - ''' + """ Tests wrap() on functions with keyword arguments. Includes both wrapping a function that takes optional keyword arguments and calling a wrapped function with keyword arguments (optional or not). 
- ''' + """ # Version of f() that automatically works with numbers with # uncertainties: def f_auto_unc(x, y, *args, **kwargs): - return x + umath.sin(y) + 2*args[0] + 3*kwargs['t'] + return x + umath.sin(y) + 2 * args[0] + 3 * kwargs["t"] # We also add keyword arguments in the function which is wrapped: def f(x, y, *args, **kwargs): # We make sure that f is not called directly with a number with # uncertainty: - for value in [x, y]+list(args)+list(kwargs.values()): + for value in [x, y] + list(args) + list(kwargs.values()): assert not isinstance(value, uncert_core.UFloat) return f_auto_unc(x, y, *args, **kwargs) f_wrapped = uncert_core.wrap(f) - x = ufloat(1, 0.1) y = ufloat(10, 0.11) z = ufloat(100, 0.111) t = ufloat(0.1, 0.1111) - assert ufloats_close(f_wrapped(x, y, z, t=t), - f_auto_unc(x, y, z, t=t), tolerance=1e-5) + assert ufloats_close( + f_wrapped(x, y, z, t=t), f_auto_unc(x, y, z, t=t), tolerance=1e-5 + ) ######################################## @@ -917,16 +938,17 @@ def f(x, y, *args, **kwargs): # also test the automatic handling of additional *args arguments # beyond the number of supplied derivatives. - f_wrapped2 = uncert_core.wrap( - f, [None, lambda x, y, *args, **kwargs: math.cos(y)]) + f_wrapped2 = uncert_core.wrap(f, [None, lambda x, y, *args, **kwargs: math.cos(y)]) # The derivatives must be perfectly identical: # The *args parameter of f() is given as a keyword argument, so as # to try to confuse the code: - assert (f_wrapped2(x, y, z, t=t).derivatives[y] - == f_auto_unc(x, y, z, t=t).derivatives[y]) + assert ( + f_wrapped2(x, y, z, t=t).derivatives[y] + == f_auto_unc(x, y, z, t=t).derivatives[y] + ) # Derivatives supplied through the keyword-parameter dictionary of # derivatives, and also derivatives supplied for the @@ -935,58 +957,63 @@ def f(x, y, *args, **kwargs): f_wrapped3 = uncert_core.wrap( f, [None, None, lambda x, y, *args, **kwargs: 2], - {'t': lambda x, y, *args, **kwargs: 3}) + {"t": lambda x, y, *args, **kwargs: 3}, + ) # The derivatives should be exactly the same, because they are # obtained with the exact same analytic formula: - assert (f_wrapped3(x, y, z, t=t).derivatives[z] - == f_auto_unc(x, y, z, t=t).derivatives[z]) - assert (f_wrapped3(x, y, z, t=t).derivatives[t] - == f_auto_unc(x, y, z, t=t).derivatives[t]) + assert ( + f_wrapped3(x, y, z, t=t).derivatives[z] + == f_auto_unc(x, y, z, t=t).derivatives[z] + ) + assert ( + f_wrapped3(x, y, z, t=t).derivatives[t] + == f_auto_unc(x, y, z, t=t).derivatives[t] + ) ######################################## # Making sure that user-supplied derivatives are indeed called: class FunctionCalled(Exception): - ''' + """ Raised to signal that a function is indeed called. 
- ''' + """ + pass def failing_func(x, y, *args, **kwargs): raise FunctionCalled - f_wrapped4 = uncert_core.wrap( - f, - [None, failing_func], - {'t': failing_func}) + f_wrapped4 = uncert_core.wrap(f, [None, failing_func], {"t": failing_func}) try: f_wrapped4(x, 3.14, z, t=t) except FunctionCalled: pass else: - raise Exception('User-supplied derivative should be called') + raise Exception("User-supplied derivative should be called") try: f_wrapped4(x, y, z, t=3.14) except FunctionCalled: pass else: - raise Exception('User-supplied derivative should be called') + raise Exception("User-supplied derivative should be called") try: f_wrapped4(x, 3.14, z, t=3.14) except FunctionCalled: - raise Exception('User-supplied derivative should *not* be called') + raise Exception("User-supplied derivative should *not* be called") + ############################################################################### + def test_access_to_std_dev(): "Uniform access to the standard deviation" x = ufloat(1, 0.1) - y = 2*x + y = 2 * x # std_dev for Variable and AffineScalarFunc objects: assert uncert_core.std_dev(x) == x.std_dev @@ -996,14 +1023,16 @@ def test_access_to_std_dev(): assert uncert_core.std_dev([]) == 0 assert uncert_core.std_dev(None) == 0 + ############################################################################### + def test_covariances(): "Covariance matrix" x = ufloat(1, 0.1) - y = -2*x+10 - z = -3*x + y = -2 * x + 10 + z = -3 * x covs = uncert_core.covariance_matrix([x, y, z]) # Diagonal elements are simple: assert numbers_close(covs[0][0], 0.01) @@ -1015,19 +1044,20 @@ def test_covariances(): ############################################################################### def test_power_all_cases(): - ''' + """ Checks all cases for the value and derivatives of x**p. - ''' + """ power_all_cases(pow) ############################################################################### + def test_power_special_cases(): - ''' + """ Checks special cases of x**p. - ''' + """ power_special_cases(pow) # We want the same behavior for numbers with uncertainties and for @@ -1050,10 +1080,10 @@ def test_power_special_cases(): except ZeroDivisionError: pass else: - raise Exception('A proper exception should have been raised') + raise Exception("A proper exception should have been raised") try: - result = pow(negative, positive) + result = pow(negative, positive) # noqa except ValueError: # The reason why it should also fail in Python 3 is that the # result of Python 3 is a complex number, which uncertainties @@ -1062,64 +1092,68 @@ def test_power_special_cases(): # know how to calculate it. pass else: - raise Exception('A proper exception should have been raised') + raise Exception("A proper exception should have been raised") + def test_power_wrt_ref(): - ''' + """ Checks special cases of the built-in pow() power operator. - ''' + """ power_wrt_ref(pow, pow) ############################################################################### + def test_PDG_precision(): - ''' + """ Test of the calculation of the number of significant digits for the uncertainty. 
- ''' + """ # The 3 cases of the rounding rules are covered in each case: tests = { # Very big floats: 1.7976931348623157e308: (2, 1.7976931348623157e308), 0.5e308: (1, 0.5e308), - 0.9976931348623157e+308: (2, 1e308), + 0.9976931348623157e308: (2, 1e308), # Very small floats: 1.3e-323: (2, 1.3e-323), 5e-324: (1, 5e-324), - 9.99e-324: (2, 1e-323) - } + 9.99e-324: (2, 1e-323), + } - for (std_dev, result) in tests.items(): + for std_dev, result in tests.items(): assert formatting.PDG_precision(std_dev) == result + def test_repr(): - '''Test the representation of numbers with uncertainty.''' + """Test the representation of numbers with uncertainty.""" # The uncertainty is a power of 2, so that it can be exactly # represented: x = ufloat(3.14159265358979, 0.25) - assert repr(x) == '3.14159265358979+/-0.25' + assert repr(x) == "3.14159265358979+/-0.25" x = ufloat(3.14159265358979, 0) - assert repr(x) == '3.14159265358979+/-0' + assert repr(x) == "3.14159265358979+/-0" # Tagging: x = ufloat(3, 1, "length") - assert repr(x) == '< length = 3.0+/-1.0 >' + assert repr(x) == "< length = 3.0+/-1.0 >" + def test_format(): - '''Test the formatting of numbers with uncertainty.''' + """Test the formatting of numbers with uncertainty.""" # The way NaN is formatted with F, E and G depends on the version # of Python (NAN for Python 2.5+ at least): - NaN_EFG = '%F' % float('nan') + NaN_EFG = "%F" % float("nan") # !! The way NaN is formatted with F, E and G might depend on the # version of Python, if it is like NaN (could be tested with # Python 2.3 or 2.4 vs Python 2.7): - Inf_EFG = '%F' % float('inf') + Inf_EFG = "%F" % float("inf") # Tests of each point of the docstring of # AffineScalarFunc.__format__() in turn, mostly in the same order. @@ -1129,165 +1163,141 @@ def test_format(): # way, problems in the customization themselves are caught. 
tests = { # (Nominal value, uncertainty): {format: result,...} - # Usual float formatting, and individual widths, etc.: (3.1415, 0.0001): { - '*^+7.2f': '*+3.14*+/-*0.00**', - '+07.2f': '+003.14+/-0000.00', # 0 fill - '>10f': ' 3.141500+/- 0.000100', # Width and align - '11.3e': ' 3.142e+00+/- 0.000e+00', # Duplicated exponent - '0.4e': '3.1415e+00+/-0.0000e+00' # Forced double exponent + "*^+7.2f": "*+3.14*+/-*0.00**", + "+07.2f": "+003.14+/-0000.00", # 0 fill + ">10f": " 3.141500+/- 0.000100", # Width and align + "11.3e": " 3.142e+00+/- 0.000e+00", # Duplicated exponent + "0.4e": "3.1415e+00+/-0.0000e+00", # Forced double exponent }, - # Full generalization of float formatting: - (3.1415, 0.0001): { - '+09.2uf': '+03.14150+/-000.00010', + (3.1415, 0.0001): { # noqa + "+09.2uf": "+03.14150+/-000.00010", # Alignment is not available with the % formatting # operator of Python < 2.6: - '*^+9.2uf': '+3.14150*+/-*0.00010*', - '>9f': ' 3.14150+/- 0.00010' # Width and align + "*^+9.2uf": "+3.14150*+/-*0.00010*", + ">9f": " 3.14150+/- 0.00010", # Width and align }, - # Number of digits of the uncertainty fixed: (123.456789, 0.00123): { - '.1uf': '123.457+/-0.001', - '.2uf': '123.4568+/-0.0012', - '.3uf': '123.45679+/-0.00123', - '.2ue': '(1.234568+/-0.000012)e+02' + ".1uf": "123.457+/-0.001", + ".2uf": "123.4568+/-0.0012", + ".3uf": "123.45679+/-0.00123", + ".2ue": "(1.234568+/-0.000012)e+02", }, # Sign handling: (-123.456789, 0.00123): { - '.1uf': '-123.457+/-0.001', - '.2uf': '-123.4568+/-0.0012', - '.3uf': '-123.45679+/-0.00123', - '.2ue': '(-1.234568+/-0.000012)e+02' + ".1uf": "-123.457+/-0.001", + ".2uf": "-123.4568+/-0.0012", + ".3uf": "-123.45679+/-0.00123", + ".2ue": "(-1.234568+/-0.000012)e+02", }, # Uncertainty larger than the nominal value: - (12.3, 456.78): { - '': '12+/-457', - '.1uf': '12+/-457', - '.4uf': '12.3+/-456.8' - }, + (12.3, 456.78): {"": "12+/-457", ".1uf": "12+/-457", ".4uf": "12.3+/-456.8"}, # ... Same thing, but with an exponent: - (12.3, 456.78): { - '.1ue': '(0+/-5)e+02', - '.4ue': '(0.123+/-4.568)e+02', - '.4ueS': '0.123(4.568)e+02' + (12.3, 456.78): { # noqa + ".1ue": "(0+/-5)e+02", + ".4ue": "(0.123+/-4.568)e+02", + ".4ueS": "0.123(4.568)e+02", }, - - (23456.789123, 1234.56789123): { - '.6gS': '23456.8(1234.6)' - }, - + (23456.789123, 1234.56789123): {".6gS": "23456.8(1234.6)"}, # Test of the various float formats: the nominal value should # have a similar representation as if it were directly # represented as a float: (1234567.89, 0.1): { - '.0e': '(1+/-0)e+06', - 'e': '(1.23456789+/-0.00000010)e+06', - 'E': '(1.23456789+/-0.00000010)E+06', - 'f': '1234567.89+/-0.10', - 'F': '1234567.89+/-0.10', - 'g': '1234567.89+/-0.10', - 'G': '1234567.89+/-0.10', - '%': '(123456789+/-10)%' - }, - (1234567.89, 4.3): { - 'g': '1234568+/-4' + ".0e": "(1+/-0)e+06", + "e": "(1.23456789+/-0.00000010)e+06", + "E": "(1.23456789+/-0.00000010)E+06", + "f": "1234567.89+/-0.10", + "F": "1234567.89+/-0.10", + "g": "1234567.89+/-0.10", + "G": "1234567.89+/-0.10", + "%": "(123456789+/-10)%", }, + (1234567.89, 4.3): {"g": "1234568+/-4"}, (1234567.89, 43): { # Case where g triggers the exponent notation - 'g': '(1.23457+/-0.00004)e+06', - 'G': '(1.23457+/-0.00004)E+06' + "g": "(1.23457+/-0.00004)e+06", + "G": "(1.23457+/-0.00004)E+06", }, - - - (3.1415, 0.0001): { - '+09.2uf': '+03.14150+/-000.00010' - }, - + (3.1415, 0.0001): {"+09.2uf": "+03.14150+/-000.00010"}, # noqa (1234.56789, 0.1): { - '.0f': '(1234+/-0.)', # Approximate error indicated with "." 
- 'e': '(1.23456+/-0.00010)e+03', - 'E': '(1.23456+/-0.00010)E+03', - 'f': '1234.57+/-0.10', - 'F': '1234.57+/-0.10', - 'f': '1234.57+/-0.10', - 'F': '1234.57+/-0.10', - '%': '123457+/-10%' + ".0f": "(1234+/-0.)", # Approximate error indicated with "." + "e": "(1.23456+/-0.00010)e+03", + "E": "(1.23456+/-0.00010)E+03", + "f": "1234.57+/-0.10", + "F": "1234.57+/-0.10", + "f": "1234.57+/-0.10", # noqa + "F": "1234.57+/-0.10", # noqa + "%": "123457+/-10%", }, - # Percent notation: (0.42, 0.0055): { # Because '%' does 0.0055*100, the value # 0.5499999999999999 is obtained, which rounds to 0.5. The # original rounded value is 0.006. The same behavior is # found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'. - '.1u%': '(42.0+/-0.5)%', - '.1u%S': '42.0(5)%', - '%P': u'(42.0±0.5)%' + ".1u%": "(42.0+/-0.5)%", + ".1u%S": "42.0(5)%", + "%P": "(42.0±0.5)%", }, - # Particle Data Group automatic convention, including limit cases: - (1.2345678, 0.354): {'': '1.23+/-0.35'}, - (1.2345678, 0.3549): {'': '1.23+/-0.35'}, - (1.2345678, 0.355): {'': '1.2+/-0.4'}, - (1.5678, 0.355): {'': '1.6+/-0.4'}, - (1.2345678, 0.09499): {'': '1.23+/-0.09'}, - (1.2345678, 0.095): {'': '1.23+/-0.10'}, - + (1.2345678, 0.354): {"": "1.23+/-0.35"}, + (1.2345678, 0.3549): {"": "1.23+/-0.35"}, + (1.2345678, 0.355): {"": "1.2+/-0.4"}, + (1.5678, 0.355): {"": "1.6+/-0.4"}, + (1.2345678, 0.09499): {"": "1.23+/-0.09"}, + (1.2345678, 0.095): {"": "1.23+/-0.10"}, # Automatic extension of the uncertainty up to the decimal # point: (1000, 123): { - '.1uf': '1000+/-123', + ".1uf": "1000+/-123", # The nominal value has 1 <= mantissa < 10. The precision # is the number of significant digits of the uncertainty: - '.1ue': '(1.0+/-0.1)e+03' + ".1ue": "(1.0+/-0.1)e+03", }, - # Spectroscopic notation: (-1.23, 3.4): { - 'S': '-1.2(3.4)', - '.2ufS': '-1.2(3.4)', - '.3ufS': '-1.23(3.40)', + "S": "-1.2(3.4)", + ".2ufS": "-1.2(3.4)", + ".3ufS": "-1.23(3.40)", }, (-123.456, 0.123): { - 'S': '-123.46(12)', - '.1ufS': '-123.5(1)', - '.2ufS': '-123.46(12)', - '.3ufS': '-123.456(123)', + "S": "-123.46(12)", + ".1ufS": "-123.5(1)", + ".2ufS": "-123.46(12)", + ".3ufS": "-123.456(123)", }, (-123.456, 0.567): { - 'S': '-123.5(6)', - '.1ufS': '-123.5(6)', - '.2ufS': '-123.46(57)', - '.3ufS': '-123.456(567)', + "S": "-123.5(6)", + ".1ufS": "-123.5(6)", + ".2ufS": "-123.46(57)", + ".3ufS": "-123.456(567)", }, (-123.456, 0.004): { # The decimal point shows that the uncertainty is not # exact: - '.2fS': '-123.46(0.00)' + ".2fS": "-123.46(0.00)" }, - # LaTeX notation: # - (1234.56789, 0.1): { - 'eL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}', - 'EL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}', - 'fL': r'1234.57 \pm 0.10', - 'FL': r'1234.57 \pm 0.10', - 'fL': r'1234.57 \pm 0.10', - 'FL': r'1234.57 \pm 0.10', - '%L': r'\left(123457 \pm 10\right) \%' + (1234.56789, 0.1): { # noqa + "eL": r"\left(1.23457 \pm 0.00010\right) \times 10^{3}", + "EL": r"\left(1.23457 \pm 0.00010\right) \times 10^{3}", + "fL": r"1234.57 \pm 0.10", + "FL": r"1234.57 \pm 0.10", + "fL": r"1234.57 \pm 0.10", # noqa + "FL": r"1234.57 \pm 0.10", # noqa + "%L": r"\left(123457 \pm 10\right) \%", }, # # ... combined with the spectroscopic notation: - (-1.23, 3.4): { - 'SL': '-1.2(3.4)', - 'LS': '-1.2(3.4)', - '.2ufSL': '-1.2(3.4)', - '.2ufLS': '-1.2(3.4)' + (-1.23, 3.4): { # noqa + "SL": "-1.2(3.4)", + "LS": "-1.2(3.4)", + ".2ufSL": "-1.2(3.4)", + ".2ufLS": "-1.2(3.4)", }, - # Special cases for the uncertainty (0, nan) and format # strings (extension S, L, U,..., global width, etc.). 
# @@ -1295,56 +1305,52 @@ def test_format(): # instead of 1.4 for Python 3.1. The problem does not appear # with 1.2, so 1.2 is used. (-1.2e-12, 0): { - '12.2gPL': u' -1.2×10⁻¹²± 0', + "12.2gPL": " -1.2×10⁻¹²± 0", # Pure "width" formats are not accepted by the % operator, # and only %-compatible formats are accepted, for Python < # 2.6: - '13S': ' -1.2(0)e-12', - '10P': u'-1.2×10⁻¹²± 0', - 'L': r'\left(-1.2 \pm 0\right) \times 10^{-12}', + "13S": " -1.2(0)e-12", + "10P": "-1.2×10⁻¹²± 0", + "L": r"\left(-1.2 \pm 0\right) \times 10^{-12}", # No factored exponent, LaTeX - '1L': r'-1.2 \times 10^{-12} \pm 0', - 'SL': r'-1.2(0) \times 10^{-12}', - 'SP': u'-1.2(0)×10⁻¹²' + "1L": r"-1.2 \times 10^{-12} \pm 0", + "SL": r"-1.2(0) \times 10^{-12}", + "SP": "-1.2(0)×10⁻¹²", }, - # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001 # instead of 1.4 for Python 3.1. The problem does not appear # with 1.2, so 1.2 is used. - (-1.2e-12, float('nan')): { - '.2uG': '(-1.2+/-%s)E-12' % NaN_EFG, # u ignored, format used - '15GS': ' -1.2(%s)E-12' % NaN_EFG, - 'SL': r'-1.2(\mathrm{nan}) \times 10^{-12}', # LaTeX NaN + (-1.2e-12, float("nan")): { + ".2uG": "(-1.2+/-%s)E-12" % NaN_EFG, # u ignored, format used + "15GS": " -1.2(%s)E-12" % NaN_EFG, + "SL": r"-1.2(\mathrm{nan}) \times 10^{-12}", # LaTeX NaN # Pretty-print priority, but not for NaN: - 'PSL': r'-1.2(\mathrm{nan})×10⁻¹²', - 'L': r'\left(-1.2 \pm \mathrm{nan}\right) \times 10^{-12}', + "PSL": r"-1.2(\mathrm{nan})×10⁻¹²", + "L": r"\left(-1.2 \pm \mathrm{nan}\right) \times 10^{-12}", # Uppercase NaN and LaTeX: - '.1EL': (r'\left(-1.2 \pm \mathrm{%s}\right) \times 10^{-12}' - % NaN_EFG), - '10': ' -1.2e-12+/- nan', - '15S': ' -1.2(nan)e-12' + ".1EL": (r"\left(-1.2 \pm \mathrm{%s}\right) \times 10^{-12}" % NaN_EFG), + "10": " -1.2e-12+/- nan", + "15S": " -1.2(nan)e-12", }, - (3.14e-10, 0.01e-10): { # Character (Unicode) strings: - u'P': u'(3.140±0.010)×10⁻¹⁰', # PDG rules: 2 digits - u'PL': u'(3.140±0.010)×10⁻¹⁰', # Pretty-print has higher priority + "P": "(3.140±0.010)×10⁻¹⁰", # PDG rules: 2 digits + "PL": "(3.140±0.010)×10⁻¹⁰", # Pretty-print has higher priority # Truncated non-zero uncertainty: - '.1e': '(3.1+/-0.0)e-10', - '.1eS': '3.1(0.0)e-10' + ".1e": "(3.1+/-0.0)e-10", + ".1eS": "3.1(0.0)e-10", }, - # Some special cases: - (1, float('nan')): { - 'g': '1+/-nan', - 'G': '1+/-%s' % NaN_EFG, - '%': '(100.000000+/-nan)%', # The % format type is like f + (1, float("nan")): { + "g": "1+/-nan", + "G": "1+/-%s" % NaN_EFG, + "%": "(100.000000+/-nan)%", # The % format type is like f # Should be the same as '+05', for floats, but is not, in # Python 2.7: - '+05g': '+0001+/-00nan', + "+05g": "+0001+/-00nan", # 5 is the *minimal* width, 6 is the default number of # digits after the decimal point: - '+05%': '(+100.000000+/-00nan)%', + "+05%": "(+100.000000+/-00nan)%", # There is a difference between '{}'.format(1.) and # '{:g}'.format(1.), which is not fully obvious in the # documentation, which indicates that a None format type @@ -1352,218 +1358,199 @@ def test_format(): # actually interpreted as str(), and that str() does not # have to behave like g ('{}'.format(1.234567890123456789) # and '{:g}'.format(1.234567890123456789) are different). - '': '1.0+/-nan', + "": "1.0+/-nan", # This is ugly, but consistent with # '{:+05}'.format(float('nan')) and format(1.) 
(which # differs from format(1)!): - '+05': '+01.0+/-00nan' - }, - - (9.9, 0.1): { - '.1ue': '(9.9+/-0.1)e+00', - '.0fS': '10(0.)' + "+05": "+01.0+/-00nan", }, + (9.9, 0.1): {".1ue": "(9.9+/-0.1)e+00", ".0fS": "10(0.)"}, (9.99, 0.1): { - # The precision has an effect on the exponent, like for - # floats: - '.2ue': '(9.99+/-0.10)e+00', # Same exponent as for 9.99 alone - '.1ue': '(1.00+/-0.01)e+01' # Same exponent as for 9.99 alone + # The precision has an effect on the exponent, like for + # floats: + ".2ue": "(9.99+/-0.10)e+00", # Same exponent as for 9.99 alone + ".1ue": "(1.00+/-0.01)e+01", # Same exponent as for 9.99 alone }, # 0 uncertainty: nominal value displayed like a float: (1.2345, 0): { - '.2ue': '(1.23+/-0)e+00', - '1.2ue': '1.23e+00+/-0', # No factored exponent - '.2uf': '1.23+/-0', - '.2ufS': '1.23(0)', - '.2fS': '1.23(0)', - 'g': '1.2345+/-0', - '': '1.2345+/-0' + ".2ue": "(1.23+/-0)e+00", + "1.2ue": "1.23e+00+/-0", # No factored exponent + ".2uf": "1.23+/-0", + ".2ufS": "1.23(0)", + ".2fS": "1.23(0)", + "g": "1.2345+/-0", + "": "1.2345+/-0", }, - # Alignment and filling characters (supported in Python 2.6+): (3.1415e10, 0): { - '<15': '31415000000.0 +/-0 ', - '<20S': '31415000000.0(0) ', + "<15": "31415000000.0 +/-0 ", + "<20S": "31415000000.0(0) ", # Trying to trip the format parsing with a fill character # which is an alignment character: - '=>15': '==31415000000.0+/-==============0' + "=>15": "==31415000000.0+/-==============0", }, - (1234.56789, 0): { - '1.2ue': '1.23e+03+/-0', # u ignored - '1.2e': '1.23e+03+/-0', + "1.2ue": "1.23e+03+/-0", # u ignored + "1.2e": "1.23e+03+/-0", # Default precision = 6 - 'eL': r'\left(1.234568 \pm 0\right) \times 10^{3}', - 'EL': r'\left(1.234568 \pm 0\right) \times 10^{3}', - 'fL': r'1234.567890 \pm 0', - 'FL': r'1234.567890 \pm 0', - '%L': r'\left(123456.789000 \pm 0\right) \%' - }, - - (1e5, 0): { - 'g': '100000+/-0' + "eL": r"\left(1.234568 \pm 0\right) \times 10^{3}", + "EL": r"\left(1.234568 \pm 0\right) \times 10^{3}", + "fL": r"1234.567890 \pm 0", + "FL": r"1234.567890 \pm 0", + "%L": r"\left(123456.789000 \pm 0\right) \%", }, + (1e5, 0): {"g": "100000+/-0"}, (1e6, 0): { # A default precision of 6 is used because the uncertainty # cannot be used for defining a default precision (it does # not have a magnitude): - 'g': '(1+/-0)e+06' + "g": "(1+/-0)e+06" }, - (1e6+10, 0): { + (1e6 + 10, 0): { # A default precision of 6 is used because the uncertainty # cannot be used for defining a default precision (it does # not have a magnitude): - 'g': '(1.00001+/-0)e+06' + "g": "(1.00001+/-0)e+06" }, # Rounding of the uncertainty that "changes" the number of # significant digits: (1, 0.994): { - '.3uf': '1.000+/-0.994', - '.2uf': '1.00+/-0.99', - '.1uf': '1+/-1' # Discontinuity in the number of digits + ".3uf": "1.000+/-0.994", + ".2uf": "1.00+/-0.99", + ".1uf": "1+/-1", # Discontinuity in the number of digits }, (12.3, 2.3): { - '.2ufS': '12.3(2.3)' # Decimal point on the uncertainty + ".2ufS": "12.3(2.3)" # Decimal point on the uncertainty }, - (12.3, 2.3): { - '.1ufS': '12(2)' # No decimal point on the uncertainty + (12.3, 2.3): { # noqa + ".1ufS": "12(2)" # No decimal point on the uncertainty }, (0, 0): { # Make defining the first significant digit problematic - '.1f': '0.0+/-0', # Simple float formatting - 'g': '0+/-0' + ".1f": "0.0+/-0", # Simple float formatting + "g": "0+/-0", }, (1.2e-34, 5e-67): { - '.6g': '(1.20000+/-0.00000)e-34', - '13.6g': ' 1.20000e-34+/- 0.00000e-34', - '13.6G': ' 1.20000E-34+/- 0.00000E-34', - '.6GL': 
r'\left(1.20000 \pm 0.00000\right) \times 10^{-34}', - '.6GLp': r'\left(1.20000 \pm 0.00000\right) \times 10^{-34}', + ".6g": "(1.20000+/-0.00000)e-34", + "13.6g": " 1.20000e-34+/- 0.00000e-34", + "13.6G": " 1.20000E-34+/- 0.00000E-34", + ".6GL": r"\left(1.20000 \pm 0.00000\right) \times 10^{-34}", + ".6GLp": r"\left(1.20000 \pm 0.00000\right) \times 10^{-34}", }, - - (float('nan'), 100): { # NaN *nominal value* - '': 'nan+/-100.0', # Like '{}'.format(100.) - 'g': 'nan+/-100', # Like '{:g}'.format(100.) - '.1e': '(nan+/-1.0)e+02', # Similar to 1±nan - '.1E': '(%s+/-1.0)E+02' % NaN_EFG, - '.1ue': '(nan+/-1)e+02', - '10.1e': ' nan+/- 1.0e+02' + (float("nan"), 100): { # NaN *nominal value* + "": "nan+/-100.0", # Like '{}'.format(100.) + "g": "nan+/-100", # Like '{:g}'.format(100.) + ".1e": "(nan+/-1.0)e+02", # Similar to 1±nan + ".1E": "(%s+/-1.0)E+02" % NaN_EFG, + ".1ue": "(nan+/-1)e+02", + "10.1e": " nan+/- 1.0e+02", }, - (float('nan'), 1e8): { # NaN *nominal value* - '': 'nan+/-100000000.0', # Like '{}'.format(1e8) - 'g': '(nan+/-1)e+08', # Like '{:g}'.format(1e8) - '.1e': '(nan+/-1.0)e+08', - '.1E': '(%s+/-1.0)E+08' % NaN_EFG, - '.1ue': '(nan+/-1)e+08', - '10.1e': ' nan+/- 1.0e+08' # 'nane+08' would be strange + (float("nan"), 1e8): { # NaN *nominal value* + "": "nan+/-100000000.0", # Like '{}'.format(1e8) + "g": "(nan+/-1)e+08", # Like '{:g}'.format(1e8) + ".1e": "(nan+/-1.0)e+08", + ".1E": "(%s+/-1.0)E+08" % NaN_EFG, + ".1ue": "(nan+/-1)e+08", + "10.1e": " nan+/- 1.0e+08", # 'nane+08' would be strange }, - (float('nan'), 123456789): { # NaN *nominal value* - '': 'nan+/-123456789.0', # Similar to '{}'.format(123456789.) - 'g': '(nan+/-1.23457)e+08', # Similar to '{:g}'.format(123456789.) - '.1e': '(nan+/-1.2)e+08', - '.1E': '(%s+/-1.2)E+08' % NaN_EFG, - '.1ue': '(nan+/-1)e+08', - '.1ueL': r'\left(\mathrm{nan} \pm 1\right) \times 10^{8}', - '10.1e': ' nan+/- 1.2e+08', - '10.1eL': r'\mathrm{nan} \pm 1.2 \times 10^{8}' + (float("nan"), 123456789): { # NaN *nominal value* + "": "nan+/-123456789.0", # Similar to '{}'.format(123456789.) + "g": "(nan+/-1.23457)e+08", # Similar to '{:g}'.format(123456789.) + ".1e": "(nan+/-1.2)e+08", + ".1E": "(%s+/-1.2)E+08" % NaN_EFG, + ".1ue": "(nan+/-1)e+08", + ".1ueL": r"\left(\mathrm{nan} \pm 1\right) \times 10^{8}", + "10.1e": " nan+/- 1.2e+08", + "10.1eL": r"\mathrm{nan} \pm 1.2 \times 10^{8}", }, - (float('nan'), float('nan')): { # *Double* NaN - '': 'nan+/-nan', - '.1e': 'nan+/-nan', - '.1E': '%s+/-%s' % (NaN_EFG, NaN_EFG), - '.1ue': 'nan+/-nan', - 'EL': r'\mathrm{%s} \pm \mathrm{%s}' % (NaN_EFG, NaN_EFG) + (float("nan"), float("nan")): { # *Double* NaN + "": "nan+/-nan", + ".1e": "nan+/-nan", + ".1E": "%s+/-%s" % (NaN_EFG, NaN_EFG), + ".1ue": "nan+/-nan", + "EL": r"\mathrm{%s} \pm \mathrm{%s}" % (NaN_EFG, NaN_EFG), }, - - (float('inf'), 100): { # Inf *nominal value* - '': 'inf+/-100.0', # Like '{}'.format(100.) - 'g': 'inf+/-100', # Like '{:g}'.format(100.) - '.1e': '(inf+/-1.0)e+02', # Similar to 1±inf - '.1E': '(%s+/-1.0)E+02' % Inf_EFG, - '.1ue': '(inf+/-1)e+02', - '10.1e': ' inf+/- 1.0e+02' + (float("inf"), 100): { # Inf *nominal value* + "": "inf+/-100.0", # Like '{}'.format(100.) + "g": "inf+/-100", # Like '{:g}'.format(100.) 
+ ".1e": "(inf+/-1.0)e+02", # Similar to 1±inf + ".1E": "(%s+/-1.0)E+02" % Inf_EFG, + ".1ue": "(inf+/-1)e+02", + "10.1e": " inf+/- 1.0e+02", }, - (float('inf'), 1e8): { # Inf *nominal value* - '': 'inf+/-100000000.0', # Like '{}'.format(1e8) - 'g': '(inf+/-1)e+08', # Like '{:g}'.format(1e8) - '.1e': '(inf+/-1.0)e+08', - '.1E': '(%s+/-1.0)E+08' % Inf_EFG, - '.1ue': '(inf+/-1)e+08', - '10.1e': ' inf+/- 1.0e+08' # 'infe+08' would be strange + (float("inf"), 1e8): { # Inf *nominal value* + "": "inf+/-100000000.0", # Like '{}'.format(1e8) + "g": "(inf+/-1)e+08", # Like '{:g}'.format(1e8) + ".1e": "(inf+/-1.0)e+08", + ".1E": "(%s+/-1.0)E+08" % Inf_EFG, + ".1ue": "(inf+/-1)e+08", + "10.1e": " inf+/- 1.0e+08", # 'infe+08' would be strange }, - (float('inf'), 123456789): { # Inf *nominal value* - '': 'inf+/-123456789.0', # Similar to '{}'.format(123456789.) - 'g': '(inf+/-1.23457)e+08', # Similar to '{:g}'.format(123456789.) - '.1e': '(inf+/-1.2)e+08', - '.1ep': '(inf+/-1.2)e+08', - '.1E': '(%s+/-1.2)E+08' % Inf_EFG, - '.1ue': '(inf+/-1)e+08', - '.1ueL': r'\left(\infty \pm 1\right) \times 10^{8}', - '.1ueLp': r'\left(\infty \pm 1\right) \times 10^{8}', - '10.1e': ' inf+/- 1.2e+08', - '10.1eL': r' \infty \pm 1.2 \times 10^{8}' + (float("inf"), 123456789): { # Inf *nominal value* + "": "inf+/-123456789.0", # Similar to '{}'.format(123456789.) + "g": "(inf+/-1.23457)e+08", # Similar to '{:g}'.format(123456789.) + ".1e": "(inf+/-1.2)e+08", + ".1ep": "(inf+/-1.2)e+08", + ".1E": "(%s+/-1.2)E+08" % Inf_EFG, + ".1ue": "(inf+/-1)e+08", + ".1ueL": r"\left(\infty \pm 1\right) \times 10^{8}", + ".1ueLp": r"\left(\infty \pm 1\right) \times 10^{8}", + "10.1e": " inf+/- 1.2e+08", + "10.1eL": r" \infty \pm 1.2 \times 10^{8}", }, - (float('inf'), float('inf')): { # *Double* Inf - '': 'inf+/-inf', - '.1e': 'inf+/-inf', - '.1E': '%s+/-%s' % (Inf_EFG, Inf_EFG), - '.1ue': 'inf+/-inf', - 'EL': r'\infty \pm \infty', - 'ELp': r'\left(\infty \pm \infty\right)', + (float("inf"), float("inf")): { # *Double* Inf + "": "inf+/-inf", + ".1e": "inf+/-inf", + ".1E": "%s+/-%s" % (Inf_EFG, Inf_EFG), + ".1ue": "inf+/-inf", + "EL": r"\infty \pm \infty", + "ELp": r"\left(\infty \pm \infty\right)", }, - # Like the tests for +infinity, but for -infinity: - (float('-inf'), 100): { # Inf *nominal value* - '': '-inf+/-100.0', # Like '{}'.format(100.) - 'g': '-inf+/-100', # Like '{:g}'.format(100.) - '.1e': '(-inf+/-1.0)e+02', # Similar to 1±inf - '.1E': '(-%s+/-1.0)E+02' % Inf_EFG, - '.1ue': '(-inf+/-1)e+02', - '10.1e': ' -inf+/- 1.0e+02' + (float("-inf"), 100): { # Inf *nominal value* + "": "-inf+/-100.0", # Like '{}'.format(100.) + "g": "-inf+/-100", # Like '{:g}'.format(100.) 
+ ".1e": "(-inf+/-1.0)e+02", # Similar to 1±inf + ".1E": "(-%s+/-1.0)E+02" % Inf_EFG, + ".1ue": "(-inf+/-1)e+02", + "10.1e": " -inf+/- 1.0e+02", }, - (float('-inf'), 1e8): { # Inf *nominal value* - '': '-inf+/-100000000.0', # Like '{}'.format(1e8) - 'g': '(-inf+/-1)e+08', # Like '{:g}'.format(1e8) - '.1e': '(-inf+/-1.0)e+08', - '.1E': '(-%s+/-1.0)E+08' % Inf_EFG, - '.1ue': '(-inf+/-1)e+08', - '10.1e': ' -inf+/- 1.0e+08' # 'infe+08' would be strange + (float("-inf"), 1e8): { # Inf *nominal value* + "": "-inf+/-100000000.0", # Like '{}'.format(1e8) + "g": "(-inf+/-1)e+08", # Like '{:g}'.format(1e8) + ".1e": "(-inf+/-1.0)e+08", + ".1E": "(-%s+/-1.0)E+08" % Inf_EFG, + ".1ue": "(-inf+/-1)e+08", + "10.1e": " -inf+/- 1.0e+08", # 'infe+08' would be strange }, - (float('-inf'), 123456789): { # Inf *nominal value* - '': '-inf+/-123456789.0', # Similar to '{}'.format(123456789.) - 'g': '(-inf+/-1.23457)e+08', # Similar to '{:g}'.format(123456789.) - '.1e': '(-inf+/-1.2)e+08', - '.1E': '(-%s+/-1.2)E+08' % Inf_EFG, - '.1ue': '(-inf+/-1)e+08', - '.1ueL': r'\left(-\infty \pm 1\right) \times 10^{8}', - '10.1e': ' -inf+/- 1.2e+08', - '10.1eL': r' -\infty \pm 1.2 \times 10^{8}' + (float("-inf"), 123456789): { # Inf *nominal value* + "": "-inf+/-123456789.0", # Similar to '{}'.format(123456789.) + "g": "(-inf+/-1.23457)e+08", # Similar to '{:g}'.format(123456789.) + ".1e": "(-inf+/-1.2)e+08", + ".1E": "(-%s+/-1.2)E+08" % Inf_EFG, + ".1ue": "(-inf+/-1)e+08", + ".1ueL": r"\left(-\infty \pm 1\right) \times 10^{8}", + "10.1e": " -inf+/- 1.2e+08", + "10.1eL": r" -\infty \pm 1.2 \times 10^{8}", }, - (float('-inf'), float('inf')): { # *Double* Inf - '': '-inf+/-inf', - '.1e': '-inf+/-inf', - '.1E': '-%s+/-%s' % (Inf_EFG, Inf_EFG), - '.1ue': '-inf+/-inf', - 'EL': r'-\infty \pm \infty' + (float("-inf"), float("inf")): { # *Double* Inf + "": "-inf+/-inf", + ".1e": "-inf+/-inf", + ".1E": "-%s+/-%s" % (Inf_EFG, Inf_EFG), + ".1ue": "-inf+/-inf", + "EL": r"-\infty \pm \infty", }, - # The Particle Data Group convention trumps the "at least one # digit past the decimal point" for Python floats, but only # with a non-zero uncertainty: - (724.2, 26.4): { - '': '724+/-26', - 'p': '(724+/-26)' - }, - (724, 0): { - '': '724.0+/-0' - }, - + (724.2, 26.4): {"": "724+/-26", "p": "(724+/-26)"}, + (724, 0): {"": "724.0+/-0"}, # More NaN and infinity, in particular with LaTeX and various # options: - (float('-inf'), float('inf')): { - 'S': '-inf(inf)', - 'LS': r'-\infty(\infty)', - 'L': r'-\infty \pm \infty', - 'LP': r'-\infty±\infty', + (float("-inf"), float("inf")): { # noqa + "S": "-inf(inf)", + "LS": r"-\infty(\infty)", + "L": r"-\infty \pm \infty", + "LP": r"-\infty±\infty", # The following is consistent with Python's own # formatting, which depends on the version of Python: # formatting float("-inf") with format(..., "020") gives @@ -1583,55 +1570,46 @@ def test_format(): # code. It is thus best to mimic the native behavior of # none type formatting (even if it does not look so good # in Python 2.6). 
- '020S': format(float("-inf"), '015')+'(inf)' + "020S": format(float("-inf"), "015") + "(inf)", }, - (-float('nan'), float('inf')): { - 'S': 'nan(inf)', - 'LS': r'\mathrm{nan}(\infty)', - 'L': r'\mathrm{nan} \pm \infty', - 'LP': r'\mathrm{nan}±\infty' + (-float("nan"), float("inf")): { + "S": "nan(inf)", + "LS": r"\mathrm{nan}(\infty)", + "L": r"\mathrm{nan} \pm \infty", + "LP": r"\mathrm{nan}±\infty", }, - # Leading zeroes in the shorthand notation: - (-2, 3): { - "020S": "-000000000002.0(3.0)" - } - + (-2, 3): {"020S": "-000000000002.0(3.0)"}, } # ',' format option: introduced in Python 2.7 if sys.version_info >= (2, 7): - - tests.update({ - (1234.56789, 0.012): { - ',.1uf': '1,234.57+/-0.01' + tests.update( + { + (1234.56789, 0.012): {",.1uf": "1,234.57+/-0.01"}, + (123456.789123, 1234.5678): { + ",f": "123,457+/-1,235", # Particle Data Group convention + ",.4f": "123,456.7891+/-1,234.5678", }, - - (123456.789123, 1234.5678): { - ',f': '123,457+/-1,235', # Particle Data Group convention - ',.4f': '123,456.7891+/-1,234.5678' - } - }) + } + ) # True if we can detect that the Jython interpreter is running this code: try: - jython_detected = sys.subversion[0] == 'Jython' + jython_detected = sys.subversion[0] == "Jython" except AttributeError: jython_detected = False - for (values, representations) in tests.items(): - + for values, representations in tests.items(): value = ufloat(*values) - for (format_spec, result) in representations.items(): - + for format_spec, result in representations.items(): # print "FORMATTING {} WITH '{}'".format(repr(value), format_spec) # Jython 2.5.2 does not always represent NaN as nan or NAN # in the CPython way: for example, '%.2g' % float('nan') # is '\ufffd'. The test is skipped, in this case: - if jython_detected and ( - isnan(value.std_dev) or isnan(value.nominal_value)): + if jython_detected and (isnan(value.std_dev) or isnan(value.nominal_value)): continue # Call that works with Python < 2.6 too: @@ -1640,89 +1618,94 @@ def test_format(): assert representation == result, ( # The representation is used, for terminal that do not # support some characters like ±, and superscripts: - 'Incorrect representation %r for format %r of %r:' - ' %r expected.' - % (representation, format_spec, value, result)) + "Incorrect representation %r for format %r of %r:" + " %r expected." 
% (representation, format_spec, value, result) + ) # An empty format string is like calling str() # (http://docs.python.org/2/library/string.html#formatspec): if not format_spec: assert representation == str(value), ( - 'Empty format should give the same thing as str():' - ' %s obtained instead of %s' - % (representation, str(value))) + "Empty format should give the same thing as str():" + " %s obtained instead of %s" % (representation, str(value)) + ) # Parsing back into a number with uncertainty (unless the # LaTeX or comma notation is used): - if (not set(format_spec).intersection('L,*%') # * = fill with * + if ( + not set(format_spec).intersection("L,*%") # * = fill with * # "0nan" - and '0nan' not in representation.lower() + and "0nan" not in representation.lower() # "0inf" - and '0inf' not in representation.lower() + and "0inf" not in representation.lower() # Specific case: - and '=====' not in representation): - + and "=====" not in representation + ): value_back = ufloat_fromstr(representation) # The original number and the new one should be consistent # with each other: try: - # The nominal value can be rounded to 0 when the # uncertainty is larger (because p digits on the # uncertainty can still show 0.00... for the # nominal value). The relative error is infinite, # so this should not cause an error: if value_back.nominal_value: - assert numbers_close(value.nominal_value, - value_back.nominal_value, 2.4e-1) + assert numbers_close( + value.nominal_value, value_back.nominal_value, 2.4e-1 + ) # If the uncertainty is zero, then the relative # change can be large: - assert numbers_close(value.std_dev, - value_back.std_dev, 3e-1) + assert numbers_close(value.std_dev, value_back.std_dev, 3e-1) except AssertionError: # !! The following string formatting requires # str() to work (to not raise an exception) on the # values (which have a non-standard class): raise AssertionError( - 'Original value %s and value %s parsed from %r' - ' (obtained through format specification %r)' - ' are not close enough' - % (value, value_back, representation, format_spec)) + "Original value %s and value %s parsed from %r" + " (obtained through format specification %r)" + " are not close enough" + % (value, value_back, representation, format_spec) + ) + def test_unicode_format(): - '''Test of the unicode formatting of numbers with uncertainties''' + """Test of the unicode formatting of numbers with uncertainties""" x = ufloat(3.14159265358979, 0.25) - assert isinstance(u'Résultat = %s' % x.format(''), str) - assert isinstance(u'Résultat = %s' % x.format('P'), str) + assert isinstance("Résultat = %s" % x.format(""), str) + assert isinstance("Résultat = %s" % x.format("P"), str) + def test_custom_pretty_print_and_latex(): - '''Test of the pretty-print and LaTeX format customizations''' + """Test of the pretty-print and LaTeX format customizations""" - x = ufloat(2, 0.1)*1e-11 + x = ufloat(2, 0.1) * 1e-11 # We will later restore the defaults: PREV_CUSTOMIZATIONS = { var: getattr(formatting, var).copy() - for var in ['PM_SYMBOLS', 'MULT_SYMBOLS', 'GROUP_SYMBOLS']} - + for var in ["PM_SYMBOLS", "MULT_SYMBOLS", "GROUP_SYMBOLS"] + } + # Customizations: for format in ["pretty-print", "latex"]: - formatting.PM_SYMBOLS[format] = u" ± " - formatting.MULT_SYMBOLS[format] = u"⋅" - formatting.GROUP_SYMBOLS[format] = ( "[", "]" ) + formatting.PM_SYMBOLS[format] = " ± " + formatting.MULT_SYMBOLS[format] = "⋅" + formatting.GROUP_SYMBOLS[format] = ("[", "]") - assert u"{:P}".format(x) == u'[2.00 ± 0.10]⋅10⁻¹¹' - assert 
u"{:L}".format(x) == u'[2.00 ± 0.10] ⋅ 10^{-11}' + assert "{:P}".format(x) == "[2.00 ± 0.10]⋅10⁻¹¹" + assert "{:L}".format(x) == "[2.00 ± 0.10] ⋅ 10^{-11}" # We restore the defaults: - for (var, setting) in PREV_CUSTOMIZATIONS.items(): + for var, setting in PREV_CUSTOMIZATIONS.items(): setattr(formatting, var, setting) + ############################################################################### # The tests below require NumPy, which is an optional package: @@ -1783,8 +1766,8 @@ def test_correlated_values(): # u.nominal_value might return a float. The idea is to force # the new variable u2 to be defined through an integer nominal # value: - u2, = uncert_core.correlated_values([1], cov) - expr = 2*u2 # Calculations with u2 should be possible, like with u + (u2,) = uncert_core.correlated_values([1], cov) + expr = 2 * u2 # Calculations with u2 should be possible, like with u # noqa #################### @@ -1792,33 +1775,33 @@ def test_correlated_values(): x = ufloat(1, 0.1) y = ufloat(2, 0.3) - z = -3*x+y + z = -3 * x + y covs = uncert_core.covariance_matrix([x, y, z]) # Test of the diagonal covariance elements: assert uarrays_close( - numpy.array([v.std_dev**2 for v in (x, y, z)]), - numpy.array(covs).diagonal()) + numpy.array([v.std_dev**2 for v in (x, y, z)]), numpy.array(covs).diagonal() + ) # "Inversion" of the covariance matrix: creation of new # variables: (x_new, y_new, z_new) = uncert_core.correlated_values( [x.nominal_value, y.nominal_value, z.nominal_value], covs, - tags = ['x', 'y', 'z']) + tags=["x", "y", "z"], + ) # Even the uncertainties should be correctly reconstructed: - assert uarrays_close(numpy.array((x, y, z)), - numpy.array((x_new, y_new, z_new))) + assert uarrays_close(numpy.array((x, y, z)), numpy.array((x_new, y_new, z_new))) # ... 
and the covariances too: assert uarrays_close( numpy.array(covs), - numpy.array(uncert_core.covariance_matrix([x_new, y_new, z_new]))) + numpy.array(uncert_core.covariance_matrix([x_new, y_new, z_new])), + ) - assert uarrays_close( - numpy.array([z_new]), numpy.array([-3*x_new+y_new])) + assert uarrays_close(numpy.array([z_new]), numpy.array([-3 * x_new + y_new])) #################### @@ -1826,7 +1809,7 @@ def test_correlated_values(): u = ufloat(1, 0.05) v = ufloat(10, 0.1) - sum_value = u+2*v + sum_value = u + 2 * v # Covariance matrices: cov_matrix = uncert_core.covariance_matrix([u, v, sum_value]) @@ -1834,22 +1817,20 @@ def test_correlated_values(): # Correlated variables can be constructed from a covariance # matrix, if NumPy is available: (u2, v2, sum2) = uncert_core.correlated_values( - [x.nominal_value for x in [u, v, sum_value]], - cov_matrix) + [x.nominal_value for x in [u, v, sum_value]], cov_matrix + ) # uarrays_close() is used instead of numbers_close() because # it compares uncertainties too: assert uarrays_close(numpy.array([u]), numpy.array([u2])) assert uarrays_close(numpy.array([v]), numpy.array([v2])) assert uarrays_close(numpy.array([sum_value]), numpy.array([sum2])) - assert uarrays_close(numpy.array([0]), - numpy.array([sum2-(u2+2*v2)])) - + assert uarrays_close(numpy.array([0]), numpy.array([sum2 - (u2 + 2 * v2)])) # Spot checks of the correlation matrix: corr_matrix = uncert_core.correlation_matrix([u, v, sum_value]) - assert numbers_close(corr_matrix[0,0], 1) - assert numbers_close(corr_matrix[1,2], 2*v.std_dev/sum_value.std_dev) + assert numbers_close(corr_matrix[0, 0], 1) + assert numbers_close(corr_matrix[1, 2], 2 * v.std_dev / sum_value.std_dev) #################### @@ -1860,15 +1841,17 @@ def test_correlated_values(): cov[0, 1] = cov[1, 0] = 0.9e-70 cov[[0, 1], 2] = -3e-34 cov[2, [0, 1]] = -3e-34 - variables = uncert_core.correlated_values([0]*3, cov) + variables = uncert_core.correlated_values([0] * 3, cov) # Since the numbers are very small, we need to compare them # in a stricter way, that handles the case of a 0 variance # in `variables`: assert numbers_close( - 1e66*cov[0,0], 1e66*variables[0].s**2, tolerance=1e-5) + 1e66 * cov[0, 0], 1e66 * variables[0].s ** 2, tolerance=1e-5 + ) assert numbers_close( - 1e66*cov[1,1], 1e66*variables[1].s**2, tolerance=1e-5) + 1e66 * cov[1, 1], 1e66 * variables[1].s ** 2, tolerance=1e-5 + ) #################### @@ -1880,38 +1863,34 @@ def test_correlated_values(): nom_values = [1, 2, 3] variables = uncert_core.correlated_values(nom_values, cov) - for (variable, nom_value, variance) in zip( - variables, nom_values, cov.diagonal()): - + for variable, nom_value, variance in zip(variables, nom_values, cov.diagonal()): assert numbers_close(variable.n, nom_value) - assert numbers_close(variable.s**2, variance) - - assert uarrays_close( - cov, - numpy.array(uncert_core.covariance_matrix(variables))) + assert numbers_close(variable.s**2, variance) + + assert uarrays_close(cov, numpy.array(uncert_core.covariance_matrix(variables))) def test_correlated_values_correlation_mat(): - ''' + """ Tests the input of correlated value. Test through their correlation matrix (instead of the covariance matrix). 
- ''' + """ x = ufloat(1, 0.1) y = ufloat(2, 0.3) - z = -3*x+y + z = -3 * x + y cov_mat = uncert_core.covariance_matrix([x, y, z]) std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal()) - corr_mat = cov_mat/std_devs/std_devs[numpy.newaxis].T + corr_mat = cov_mat / std_devs / std_devs[numpy.newaxis].T # We make sure that the correlation matrix is indeed diagonal: - assert (corr_mat-corr_mat.T).max() <= 1e-15 + assert (corr_mat - corr_mat.T).max() <= 1e-15 # We make sure that there are indeed ones on the diagonal: - assert (corr_mat.diagonal()-1).max() <= 1e-15 + assert (corr_mat.diagonal() - 1).max() <= 1e-15 # We try to recover the correlated variables through the # correlation matrix (not through the covariance matrix): @@ -1919,7 +1898,8 @@ def test_correlated_values_correlation_mat(): nominal_values = [v.nominal_value for v in (x, y, z)] std_devs = [v.std_dev for v in (x, y, z)] x2, y2, z2 = uncert_core.correlated_values_norm( - list(zip(nominal_values, std_devs)), corr_mat) + list(zip(nominal_values, std_devs)), corr_mat + ) # uarrays_close() is used instead of numbers_close() because # it compares uncertainties too: @@ -1930,9 +1910,10 @@ def test_correlated_values_correlation_mat(): assert uarrays_close(numpy.array([z]), numpy.array([z2])) # Partial correlation test: - assert uarrays_close(numpy.array([0]), numpy.array([z2-(-3*x2+y2)])) + assert uarrays_close(numpy.array([0]), numpy.array([z2 - (-3 * x2 + y2)])) # Test of the full covariance matrix: assert uarrays_close( numpy.array(cov_mat), - numpy.array(uncert_core.covariance_matrix([x2, y2, z2]))) + numpy.array(uncert_core.covariance_matrix([x2, y2, z2])), + ) diff --git a/tests/test_unumpy.py b/tests/test_unumpy.py index cea5d7da..783fd336 100644 --- a/tests/test_unumpy.py +++ b/tests/test_unumpy.py @@ -2,6 +2,7 @@ import numpy except ImportError: import sys + sys.exit() # There is no reason to test the interface to NumPy import uncertainties @@ -10,8 +11,8 @@ from uncertainties.unumpy import core from helpers import numbers_close, uarrays_close -def test_numpy(): +def test_numpy(): """ Interaction with NumPy, including matrix inversion, correlated_values, and calculation of the mean. 
@@ -22,15 +23,15 @@ def test_numpy(): # NumPy arrays can be multiplied by Variable objects, # whatever the order of the operands: - prod1 = arr*num - prod2 = num*arr + prod1 = arr * num + prod2 = num * arr # Additional check: assert (prod1 == prod2).all() # Operations with arrays work (they are first handled by NumPy, # then by this module): - prod1*prod2 # This should be calculable - assert not (prod1-prod2).any() # All elements must be 0 + prod1 * prod2 # This should be calculable + assert not (prod1 - prod2).any() # All elements must be 0 # Comparisons work too: @@ -39,7 +40,7 @@ def test_numpy(): # Comparisons with Variable objects: assert len(arr[arr > ufloat(1.5, 0.1)]) == 1 - assert len(prod1[prod1 < prod1*prod2]) == 2 + assert len(prod1[prod1 < prod1 * prod2]) == 2 # The following can be calculated (special NumPy abs() function): numpy.abs(arr + ufloat(-1, 0.1)) @@ -61,11 +62,12 @@ def test_numpy(): # Calculation of the mean, global and with a specific axis: arr_floats = numpy.random.random((10, 3, 5)) - arr = unumpy.uarray(arr_floats, arr_floats/100) + arr = unumpy.uarray(arr_floats, arr_floats / 100) assert arr.mean(axis=0).shape == (3, 5) assert arr.mean(axis=1).shape == (10, 5) arr.mean() # Global mean + def test_matrix(): "Matrices of numbers with uncertainties" # Matrix inversion: @@ -73,8 +75,7 @@ def test_matrix(): # Matrix with a mix of Variable objects and regular # Python numbers: - m = unumpy.matrix([[ufloat(10, 1), -3.1], - [0, ufloat(3, 0)]]) + m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)]]) m_nominal_values = unumpy.nominal_values(m) # Test of the nominal_value attribute: @@ -83,8 +84,9 @@ def test_matrix(): assert type(m[0, 0]) == uncert_core.Variable # Test of scalar multiplication, both sides: - 3*m - m*3 + 3 * m + m * 3 + def derivatives_close(x, y): """ @@ -97,14 +99,15 @@ def derivatives_close(x, y): if set(x.derivatives) != set(y.derivatives): return False # Not the same variables - return all(numbers_close(x.derivatives[var], y.derivatives[var]) - for var in x.derivatives) + return all( + numbers_close(x.derivatives[var], y.derivatives[var]) for var in x.derivatives + ) + def test_inverse(): "Tests of the matrix inverse" - m = unumpy.matrix([[ufloat(10, 1), -3.1], - [0, ufloat(3, 0)]]) + m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)]]) m_nominal_values = unumpy.nominal_values(m) # "Regular" inverse matrix, when uncertainties are not taken @@ -124,19 +127,19 @@ def test_inverse(): # Checks of the numerical values: the diagonal elements of the # inverse should be the inverses of the diagonal elements of # m (because we started with a triangular matrix): - assert numbers_close(1/m_nominal_values[0, 0], - m_inv_uncert[0, 0].nominal_value), "Wrong value" - - assert numbers_close(1/m_nominal_values[1, 1], - m_inv_uncert[1, 1].nominal_value), "Wrong value" + assert numbers_close( + 1 / m_nominal_values[0, 0], m_inv_uncert[0, 0].nominal_value + ), "Wrong value" + assert numbers_close( + 1 / m_nominal_values[1, 1], m_inv_uncert[1, 1].nominal_value + ), "Wrong value" #################### # Checks of the covariances between elements: x = ufloat(10, 1) - m = unumpy.matrix([[x, x], - [0, 3+2*x]]) + m = unumpy.matrix([[x, x], [0, 3 + 2 * x]]) m_inverse = m.I @@ -144,10 +147,8 @@ def test_inverse(): m_double_inverse = m_inverse.I # The initial matrix should be recovered, including its # derivatives, which define covariances: - assert numbers_close(m_double_inverse[0, 0].nominal_value, - m[0, 0].nominal_value) - assert 
numbers_close(m_double_inverse[0, 0].std_dev, - m[0, 0].std_dev) + assert numbers_close(m_double_inverse[0, 0].nominal_value, m[0, 0].nominal_value) + assert numbers_close(m_double_inverse[0, 0].std_dev, m[0, 0].std_dev) assert uarrays_close(m_double_inverse, m) @@ -166,18 +167,19 @@ def test_inverse(): # Correlations between m and m_inverse should create a perfect # inversion: - assert uarrays_close(m * m_inverse, numpy.eye(m.shape[0])) + assert uarrays_close(m * m_inverse, numpy.eye(m.shape[0])) + def test_wrap_array_func(): - ''' + """ Test of numpy.wrap_array_func(), with optional arguments and keyword arguments. - ''' + """ # Function that works with numbers with uncertainties in mat (if # mat is an uncertainties.unumpy.matrix): def f_unc(mat, *args, **kwargs): - return mat.I + args[0]*kwargs['factor'] + return mat.I + args[0] * kwargs["factor"] # Test with optional arguments and keyword arguments: def f(mat, *args, **kwargs): @@ -186,15 +188,12 @@ def f(mat, *args, **kwargs): assert not any(isinstance(v, uncert_core.UFloat) for v in mat.flat) return f_unc(mat, *args, **kwargs) - # Wrapped function: f_wrapped = core.wrap_array_func(f) ########## # Full rank rectangular matrix: - m = unumpy.matrix([[ufloat(10, 1), -3.1], - [0, ufloat(3, 0)], - [1, -3.1]]) + m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)], [1, -3.1]]) # Numerical and package (analytical) pseudo-inverses: they must be # the same: @@ -212,9 +211,7 @@ def test_pseudo_inverse(): ########## # Full rank rectangular matrix: - m = unumpy.matrix([[ufloat(10, 1), -3.1], - [0, ufloat(3, 0)], - [1, -3.1]]) + m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)], [1, -3.1]]) # Numerical and package (analytical) pseudo-inverses: they must be # the same: @@ -238,6 +235,7 @@ def test_pseudo_inverse(): m_pinv_package = core.pinv(m, rcond) assert uarrays_close(m_pinv_num, m_pinv_package) + def test_broadcast_funcs(): """ Test of mathematical functions that work with NumPy arrays of @@ -245,7 +243,7 @@ def test_broadcast_funcs(): """ x = ufloat(0.2, 0.1) - arr = numpy.array([x, 2*x]) + arr = numpy.array([x, 2 * x]) assert unumpy.cos(arr)[1] == uncertainties.umath.cos(arr[1]) # Some functions do not bear the same name in the math module and @@ -256,10 +254,11 @@ def test_broadcast_funcs(): # should have been renamed to arccos(). Starting with numpy 2 numpy.acos() # is an alias to numpy.arccos(). If similar aliases are added to unumpy, # the following tests can be removed. 
- assert not hasattr(unumpy, 'acos') + assert not hasattr(unumpy, "acos") # Test of the __all__ variable: - assert 'acos' not in unumpy.__all__ + assert "acos" not in unumpy.__all__ + def test_array_and_matrix_creation(): "Test of custom array creation" @@ -271,8 +270,9 @@ def test_array_and_matrix_creation(): # Same thing for matrices: mat = unumpy.umatrix([1, 2], [0.1, 0.2]) - assert mat[0,1].nominal_value == 2 - assert mat[0,1].std_dev == 0.2 + assert mat[0, 1].nominal_value == 2 + assert mat[0, 1].std_dev == 0.2 + def test_component_extraction(): "Extracting the nominal values and standard deviations from an array" @@ -299,4 +299,4 @@ def test_array_comparisons(): # For matrices, 1D arrays are converted to 2D arrays: mat = unumpy.umatrix([1, 2], [1, 4]) - assert numpy.all((mat == [mat[0,0], 4]) == [True, False]) + assert numpy.all((mat == [mat[0, 0], 4]) == [True, False]) diff --git a/uncertainties/__init__.py b/uncertainties/__init__.py index 3ba2306a..7b4fc17b 100644 --- a/uncertainties/__init__.py +++ b/uncertainties/__init__.py @@ -1,7 +1,7 @@ #!! Whenever the documentation below is updated, setup.py should be # checked for consistency. -''' +""" Calculations with full error propagation for quantities with uncertainties. Derivatives can also be calculated. @@ -219,13 +219,14 @@ This software is released under the BSD license. -''' +""" -from .core import * -from .core import __all__ # For a correct help(uncertainties) +from .core import * # noqa +from .core import __all__ # noqa For a correct help(uncertainties) + +from .version import __version__, __version_tuple__ # noqa -from .version import __version__, __version_tuple__ # for backward compatibility __version_info__ = __version_tuple__ -__author__ = 'Eric O. LEBIGOT (EOL) ' +__author__ = "Eric O. LEBIGOT (EOL) " diff --git a/uncertainties/core.py b/uncertainties/core.py index 9b5cb706..fdd64db7 100644 --- a/uncertainties/core.py +++ b/uncertainties/core.py @@ -14,68 +14,58 @@ from __future__ import division # Many analytical derivatives depend on this -from builtins import str, next, map, zip, range, object -import math -from math import sqrt, log, isnan, isinf # Optimization: no attribute look-up -import re -import sys +from builtins import str, zip, range, object +from math import sqrt, isnan, isinf # Optimization: no attribute look-up try: from math import isinfinite # !! Python 3.2+ except ImportError: + def isinfinite(x): return isinf(x) or isnan(x) + import copy -import warnings -import itertools -import inspect -import numbers import collections -from uncertainties.formatting import format_ufloat, nrmlze_superscript, MULT_SYMBOLS, format_num +from uncertainties.formatting import format_ufloat from uncertainties.parsing import str_to_number_with_uncert from . import ops from uncertainties.ops import ( _wrap, - IndexableIter, - nan_if_exception, partial_derivative, set_doc, - CONSTANT_TYPES, + nan_if_exception, modified_operators, - modified_ops_with_reflection + modified_ops_with_reflection, ) # Attributes that are always exported (some other attributes are # exported only if the NumPy module is available...): __all__ = [ - # All sub-modules and packages are not imported by default, # in particular because NumPy might be unavailable. 
- - 'ufloat', # Main function: returns a number with uncertainty - 'ufloat_fromstr', # Important function: returns a number with uncertainty - + "ufloat", # Main function: returns a number with uncertainty + "ufloat_fromstr", # Important function: returns a number with uncertainty # Uniform access to nominal values and standard deviations: - 'nominal_value', - 'std_dev', - + "nominal_value", + "std_dev", # Utility functions (more are exported if NumPy is present): - 'covariance_matrix', - + "covariance_matrix", # Class for testing whether an object is a number with # uncertainty. Not usually created by users (except through the # Variable subclass), but possibly manipulated by external code # ['derivatives()' method, etc.]. - 'UFloat', - 'Variable', - + "UFloat", + "Variable", # Wrapper for allowing non-pure-Python function to handle # quantitities with uncertainties: - 'wrap' - - ] + "wrap", + # used internally and will be removed by linter if not here + "nan_if_exception", + "modified_operators", + "modified_ops_with_reflection", +] ############################################################################### ## Definitions that depend on the availability of NumPy: @@ -86,7 +76,6 @@ def isinfinite(x): except ImportError: pass else: - # Entering variables as a block of correlated values. Only available # if NumPy is installed. @@ -136,20 +125,20 @@ def correlated_values(nom_values, covariance_mat, tags=None): # by dividing by standard deviations. We thus use specific # normalization values, with no null value: norm_vector = std_devs.copy() - norm_vector[norm_vector==0] = 1 + norm_vector[norm_vector == 0] = 1 return correlated_values_norm( # !! The following zip() is a bit suboptimal: correlated_values() # separates back the nominal values and the standard deviations: list(zip(nom_values, std_devs)), - covariance_mat/norm_vector/norm_vector[:,numpy.newaxis], - tags) + covariance_mat / norm_vector / norm_vector[:, numpy.newaxis], + tags, + ) - __all__.append('correlated_values') + __all__.append("correlated_values") - def correlated_values_norm(values_with_std_dev, correlation_mat, - tags=None): - ''' + def correlated_values_norm(values_with_std_dev, correlation_mat, tags=None): + """ Return correlated values like correlated_values(), but takes instead as input: @@ -170,7 +159,7 @@ def correlated_values_norm(values_with_std_dev, correlation_mat, NumPy array, etc.). tags -- like for correlated_values(). - ''' + """ # If no tags were given, we prepare tags for the newly created # variables: @@ -191,7 +180,7 @@ def correlated_values_norm(values_with_std_dev, correlation_mat, # Numerical errors might make some variances negative: we set # them to zero: - variances[variances < 0] = 0. 
+ variances[variances < 0] = 0.0 # Creation of new, independent variables: @@ -201,7 +190,8 @@ def correlated_values_norm(values_with_std_dev, correlation_mat, variables = tuple( # The variables represent "pure" uncertainties: Variable(0, sqrt(variance), tag) - for (variance, tag) in zip(variances, tags)) + for (variance, tag) in zip(variances, tags) + ) # The coordinates of each new uncertainty as a function of the # new variables must include the variable scale (standard deviation): @@ -209,22 +199,23 @@ def correlated_values_norm(values_with_std_dev, correlation_mat, # Representation of the initial correlated values: values_funcs = tuple( - AffineScalarFunc( - value, - LinearCombination(dict(zip(variables, coords)))) - for (coords, value) in zip(transform, nominal_values)) + AffineScalarFunc(value, LinearCombination(dict(zip(variables, coords)))) + for (coords, value) in zip(transform, nominal_values) + ) return values_funcs - __all__.append('correlated_values_norm') + __all__.append("correlated_values_norm") ############################################################################### + class NumericalDerivatives(object): """ Convenient access to the partial derivatives of a function, calculated numerically. """ + # This is not a list because the number of arguments of the # function is not known in advance, in general. @@ -306,7 +297,6 @@ def expand(self): derivatives = collections.defaultdict(float) while self.linear_combo: # The list of terms is emptied progressively - # One of the terms is expanded or, if no expansion is # needed, simply added to the existing derivatives. # @@ -322,13 +312,13 @@ def expand(self): # print "MAINS", main_factor, main_expr if main_expr.expanded(): - for (var, factor) in main_expr.linear_combo.items(): - derivatives[var] += main_factor*factor + for var, factor in main_expr.linear_combo.items(): + derivatives[var] += main_factor * factor else: # Non-expanded form - for (factor, expr) in main_expr.linear_combo: + for factor, expr in main_expr.linear_combo: # The main_factor is applied to expr: - self.linear_combo.append((main_factor*factor, expr)) + self.linear_combo.append((main_factor * factor, expr)) # print "DERIV", derivatives @@ -341,6 +331,7 @@ def __getstate__(self): def __setstate__(self, state): (self.linear_combo,) = state + class AffineScalarFunc(object): """ Affine functions that support basic mathematical operations @@ -378,7 +369,7 @@ class AffineScalarFunc(object): """ # To save memory in large arrays: - __slots__ = ('_nominal_value', '_linear_part') + __slots__ = ("_nominal_value", "_linear_part") # !! Fix for mean() in NumPy 1.8.0: class dtype(object): @@ -481,8 +472,7 @@ def error_components(self): # Calculation of the variance: error_components = {} - for (variable, derivative) in self.derivatives.items(): - + for variable, derivative in self.derivatives.items(): # print "TYPE", type(variable), type(derivative) # Individual standard error due to variable: @@ -496,7 +486,7 @@ def error_components(self): # convention of this module? error_components[variable] = 0 else: - error_components[variable] = abs(derivative*variable._std_dev) + error_components[variable] = abs(derivative * variable._std_dev) return error_components @@ -512,13 +502,12 @@ def std_dev(self): objects) involved. """ #! 
It would be possible to not allow the user to update the - #std dev of Variable objects, in which case AffineScalarFunc - #objects could have a pre-calculated or, better, cached - #std_dev value (in fact, many intermediate AffineScalarFunc do - #not need to have their std_dev calculated: only the final - #AffineScalarFunc returned to the user does). - return float(sqrt(sum( - delta**2 for delta in self.error_components().values()))) + # std dev of Variable objects, in which case AffineScalarFunc + # objects could have a pre-calculated or, better, cached + # std_dev value (in fact, many intermediate AffineScalarFunc do + # not need to have their std_dev calculated: only the final + # AffineScalarFunc returned to the user does). + return float(sqrt(sum(delta**2 for delta in self.error_components().values()))) # Abbreviation (for formulas, etc.): s = std_dev @@ -538,7 +527,7 @@ def __repr__(self): if std_dev: std_dev_str = repr(std_dev) else: - std_dev_str = '0' + std_dev_str = "0" return "%r+/-%s" % (self.nominal_value, std_dev_str) @@ -546,12 +535,11 @@ def __str__(self): # An empty format string and str() usually return the same # string # (http://docs.python.org/2/library/string.html#format-specification-mini-language): - return self.format('') + return self.format("") @set_doc(format_ufloat.__doc__) def __format__(self, format_spec): return format_ufloat(self, format_spec) - @set_doc(""" Return the same result as self.__format__(format_spec), or @@ -563,7 +551,7 @@ def __format__(self, format_spec): """) def format(self, format_spec): return format_ufloat(self, format_spec) - + def std_score(self, value): """ Return 'value' - nominal value, in units of the standard @@ -576,8 +564,7 @@ def std_score(self, value): # here: return (value - self._nominal_value) / self.std_dev except ZeroDivisionError: - raise ValueError("The standard deviation is zero:" - " undefined result") + raise ValueError("The standard deviation is zero:" " undefined result") def __deepcopy__(self, memo): """ @@ -588,8 +575,7 @@ def __deepcopy__(self, memo): New variables are specially created for the returned AffineScalarFunc object. """ - return AffineScalarFunc(self._nominal_value, - copy.deepcopy(self._linear_part)) + return AffineScalarFunc(self._nominal_value, copy.deepcopy(self._linear_part)) def __getstate__(self): """ @@ -618,7 +604,7 @@ def __getstate__(self): # contains keys that are shadowed by slot names: try: - all_attrs['__dict__'] = self.__dict__ + all_attrs["__dict__"] = self.__dict__ except AttributeError: pass @@ -633,10 +619,9 @@ def __getstate__(self): all_slots = set() for cls in type(self).mro(): - # In the diamond inheritance pattern, some parent classes # may not have __slots__: - slot_names = getattr(cls, '__slots__', ()) + slot_names = getattr(cls, "__slots__", ()) # Slot names can be given in various forms (string, # sequence, iterable): @@ -662,14 +647,15 @@ def __setstate__(self, data_dict): """ Hook for the pickle module. 
""" - for (name, value) in data_dict.items(): + for name, value in data_dict.items(): # Contrary to the default __setstate__(), this does not # necessarily save to the instance dictionary (because the # instance might contain slots): setattr(self, name, value) + ops.add_arithmetic_ops(AffineScalarFunc) -ops.add_comparative_ops(AffineScalarFunc) +ops.add_comparative_ops(AffineScalarFunc) to_affine_scalar = AffineScalarFunc._to_affine_scalar # Nicer name, for users: isinstance(ufloat(...), UFloat) is @@ -677,6 +663,7 @@ def __setstate__(self, data_dict): # number with uncertainties from the uncertainties package?": UFloat = AffineScalarFunc + def wrap(f, derivatives_args=None, derivatives_kwargs=None): """Wrap a function f into one that accepts Variables. @@ -718,13 +705,23 @@ def wrap(f, derivatives_args=None, derivatives_kwargs=None): These will all give the same result. """ - return _wrap(AffineScalarFunc, f, derivatives_args=derivatives_args, derivatives_kwargs=derivatives_kwargs) + return _wrap( + AffineScalarFunc, + f, + derivatives_args=derivatives_args, + derivatives_kwargs=derivatives_kwargs, + ) + + ############################################################################### + class NegativeStdDev(Exception): - '''Raise for a negative standard deviation''' + """Raise for a negative standard deviation""" + pass + class Variable(AffineScalarFunc): """ Representation of a float-like scalar Variable with its uncertainty. @@ -734,7 +731,7 @@ class Variable(AffineScalarFunc): """ # To save memory in large arrays: - __slots__ = ('_std_dev', 'tag') + __slots__ = ("_std_dev", "tag") def __init__(self, value, std_dev, tag=None): """ @@ -772,7 +769,7 @@ def __init__(self, value, std_dev, tag=None): # takes much more memory. Thus, this implementation chooses # more cycles and a smaller memory footprint instead of no # cycles and a larger memory footprint. - super(Variable, self).__init__(value, LinearCombination({self: 1.})) + super(Variable, self).__init__(value, LinearCombination({self: 1.0})) self.std_dev = std_dev # Assignment through a Python property @@ -788,7 +785,6 @@ def std_dev(self): # std_dev of their Variables): @std_dev.setter def std_dev(self, std_dev): - # We force the error to be float-like. Since it is considered # as a standard deviation, it must be either positive or NaN: # (Note: if NaN < 0 is False, there is no need to test @@ -801,8 +797,7 @@ def std_dev(self, std_dev): # The following method is overridden so that we can represent the tag: def __repr__(self): - - num_repr = super(Variable, self).__repr__() + num_repr = super(Variable, self).__repr__() if self.tag is None: return num_repr @@ -855,6 +850,7 @@ def __deepcopy__(self, memo): # Utilities + def nominal_value(x): """ Return the nominal value of x if it is a quantity with @@ -870,6 +866,7 @@ def nominal_value(x): else: return x + def std_dev(x): """ Return the standard deviation of x if it is a quantity with @@ -883,7 +880,8 @@ def std_dev(x): if isinstance(x, AffineScalarFunc): return x.std_dev else: - return 0. + return 0.0 + def covariance_matrix(nums_with_uncert): """ @@ -905,49 +903,57 @@ def covariance_matrix(nums_with_uncert): # See PSI.411 in EOL's notes. covariance_matrix = [] - for (i1, expr1) in enumerate(nums_with_uncert, 1): + for i1, expr1 in enumerate(nums_with_uncert, 1): derivatives1 = expr1.derivatives # Optimization vars1 = set(derivatives1) # !! 
Python 2.7+: viewkeys() would work coefs_expr1 = [] for expr2 in nums_with_uncert[:i1]: derivatives2 = expr2.derivatives # Optimization - coefs_expr1.append(sum( - ((derivatives1[var]*derivatives2[var]*var._std_dev**2) - # var is a variable common to both numbers with - # uncertainties: - for var in vars1.intersection(derivatives2)), - # The result is always a float (sum() with no terms - # returns an integer): - 0.)) + coefs_expr1.append( + sum( + ( + (derivatives1[var] * derivatives2[var] * var._std_dev**2) + # var is a variable common to both numbers with + # uncertainties: + for var in vars1.intersection(derivatives2) + ), + # The result is always a float (sum() with no terms + # returns an integer): + 0.0, + ) + ) covariance_matrix.append(coefs_expr1) # We symmetrize the matrix: - for (i, covariance_coefs) in enumerate(covariance_matrix): - covariance_coefs.extend([covariance_matrix[j][i] - for j in range(i+1, len(covariance_matrix))]) + for i, covariance_coefs in enumerate(covariance_matrix): + covariance_coefs.extend( + [covariance_matrix[j][i] for j in range(i + 1, len(covariance_matrix))] + ) return covariance_matrix + try: import numpy except ImportError: pass else: + def correlation_matrix(nums_with_uncert): - ''' + """ Return the correlation matrix of the given sequence of numbers with uncertainties, as a NumPy array of floats. - ''' + """ cov_mat = numpy.array(covariance_matrix(nums_with_uncert)) std_devs = numpy.sqrt(cov_mat.diagonal()) - return cov_mat/std_devs/std_devs[numpy.newaxis].T + return cov_mat / std_devs / std_devs[numpy.newaxis].T - __all__.append('correlation_matrix') + __all__.append("correlation_matrix") def ufloat_fromstr(representation, tag=None): diff --git a/uncertainties/formatting.py b/uncertainties/formatting.py index ccd7d497..4049feb4 100644 --- a/uncertainties/formatting.py +++ b/uncertainties/formatting.py @@ -1,25 +1,25 @@ from math import isinf, isnan, isfinite import math import re -import warnings def first_digit(value): - ''' + """ Return the first digit position of the given value, as an integer. 0 is the digit just before the decimal point. Digits to the right of the decimal point have a negative position. Return 0 for a null value. - ''' + """ try: return int(math.floor(math.log10(abs(value)))) except ValueError: # Case of value == 0 return 0 + def PDG_precision(std_dev): - ''' + """ Return the number of significant digits to be used for the given standard deviation, according to the rounding rules of the Particle Data Group (2010) @@ -27,7 +27,7 @@ def PDG_precision(std_dev): Also returns the effective standard deviation to be used for display. - ''' + """ exponent = first_digit(std_dev) @@ -43,10 +43,10 @@ def PDG_precision(std_dev): # range for very small and very big floats is generally different. 
if exponent >= 0: # The -2 here means "take two additional digits": - (exponent, factor) = (exponent-2, 1) + (exponent, factor) = (exponent - 2, 1) else: - (exponent, factor) = (exponent+1, 1000) - digits = int(std_dev/10.**exponent*factor) # int rounds towards zero + (exponent, factor) = (exponent + 1, 1000) + digits = int(std_dev / 10.0**exponent * factor) # int rounds towards zero # Rules: if digits <= 354: @@ -56,7 +56,8 @@ def PDG_precision(std_dev): else: # The parentheses matter, for very small or very large # std_dev: - return (2, 10.**exponent*(1000/factor)) + return (2, 10.0**exponent * (1000 / factor)) + # Definition of a basic (format specification only, no full-feature # format string) formatting function that works whatever the version @@ -66,10 +67,11 @@ def PDG_precision(std_dev): # Exponent letter: the keys are the possible main_fmt_type values of # format_num(): -EXP_LETTERS = {'f': 'e', 'F': 'E'} +EXP_LETTERS = {"f": "e", "F": "E"} + def robust_align(orig_str, fill_char, align_option, width): - ''' + """ Aligns the given string with the given fill character. orig_str -- string to be aligned (str or unicode object). @@ -79,49 +81,51 @@ def robust_align(orig_str, fill_char, align_option, width): align_option -- as accepted by format(). wdith -- string that contains the width. - ''' + """ # print "ALIGNING", repr(orig_str), "WITH", fill_char+align_option, # print "WIDTH", width - return format(orig_str, fill_char+align_option+width) + return format(orig_str, fill_char + align_option + width) + # Maps some Unicode code points ("-", "+", and digits) to their # superscript version: TO_SUPERSCRIPT = { - 0x2b: u'⁺', - 0x2d: u'⁻', - 0x30: u'⁰', - 0x31: u'¹', - 0x32: u'²', - 0x33: u'³', - 0x34: u'⁴', - 0x35: u'⁵', - 0x36: u'⁶', - 0x37: u'⁷', - 0x38: u'⁸', - 0x39: u'⁹' - } + 0x2B: "⁺", + 0x2D: "⁻", + 0x30: "⁰", + 0x31: "¹", + 0x32: "²", + 0x33: "³", + 0x34: "⁴", + 0x35: "⁵", + 0x36: "⁶", + 0x37: "⁷", + 0x38: "⁸", + 0x39: "⁹", +} # Inverted TO_SUPERSCRIPT table, for use with unicode.translate(): # #! Python 2.7+ can use a dictionary comprehension instead: -FROM_SUPERSCRIPT = { - ord(sup): normal for (normal, sup) in TO_SUPERSCRIPT.items()} +FROM_SUPERSCRIPT = {ord(sup): normal for (normal, sup) in TO_SUPERSCRIPT.items()} + def to_superscript(value): - ''' + """ Return a (Unicode) string with the given value as superscript characters. The value is formatted with the %d %-operator format. value -- integer. - ''' + """ + + return ("%d" % value).translate(TO_SUPERSCRIPT) - return (u'%d' % value).translate(TO_SUPERSCRIPT) def nrmlze_superscript(number_str): - ''' + """ Return a string with superscript digits transformed into regular digits. Non-superscript digits are not changed before the conversion. Thus, the @@ -131,42 +135,45 @@ def nrmlze_superscript(number_str): number_str -- string to be converted (of type str, but also possibly, for Python 2, unicode, which allows this string to contain superscript digits). - ''' + """ # !! 
Python 3 doesn't need this str(), which is only here for giving the # .translate() method to str objects in Python 2 (this str() comes # from the builtins module of the future package and is therefore # a subclass of unicode, in Python 2): return int(str(number_str).translate(FROM_SUPERSCRIPT)) -PM_SYMBOLS = {'pretty-print': u'±', 'latex': r' \pm ', 'default': '+/-'} + +PM_SYMBOLS = {"pretty-print": "±", "latex": r" \pm ", "default": "+/-"} # Multiplication symbol for pretty printing (so that pretty printing can # be customized): -MULT_SYMBOLS = {'pretty-print': u'×', 'latex': r'\times'} +MULT_SYMBOLS = {"pretty-print": "×", "latex": r"\times"} # Function that transforms a numerical exponent produced by format_num() into # the corresponding string notation (for non-default modes): EXP_PRINT = { - 'pretty-print': lambda common_exp: u'%s10%s' % ( - MULT_SYMBOLS['pretty-print'], to_superscript(common_exp)), - 'latex': lambda common_exp: r' %s 10^{%d}' % ( - MULT_SYMBOLS['latex'], common_exp)} + "pretty-print": lambda common_exp: "%s10%s" + % (MULT_SYMBOLS["pretty-print"], to_superscript(common_exp)), + "latex": lambda common_exp: r" %s 10^{%d}" % (MULT_SYMBOLS["latex"], common_exp), +} # Symbols used for grouping (typically between parentheses) in format_num(): GROUP_SYMBOLS = { - 'pretty-print': ('(', ')'), + "pretty-print": ("(", ")"), # Because of possibly exponents inside the parentheses (case of a # specified field width), it is better to use auto-adjusting # parentheses. This has the side effect of making the part between # the parentheses non-breakable (the text inside parentheses in a # LaTeX math expression $...$ can be broken). - 'latex': (r'\left(', r'\right)'), - 'default': ('(', ')') # Basic text mode - } + "latex": (r"\left(", r"\right)"), + "default": ("(", ")"), # Basic text mode +} + -def format_num(nom_val_main, error_main, common_exp, - fmt_parts, prec, main_pres_type, options): - u''' +def format_num( + nom_val_main, error_main, common_exp, fmt_parts, prec, main_pres_type, options +): + """ Return a formatted number with uncertainty. Null errors (error_main) are displayed as the integer 0, with @@ -222,7 +229,7 @@ def format_num(nom_val_main, error_main, common_exp, be combined. The P option has priority over the L option (if both are given). For details, see the documentation for AffineScalarFunction.__format__(). - ''' + """ # print (nom_val_main, error_main, common_exp, # fmt_parts, prec, main_pres_type, options) @@ -251,33 +258,33 @@ def format_num(nom_val_main, error_main, common_exp, # the LaTeX mode. This setting does not apply to everything: for # example, NaN is formatted as \mathrm{nan} (or NAN) if the LaTeX # mode is required. - if 'P' in options: - print_type = 'pretty-print' - elif 'L' in options: - print_type = 'latex' + if "P" in options: + print_type = "pretty-print" + elif "L" in options: + print_type = "latex" else: - print_type = 'default' + print_type = "default" # Exponent part: if common_exp is None: - exp_str = '' - elif print_type == 'default': + exp_str = "" + elif print_type == "default": # Case of e or E. 
The same convention as Python 2.7 # to 3.3 is used for the display of the exponent: - exp_str = EXP_LETTERS[main_pres_type]+'%+03d' % common_exp + exp_str = EXP_LETTERS[main_pres_type] + "%+03d" % common_exp else: exp_str = EXP_PRINT[print_type](common_exp) # Possible % sign: - percent_str = '' - if '%' in options: - if 'L' in options: + percent_str = "" + if "%" in options: + if "L" in options: # % is a special character, in LaTeX: it must be escaped. # # Using '\\' in the code instead of r'\' so as not to # confuse emacs's syntax highlighting: - percent_str += ' \\' - percent_str += '%' + percent_str += " \\" + percent_str += "%" #################### @@ -288,15 +295,14 @@ def format_num(nom_val_main, error_main, common_exp, # Nicer representation of the main nominal part, with no trailing # zeros, when the error does not have a defined number of # significant digits: - if special_error and fmt_parts['type'] in ('', 'g', 'G'): + if special_error and fmt_parts["type"] in ("", "g", "G"): # The main part is between 1 and 10 because any possible # exponent is taken care of by common_exp, so it is # formatted without an exponent (otherwise, the exponent # would have to be handled for the LaTeX option): - fmt_suffix_n = (fmt_parts['prec'] or '')+fmt_parts['type'] + fmt_suffix_n = (fmt_parts["prec"] or "") + fmt_parts["type"] else: - fmt_suffix_n = '.%d%s' % (prec, main_pres_type) - + fmt_suffix_n = ".%d%s" % (prec, main_pres_type) # print "FMT_SUFFIX_N", fmt_suffix_n @@ -307,25 +313,22 @@ def format_num(nom_val_main, error_main, common_exp, # Error formatting: - - if 'S' in options: # Shorthand notation: - + if "S" in options: # Shorthand notation: # Calculation of the uncertainty part, uncert_str: if error_main == 0: # The error is exactly zero - uncert_str = '0' + uncert_str = "0" elif isnan(error_main): uncert_str = robust_format(error_main, main_pres_type) - if 'L' in options: - uncert_str = r'\mathrm{%s}' % uncert_str + if "L" in options: + uncert_str = r"\mathrm{%s}" % uncert_str elif isinf(error_main): - if 'L' in options: - uncert_str = r'\infty' + if "L" in options: + uncert_str = r"\infty" else: uncert_str = robust_format(error_main, main_pres_type) else: # Error with a meaningful first digit (not 0, and real number) - uncert = round(error_main, prec) # The representation uncert_str of the uncertainty (which will @@ -339,24 +342,24 @@ def format_num(nom_val_main, error_main, common_exp, if first_digit(uncert) >= 0 and prec > 0: # This case includes a zero rounded error with digits # after the decimal point: - uncert_str = '%.*f' % (prec, uncert) + uncert_str = "%.*f" % (prec, uncert) else: if uncert: # The round is important because 566.99999999 can # first be obtained when 567 is wanted (%d prints the # integer part, not the rounded value): - uncert_str = '%d' % round(uncert*10.**prec) + uncert_str = "%d" % round(uncert * 10.0**prec) else: # The decimal point indicates a truncated float # (this is easy to do, in this case, since # fmt_prefix_e is ignored): - uncert_str = '0.' + uncert_str = "0." # End of the final number representation (width and alignment # not included). 
This string is important for the handling of # the width: - value_end = '(%s)%s%s' % (uncert_str, exp_str, percent_str) + value_end = "(%s)%s%s" % (uncert_str, exp_str, percent_str) any_exp_factored = True # Single exponent in the output ########## @@ -365,60 +368,62 @@ def format_num(nom_val_main, error_main, common_exp, # Calculation of fmt_prefix_n (prefix for the format of the # main part of the nominal value): - if fmt_parts['zero'] and fmt_parts['width']: - + if fmt_parts["zero"] and fmt_parts["width"]: # Padding with zeros must be done on the nominal value alone: # Remaining width (for the nominal value): - nom_val_width = max(int(fmt_parts['width']) - len(value_end), 0) - fmt_prefix_n = '%s%s%d%s' % ( - fmt_parts['sign'], fmt_parts['zero'], nom_val_width, - fmt_parts['comma']) + nom_val_width = max(int(fmt_parts["width"]) - len(value_end), 0) + fmt_prefix_n = "%s%s%d%s" % ( + fmt_parts["sign"], + fmt_parts["zero"], + nom_val_width, + fmt_parts["comma"], + ) else: # Any 'zero' part should not do anything: it is not # included - fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] + fmt_prefix_n = fmt_parts["sign"] + fmt_parts["comma"] # print "FMT_PREFIX_N", fmt_prefix_n # print "FMT_SUFFIX_N", fmt_suffix_n - nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n) + nom_val_str = robust_format(nom_val_main, fmt_prefix_n + fmt_suffix_n) ########## # Overriding of nom_val_str for LaTeX,; possibly based on the # existing value (for NaN vs nan): - if 'L' in options: - + if "L" in options: if isnan(nom_val_main): - nom_val_str = r'\mathrm{%s}' % nom_val_str + nom_val_str = r"\mathrm{%s}" % nom_val_str elif isinf(nom_val_main): # !! It is wasteful, in this case, to replace # nom_val_str: could this be avoided while avoiding to # duplicate the formula for nom_val_str for the common # case (robust_format(...))? - nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '') + nom_val_str = r"%s\infty" % ("-" if nom_val_main < 0 else "") - value_str = nom_val_str+value_end + value_str = nom_val_str + value_end # Global width, if any: - if fmt_parts['width']: # An individual alignment is needed: - + if fmt_parts["width"]: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): value_str = robust_align( - value_str, fmt_parts['fill'], fmt_parts['align'] or '>', - fmt_parts['width']) + value_str, + fmt_parts["fill"], + fmt_parts["align"] or ">", + fmt_parts["width"], + ) else: # +/- notation: - # The common exponent is factored or not, depending on the # width. 
This gives nice columns for the nominal values and # the errors (no shift due to a varying exponent), when a need # is given: - any_exp_factored = not fmt_parts['width'] + any_exp_factored = not fmt_parts["width"] # True when the error part has any exponent directly attached # (case of an individual exponent for both the nominal value @@ -429,40 +434,41 @@ def format_num(nom_val_main, error_main, common_exp, # to have a zero uncertainty be very explicit): error_has_exp = not any_exp_factored and not special_error - # Like error_has_exp, but only for real number handling + # Like error_has_exp, but only for real number handling # (there is no special meaning to a zero nominal value): nom_has_exp = not any_exp_factored and isfinite(nom_val_main) # Prefix for the parts: - if fmt_parts['width']: # Individual widths - + if fmt_parts["width"]: # Individual widths # If zeros are needed, then the width is taken into # account now (before the exponent is added): - if fmt_parts['zero']: - - width = int(fmt_parts['width']) + if fmt_parts["zero"]: + width = int(fmt_parts["width"]) # Remaining (minimum) width after including the # exponent: - remaining_width = max(width-len(exp_str), 0) + remaining_width = max(width - len(exp_str), 0) - fmt_prefix_n = '%s%s%d%s' % ( - fmt_parts['sign'], fmt_parts['zero'], + fmt_prefix_n = "%s%s%d%s" % ( + fmt_parts["sign"], + fmt_parts["zero"], remaining_width if nom_has_exp else width, - fmt_parts['comma']) + fmt_parts["comma"], + ) - fmt_prefix_e = '%s%d%s' % ( - fmt_parts['zero'], + fmt_prefix_e = "%s%d%s" % ( + fmt_parts["zero"], remaining_width if error_has_exp else width, - fmt_parts['comma']) + fmt_parts["comma"], + ) else: - fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] - fmt_prefix_e = fmt_parts['comma'] + fmt_prefix_n = fmt_parts["sign"] + fmt_parts["comma"] + fmt_prefix_e = fmt_parts["comma"] else: # Global width - fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] - fmt_prefix_e = fmt_parts['comma'] + fmt_prefix_n = fmt_parts["sign"] + fmt_parts["comma"] + fmt_prefix_e = fmt_parts["comma"] ## print "ANY_EXP_FACTORED", any_exp_factored ## print "ERROR_HAS_EXP", error_has_exp @@ -480,7 +486,7 @@ def format_num(nom_val_main, error_main, common_exp, # print "FMT_PREFIX_N", fmt_prefix_n # print "FMT_SUFFIX_N", fmt_suffix_n - nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n) + nom_val_str = robust_format(nom_val_main, fmt_prefix_n + fmt_suffix_n) # print "NOM_VAL_STR", nom_val_str @@ -499,31 +505,32 @@ def format_num(nom_val_main, error_main, common_exp, if error_main: # The handling of NaN/inf in the nominal value identical to # the handling of NaN/inf in the standard deviation: - if (not isfinite(nom_val_main) + if ( + not isfinite(nom_val_main) # Only some formats have a nicer representation: - and fmt_parts['type'] in ('', 'g', 'G')): + and fmt_parts["type"] in ("", "g", "G") + ): # The error can be formatted independently: - fmt_suffix_e = (fmt_parts['prec'] or '')+fmt_parts['type'] + fmt_suffix_e = (fmt_parts["prec"] or "") + fmt_parts["type"] else: - fmt_suffix_e = '.%d%s' % (prec, main_pres_type) + fmt_suffix_e = ".%d%s" % (prec, main_pres_type) else: - fmt_suffix_e = '.0%s' % main_pres_type + fmt_suffix_e = ".0%s" % main_pres_type - error_str = robust_format(error_main, fmt_prefix_e+fmt_suffix_e) + error_str = robust_format(error_main, fmt_prefix_e + fmt_suffix_e) ########## # Overriding of nom_val_str and error_str for LaTeX: - if 'L' in options: - + if "L" in options: if isnan(nom_val_main): - nom_val_str = r'\mathrm{%s}' % 
nom_val_str + nom_val_str = r"\mathrm{%s}" % nom_val_str elif isinf(nom_val_main): - nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '') + nom_val_str = r"%s\infty" % ("-" if nom_val_main < 0 else "") if isnan(error_main): - error_str = r'\mathrm{%s}' % error_str + error_str = r"\mathrm{%s}" % error_str elif isinf(error_main): - error_str = r'\infty' + error_str = r"\infty" if nom_has_exp: nom_val_str += exp_str @@ -533,23 +540,22 @@ def format_num(nom_val_main, error_main, common_exp, #################### # Final alignment of each field, if needed: - if fmt_parts['width']: # An individual alignment is needed: - + if fmt_parts["width"]: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): - effective_align = fmt_parts['align'] or '>' + effective_align = fmt_parts["align"] or ">" # robust_format() is used because it may handle alignment # options, where the % operator does not: nom_val_str = robust_align( - nom_val_str, fmt_parts['fill'], effective_align, - fmt_parts['width']) + nom_val_str, fmt_parts["fill"], effective_align, fmt_parts["width"] + ) error_str = robust_align( - error_str, fmt_parts['fill'], effective_align, - fmt_parts['width']) + error_str, fmt_parts["fill"], effective_align, fmt_parts["width"] + ) #################### pm_symbol = PM_SYMBOLS[print_type] # Shortcut @@ -567,33 +573,41 @@ def format_num(nom_val_main, error_main, common_exp, # percent sign handling because this sign may too need # parentheses. if any_exp_factored and common_exp is not None: # Exponent - value_str = ''.join(( - LEFT_GROUPING, - nom_val_str, pm_symbol, error_str, - RIGHT_GROUPING, - exp_str, percent_str)) + value_str = "".join( + ( + LEFT_GROUPING, + nom_val_str, + pm_symbol, + error_str, + RIGHT_GROUPING, + exp_str, + percent_str, + ) + ) else: # No exponent - value_str = ''.join([nom_val_str, pm_symbol, error_str]) + value_str = "".join([nom_val_str, pm_symbol, error_str]) if percent_str: - value_str = ''.join(( - LEFT_GROUPING, value_str, RIGHT_GROUPING, percent_str)) - elif 'p' in options: - value_str = ''.join((LEFT_GROUPING, value_str, RIGHT_GROUPING)) + value_str = "".join( + (LEFT_GROUPING, value_str, RIGHT_GROUPING, percent_str) + ) + elif "p" in options: + value_str = "".join((LEFT_GROUPING, value_str, RIGHT_GROUPING)) return value_str + def signif_dgt_to_limit(value, num_signif_d): - ''' + """ Return the precision limit necessary to display value with num_signif_d significant digits. The precision limit is given as -1 for 1 digit after the decimal point, 0 for integer rounding, etc. It can be positive. - ''' + """ fst_digit = first_digit(value) - limit_no_rounding = fst_digit-num_signif_d+1 + limit_no_rounding = fst_digit - num_signif_d + 1 # The number of significant digits of the uncertainty, when # rounded at this limit_no_rounding level, can be too large by 1 @@ -613,7 +627,7 @@ def signif_dgt_to_limit(value, num_signif_d): def format_ufloat(ufloat_to_format, format_spec): - ''' + """ Formats a number with uncertainty. The format specification are the same as for format() for @@ -714,7 +728,7 @@ def format_ufloat(ufloat_to_format, format_spec): Some details of the formatting can be customized as described in format_num(). 
- ''' + """ # Convention on limits "between" digits: 0 = exactly at the # decimal point, -1 = after the first decimal, 1 = before the @@ -732,7 +746,8 @@ def format_ufloat(ufloat_to_format, format_spec): # Format specification parsing: - match = re.match(r''' + match = re.match( + r""" (?P[^{}]??)(?P[<>=^]?) # fill cannot be { or } (?P[-+ ]?) (?P0?) @@ -743,18 +758,20 @@ def format_ufloat(ufloat_to_format, format_spec): # The type can be omitted. Options must not go here: (?P[eEfFgG%]??) # n not supported (?P[PSLp]*) # uncertainties-specific flags - $''', + $""", format_spec, - re.VERBOSE) + re.VERBOSE, + ) # Does the format specification look correct? if not match: raise ValueError( - 'Format specification %r cannot be used with object of type' - ' %r. Note that uncertainties-specific flags must be put at' - ' the end of the format string.' + "Format specification %r cannot be used with object of type" + " %r. Note that uncertainties-specific flags must be put at" + " the end of the format string." # Sub-classes handled: - % (format_spec, ufloat_to_format.__class__.__name__)) + % (format_spec, ufloat_to_format.__class__.__name__) + ) # Effective format presentation type: f, e, g, etc., or None, # like in @@ -763,10 +780,10 @@ def format_ufloat(ufloat_to_format, format_spec): # None is "the same as 'g'": "{}".format() and "{:g}" do not # give the same result, on 31415000000.0. None is thus kept as # is instead of being replaced by "g". - pres_type = match.group('type') or None + pres_type = match.group("type") or None # Shortcut: - fmt_prec = match.group('prec') # Can be None + fmt_prec = match.group("prec") # Can be None ######################################## @@ -779,13 +796,13 @@ def format_ufloat(ufloat_to_format, format_spec): std_dev = ufloat_to_format.std_dev # 'options' is the options that must be given to format_num(): - options = set(match.group('options')) + options = set(match.group("options")) ######################################## # The '%' format is treated internally as a display option: it # should not be applied individually to each part: - if pres_type == '%': + if pres_type == "%": # Because '%' does 0.0055*100, the value # 0.5499999999999999 is obtained, which rounds to 0.5. The # original rounded value is 0.006. The same behavior is @@ -795,8 +812,8 @@ def format_ufloat(ufloat_to_format, format_spec): # multiplication. std_dev *= 100 nom_val *= 100 - pres_type = 'f' - options.add('%') + pres_type = "f" + options.add("%") # At this point, pres_type is in eEfFgG or None (not %). 
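# The options parsed above ('P', 'S', 'L', 'p', '%'), together with the
# '.Nu' uncertainty-precision marker, combine with the usual float format
# specification.  A few indicative examples, assuming the package-level
# ufloat() constructor exported by uncertainties/__init__.py (the default
# output follows the PDG two-significant-digit rule applied further below):
#
#     >>> from uncertainties import ufloat
#     >>> x = ufloat(0.2, 0.01)
#     >>> '{}'.format(x)          # default: PDG precision on the uncertainty
#     '0.200+/-0.010'
#     >>> '{:.1u}'.format(x)      # one significant digit on the uncertainty
#     '0.20+/-0.01'
#     >>> '{:P}'.format(x)        # pretty-print +/- symbol
#     '0.200±0.010'
#     >>> '{:S}'.format(x)        # shorthand (parenthesized) notation
#     '0.200(10)'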
@@ -804,8 +821,7 @@ def format_ufloat(ufloat_to_format, format_spec): # Non-real values (nominal value or standard deviation) must # be handled in a specific way: - real_values = [value for value in [abs(nom_val), std_dev] - if isfinite(value)] + real_values = [value for value in [abs(nom_val), std_dev] if isfinite(value)] # Calculation of digits_limit, which defines the precision of # the nominal value and of the standard deviation (it can be @@ -813,7 +829,7 @@ def format_ufloat(ufloat_to_format, format_spec): # Reference value for the calculation of a possible exponent, # if needed: - if pres_type in (None, 'e', 'E', 'g', 'G'): + if pres_type in (None, "e", "E", "g", "G"): # Reference value for the exponent: the largest value # defines what the exponent will be (another convention # could have been chosen, like using the exponent of the @@ -829,21 +845,22 @@ def format_ufloat(ufloat_to_format, format_spec): # Should the precision be interpreted like for a float, or # should the number of significant digits on the uncertainty # be controlled? - if (( - # Default behavior: number of significant digits on the - # uncertainty controlled (if useful, i.e. only in - # situations where the nominal value and the standard - # error digits are truncated at the same place): - (not fmt_prec and len(real_values)==2) - or match.group('uncert_prec')) # Explicit control + if ( + ( + # Default behavior: number of significant digits on the + # uncertainty controlled (if useful, i.e. only in + # situations where the nominal value and the standard + # error digits are truncated at the same place): + (not fmt_prec and len(real_values) == 2) or match.group("uncert_prec") + ) # Explicit control # The number of significant digits of the uncertainty must # be meaningful, otherwise the position of the significant # digits of the uncertainty does not have a clear # meaning. This gives us the *effective* uncertainty # control mode: and std_dev - and isfinite(std_dev)): - + and isfinite(std_dev) + ): # The number of significant digits on the uncertainty is # controlled. @@ -859,15 +876,16 @@ def format_ufloat(ufloat_to_format, format_spec): if fmt_prec: num_signif_d = int(fmt_prec) # Can only be non-negative if not num_signif_d: - raise ValueError("The number of significant digits" - " on the uncertainty should be positive") + raise ValueError( + "The number of significant digits" + " on the uncertainty should be positive" + ) else: (num_signif_d, std_dev) = PDG_precision(std_dev) digits_limit = signif_dgt_to_limit(std_dev, num_signif_d) else: - # No control of the number of significant digits on the # uncertainty. 
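# For reference, the effective uncertainty-control branch above falls back
# on PDG_precision() when no explicit precision is given.  A short worked
# sketch of that rule (the three leading digits of the standard deviation
# select the precision), assuming the module layout used in this file:
#
#     >>> from uncertainties.formatting import PDG_precision
#     >>> PDG_precision(0.0123)   # leading digits 123 <= 354: 2 significant digits
#     (2, 0.0123)
#     >>> PDG_precision(0.0456)   # 355 <= 456 <= 949: 1 significant digit
#     (1, 0.0456)
#     >>> PDG_precision(0.0987)   # 987 >= 950: bump the error to 0.1, keep 2 digits
#     (2, 0.1)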
@@ -891,25 +909,22 @@ def format_ufloat(ufloat_to_format, format_spec): else: prec = 6 - if pres_type in ('f', 'F'): - + if pres_type in ("f", "F"): digits_limit = -prec else: # Format type in None, eEgG - # We first calculate the number of significant digits # to be displayed (if possible): - if pres_type in ('e', 'E'): + if pres_type in ("e", "E"): # The precision is the number of significant # digits required - 1 (because there is a single # digit before the decimal point, which is not # included in the definition of the precision with # the e/E format type): - num_signif_digits = prec+1 + num_signif_digits = prec + 1 else: # Presentation type in None, g, G - # Effective format specification precision: the rule # of # http://docs.python.org/2.7/library/string.html#format-specification-mini-language @@ -936,7 +951,8 @@ def format_ufloat(ufloat_to_format, format_spec): digits_limit = ( signif_dgt_to_limit(exp_ref_value, num_signif_digits) if real_values - else None) + else None + ) ## print "DIGITS_LIMIT", digits_limit @@ -947,9 +963,9 @@ def format_ufloat(ufloat_to_format, format_spec): # True), 'common_exp' is set to the exponent that should be # used. - if pres_type in ('f', 'F'): + if pres_type in ("f", "F"): use_exp = False - elif pres_type in ('e', 'E'): + elif pres_type in ("e", "E"): if not real_values: use_exp = False else: @@ -961,7 +977,6 @@ def format_ufloat(ufloat_to_format, format_spec): common_exp = first_digit(round(exp_ref_value, -digits_limit)) else: # None, g, G - # The rules from # https://docs.python.org/3.4/library/string.html#format-specification-mini-language # are applied. @@ -994,7 +1009,7 @@ def format_ufloat(ufloat_to_format, format_spec): # The number of significant digits of the reference value # rounded at digits_limit is exponent-digits_limit+1: - if -4 <= common_exp < common_exp-digits_limit+1: + if -4 <= common_exp < common_exp - digits_limit + 1: use_exp = False else: use_exp = True @@ -1011,17 +1026,15 @@ def format_ufloat(ufloat_to_format, format_spec): # exponent should be used. if use_exp: - # Not 10.**(-common_exp), for limit values of common_exp: - factor = 10.**common_exp + factor = 10.0**common_exp - nom_val_mantissa = nom_val/factor - std_dev_mantissa = std_dev/factor + nom_val_mantissa = nom_val / factor + std_dev_mantissa = std_dev / factor # Limit for the last digit of the mantissas: signif_limit = digits_limit - common_exp else: # No common exponent - common_exp = None nom_val_mantissa = nom_val @@ -1035,7 +1048,7 @@ def format_ufloat(ufloat_to_format, format_spec): # Format of the main (i.e. with no exponent) parts (the None # presentation type is similar to the g format type): - main_pres_type = 'fF'[(pres_type or 'g').isupper()] + main_pres_type = "fF"[(pres_type or "g").isupper()] # The precision of the main parts must be adjusted so as # to take into account the special role of the decimal @@ -1057,9 +1070,7 @@ def format_ufloat(ufloat_to_format, format_spec): # digit past the decimal point" of Python # (https://docs.python.org/3.4/library/string.html#format-specification-mini-language). This # is only applied for null uncertainties. 
- prec = max(-signif_limit, - 1 if pres_type is None and not std_dev - else 0) + prec = max(-signif_limit, 1 if pres_type is None and not std_dev else 0) ## print "PREC", prec ######################################## @@ -1076,8 +1087,12 @@ def format_ufloat(ufloat_to_format, format_spec): # options)) # Final formatting: - return format_num(nom_val_mantissa, std_dev_mantissa, common_exp, - match.groupdict(), - prec=prec, - main_pres_type=main_pres_type, - options=options) \ No newline at end of file + return format_num( + nom_val_mantissa, + std_dev_mantissa, + common_exp, + match.groupdict(), + prec=prec, + main_pres_type=main_pres_type, + options=options, + ) diff --git a/uncertainties/ops.py b/uncertainties/ops.py index 3999b511..332435b3 100644 --- a/uncertainties/ops.py +++ b/uncertainties/ops.py @@ -1,23 +1,22 @@ # This file contains code for AffineScalarFunc's arithmetic and comparative ops. -from math import sqrt, log, isnan, isinf # Optimization: no attribute look-up +from math import sqrt, log # Optimization: no attribute look-up import sys import itertools from inspect import getfullargspec -import numbers +import numbers # Some types known to not depend on Variable objects are put in # CONSTANT_TYPES. The most common types can be put in front, as this # may slightly improve the execution speed. FLOAT_LIKE_TYPES = (numbers.Number,) -CONSTANT_TYPES = FLOAT_LIKE_TYPES+(complex,) +CONSTANT_TYPES = FLOAT_LIKE_TYPES + (complex,) try: import numpy except ImportError: pass else: - # NumPy numbers do not depend on Variable objects: FLOAT_LIKE_TYPES += (numpy.generic,) CONSTANT_TYPES += FLOAT_LIKE_TYPES[-1:] @@ -30,11 +29,14 @@ def set_doc(doc_string): It is useful for functions whose docstring is calculated (including string substitutions). """ + def set_doc_string(func): func.__doc__ = doc_string return func + return set_doc_string + # Some operators can have undefined derivatives but still give # meaningful values when some of their arguments have a zero # uncertainty. Such operators return NaN when their derivative is @@ -46,22 +48,23 @@ def set_doc_string(func): # Exception catching is used so as to not slow down regular # operation too much: + def nan_if_exception(f): - ''' + """ Wrapper around f(x, y) that let f return NaN when f raises one of a few numerical exceptions. - ''' + """ def wrapped_f(*args, **kwargs): try: return f(*args, **kwargs) except (ValueError, ZeroDivisionError, OverflowError): - return float('nan') + return float("nan") return wrapped_f -def get_ops_with_reflection(): +def get_ops_with_reflection(): """ Return operators with a reflection, along with their partial derivatives. @@ -90,30 +93,31 @@ def get_ops_with_reflection(): # to code, and execute relatively efficiently: derivatives_list = { - 'add': ("1.", "1."), + "add": ("1.", "1."), # 'div' is the '/' operator when __future__.division is not in # effect. Since '/' is applied to # AffineScalarFunc._nominal_value numbers, it is applied on # floats, and is therefore the "usual" mathematical division. 
- 'div': ("1/y", "-x/y**2"), - 'floordiv': ("0.", "0."), # Non exact: there is a discontinuity + "div": ("1/y", "-x/y**2"), + "floordiv": ("0.", "0."), # Non exact: there is a discontinuity # The derivative wrt the 2nd arguments is something like (..., x//y), # but it is calculated numerically, for convenience: - 'mod': ("1.", "partial_derivative(float.__mod__, 1)(x, y)"), - 'mul': ("y", "x"), - 'sub': ("1.", "-1."), - 'truediv': ("1/y", "-x/y**2") - } + "mod": ("1.", "partial_derivative(float.__mod__, 1)(x, y)"), + "mul": ("y", "x"), + "sub": ("1.", "-1."), + "truediv": ("1/y", "-x/y**2"), + } # Conversion to Python functions: ops_with_reflection = {} - for (op, derivatives) in derivatives_list.items(): + for op, derivatives in derivatives_list.items(): ops_with_reflection[op] = [ - eval("lambda x, y: %s" % expr) for expr in derivatives ] - - ops_with_reflection["r"+op] = [ - eval("lambda y, x: %s" % expr) for expr in reversed(derivatives)] + eval("lambda x, y: %s" % expr) for expr in derivatives + ] + ops_with_reflection["r" + op] = [ + eval("lambda y, x: %s" % expr) for expr in reversed(derivatives) + ] # The derivatives of pow() are more complicated: @@ -126,32 +130,37 @@ def get_ops_with_reflection(): def pow_deriv_0(x, y): if y == 0: - return 0. + return 0.0 elif x != 0 or y % 1 == 0: - return y*x**(y-1) + return y * x ** (y - 1) else: - return float('nan') + return float("nan") def pow_deriv_1(x, y): if x == 0 and y > 0: - return 0. + return 0.0 else: - return log(x)*x**y + return log(x) * x**y - ops_with_reflection['pow'] = [pow_deriv_0, pow_deriv_1] - ops_with_reflection['rpow'] = [lambda y, x: pow_deriv_1(x, y), - lambda y, x: pow_deriv_0(x, y)] + ops_with_reflection["pow"] = [pow_deriv_0, pow_deriv_1] + ops_with_reflection["rpow"] = [ + lambda y, x: pow_deriv_1(x, y), + lambda y, x: pow_deriv_0(x, y), + ] # Undefined derivatives are converted to NaN when the function # itself can be calculated: - for op in ['pow']: + for op in ["pow"]: ops_with_reflection[op] = [ - nan_if_exception(func) for func in ops_with_reflection[op]] - ops_with_reflection['r'+op] = [ - nan_if_exception(func) for func in ops_with_reflection['r'+op]] + nan_if_exception(func) for func in ops_with_reflection[op] + ] + ops_with_reflection["r" + op] = [ + nan_if_exception(func) for func in ops_with_reflection["r" + op] + ] return ops_with_reflection + # Operators that have a reflection, along with their derivatives: ops_with_reflection = get_ops_with_reflection() @@ -163,28 +172,28 @@ def pow_deriv_1(x, y): # __*__ operators to AffineScalarFunc, the operators in custom_ops # are used): if sys.version_info < (3,): - custom_ops = {} else: - # !!! This code is not run by the tests. It would be nice to have # it be tested. def no_complex_result(func): - ''' + """ Return a function that does like func, but that raises a ValueError if the result is complex. - ''' + """ + def no_complex_func(*args, **kwargs): - ''' + """ Like %s, but raises a ValueError exception if the result is complex. 
- ''' % func.__name__ + """ % func.__name__ value = func(*args, **kwargs) if isinstance(value, complex): - raise ValueError('The uncertainties module does not handle' - ' complex results') + raise ValueError( + "The uncertainties module does not handle" " complex results" + ) else: return value @@ -194,9 +203,10 @@ def no_complex_func(*args, **kwargs): # complex results for the nominal value of some operations cannot # be calculated with an uncertainty: custom_ops = { - 'pow': no_complex_result(float.__pow__), - 'rpow': no_complex_result(float.__rpow__) - } + "pow": no_complex_result(float.__pow__), + "rpow": no_complex_result(float.__rpow__), + } + def add_arithmetic_ops(cls): """ @@ -215,30 +225,31 @@ def add_arithmetic_ops(cls): def _simple_add_deriv(x): if x >= 0: - return 1. + return 1.0 else: - return -1. + return -1.0 # Single-argument operators that should be adapted from floats to # AffineScalarFunc objects, associated to their derivative: simple_numerical_operators_derivatives = { - 'abs': _simple_add_deriv, - 'neg': lambda x: -1., - 'pos': lambda x: 1., - 'trunc': lambda x: 0. - } - - for (op, derivative) in ( - iter(simple_numerical_operators_derivatives.items())): + "abs": _simple_add_deriv, + "neg": lambda x: -1.0, + "pos": lambda x: 1.0, + "trunc": lambda x: 0.0, + } + for op, derivative in iter(simple_numerical_operators_derivatives.items()): attribute_name = "__%s__" % op # float objects don't exactly have the same attributes between # different versions of Python (for instance, __trunc__ was # introduced with Python 2.6): try: - setattr(cls, attribute_name, - _wrap(cls, getattr(float, attribute_name), [derivative])) + setattr( + cls, + attribute_name, + _wrap(cls, getattr(float, attribute_name), [derivative]), + ) except AttributeError: # Version of Python where floats don't have attribute_name: pass @@ -249,8 +260,8 @@ def _simple_add_deriv(x): # Final definition of the operators for AffineScalarFunc objects: # Reversed versions (useful for float*AffineScalarFunc, for instance): - for (op, derivatives) in ops_with_reflection.items(): - attribute_name = '__%s__' % op + for op, derivatives in ops_with_reflection.items(): + attribute_name = "__%s__" % op # float objects don't exactly have the same attributes between # different versions of Python (for instance, __div__ and @@ -268,26 +279,26 @@ def _simple_add_deriv(x): # Version of Python with floats that don't have attribute_name: pass else: - setattr(cls, attribute_name, - _wrap(cls, func_to_wrap, derivatives)) + setattr(cls, attribute_name, _wrap(cls, func_to_wrap, derivatives)) modified_ops_with_reflection.append(op) ######################################## # Conversions to pure numbers are meaningless. Note that the # behavior of float(1j) is similar. - for coercion_type in ('complex', 'int', 'long', 'float'): - def raise_error(self): - raise TypeError("can't convert an affine function (%s)" - ' to %s; use x.nominal_value' - # In case AffineScalarFunc is sub-classed: - % (self.__class__, coercion_type)) + for coercion_type in ("complex", "int", "long", "float"): - setattr(cls, '__%s__' % coercion_type, raise_error) + def raise_error(self): + raise TypeError( + "can't convert an affine function (%s)" + " to %s; use x.nominal_value" % (self.__class__, coercion_type) + # In case AffineScalarFunc is sub-classed: + ) + setattr(cls, "__%s__" % coercion_type, raise_error) class IndexableIter(object): - ''' + """ Iterable whose values can also be accessed through indexing. The input iterable values are cached. 
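# The derivative tables built by get_ops_with_reflection() above, once
# installed on AffineScalarFunc by add_arithmetic_ops(), drive the usual
# first-order (linear) error propagation.  An indicative sketch, assuming
# the package-level ufloat() constructor and independent inputs:
#
#     >>> from uncertainties import ufloat
#     >>> x = ufloat(2.0, 0.1)
#     >>> y = ufloat(3.0, 0.2)
#     >>> z = x * y                 # 'mul' derivatives: d(xy)/dx = y, d(xy)/dy = x
#     >>> z.nominal_value
#     6.0
#     >>> round(z.std_dev, 6)       # sqrt((3*0.1)**2 + (2*0.2)**2)
#     0.5
#     >>> round((x**2).std_dev, 6)  # pow_deriv_0(2, 2) = 2*2**1 = 4, times 0.1
#     0.4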
@@ -302,32 +313,28 @@ class IndexableIter(object): none_converter -- function that takes an index and returns the value to be returned when None is obtained form the iterable (instead of None). - ''' + """ def __init__(self, iterable, none_converter=lambda index: None): - ''' + """ iterable -- iterable whose values will be returned. none_converter -- function applied to None returned values. The value that replaces None is none_converter(index), where index is the index of the element. - ''' + """ self.iterable = iterable self.returned_elements = [] self.none_converter = none_converter def __getitem__(self, index): - returned_elements = self.returned_elements try: - return returned_elements[index] except IndexError: # Element not yet cached - - for pos in range(len(returned_elements), index+1): - + for pos in range(len(returned_elements), index + 1): value = next(self.iterable) if value is None: @@ -338,9 +345,10 @@ def __getitem__(self, index): return returned_elements[index] def __str__(self): - return '<%s: [%s...]>' % ( + return "<%s: [%s...]>" % ( self.__class__.__name__, - ', '.join(map(str, self.returned_elements))) + ", ".join(map(str, self.returned_elements)), + ) def _wrap(cls, f, derivatives_args=None, derivatives_kwargs=None): @@ -352,8 +360,8 @@ def _wrap(cls, f, derivatives_args=None, derivatives_kwargs=None): # Automatic addition of numerical derivatives in case the # supplied derivatives_args is shorter than the number of # arguments in *args: - itertools.chain(derivatives_args, itertools.repeat(None))) - + itertools.chain(derivatives_args, itertools.repeat(None)) + ) # Derivatives for keyword arguments (includes var-keyword # parameters **kwargs, but also var-or-keyword parameters, and @@ -361,8 +369,7 @@ def _wrap(cls, f, derivatives_args=None, derivatives_kwargs=None): derivatives_all_kwargs = {} - for (name, derivative) in derivatives_kwargs.items(): - + for name, derivative in derivatives_kwargs.items(): # Optimization: None keyword-argument derivatives are converted # right away to derivatives (instead of doing this every time a # None derivative is encountered when calculating derivatives): @@ -390,8 +397,7 @@ def _wrap(cls, f, derivatives_args=None, derivatives_kwargs=None): # arguments (and therefore to use inspect.getfullargspec()) # because they are already handled by derivatives_kwargs. - for (index, name) in enumerate(argspec.args): - + for index, name in enumerate(argspec.args): # The following test handles the case of # positional-or-keyword parameter for which automatic # numerical differentiation is used: when the wrapped @@ -414,23 +420,21 @@ def _wrap(cls, f, derivatives_args=None, derivatives_kwargs=None): # function (instead of doing this over and over later every time a # None derivative is found): - none_converter = lambda index: partial_derivative(f, index) + none_converter = lambda index: partial_derivative(f, index) # noqa - for (index, derivative) in enumerate( - derivatives_args_index.returned_elements): + for index, derivative in enumerate(derivatives_args_index.returned_elements): if derivative is None: - derivatives_args_index.returned_elements[index] = ( - none_converter(index)) + derivatives_args_index.returned_elements[index] = none_converter(index) # Future None values are also automatically converted: derivatives_args_index.none_converter = none_converter - ## Wrapped function: #! Setting the doc string after "def f_with...()" does not # seem to work. We define it explicitly: - @set_doc("""\ + @set_doc( + """\ Version of %s(...) 
that returns an affine approximation (AffineScalarFunc object), if its result depends on variables (Variable objects). Otherwise, returns a simple constant (when @@ -442,9 +446,10 @@ def _wrap(cls, f, derivatives_args=None, derivatives_kwargs=None): uncertainties.Variable objects will be incorrect. Original documentation: - %s""" % (f.__name__, f.__doc__)) + %s""" + % (f.__name__, f.__doc__) + ) def f_with_affine_output(*args, **kwargs): - ######################################## # The involved random variables must first be gathered, so # that they can be independently updated. @@ -454,10 +459,12 @@ def f_with_affine_output(*args, **kwargs): # replaced by their nominal value in order to calculate # the necessary derivatives of f. - pos_w_uncert = [index for (index, value) in enumerate(args) - if isinstance(value, cls)] - names_w_uncert = [key for (key, value) in kwargs.items() - if isinstance(value, cls)] + pos_w_uncert = [ + index for (index, value) in enumerate(args) if isinstance(value, cls) + ] + names_w_uncert = [ + key for (key, value) in kwargs.items() if isinstance(value, cls) + ] ######################################## # Value of f() at the nominal value of the arguments with @@ -522,14 +529,16 @@ def f_with_affine_output(*args, **kwargs): linear_part = [] for pos in pos_w_uncert: - linear_part.append(( - # Coefficient: - derivatives_args_index[pos](*args_values, **kwargs), - # Linear part of the AffineScalarFunc expression: - args[pos]._linear_part)) + linear_part.append( + ( + # Coefficient: + derivatives_args_index[pos](*args_values, **kwargs), + # Linear part of the AffineScalarFunc expression: + args[pos]._linear_part, + ) + ) for name in names_w_uncert: - # Optimization: caching of the automatic numerical # derivatives for keyword arguments that are # discovered. This gives a speedup when the original @@ -538,20 +547,24 @@ def f_with_affine_output(*args, **kwargs): derivative = derivatives_all_kwargs.setdefault( name, # Derivative never needed before: - partial_derivative(f, name)) - - linear_part.append(( - # Coefficient: - derivative(*args_values, **kwargs), - # Linear part of the AffineScalarFunc expression: - kwargs_uncert_values[name]._linear_part)) + partial_derivative(f, name), + ) + + linear_part.append( + ( + # Coefficient: + derivative(*args_values, **kwargs), + # Linear part of the AffineScalarFunc expression: + kwargs_uncert_values[name]._linear_part, + ) + ) # The function now returns the necessary linear approximation # to the function: - return cls( - f_nominal_value, linear_part) + return cls(f_nominal_value, linear_part) - f_with_affine_output = set_doc("""\ + f_with_affine_output = set_doc( + """\ Version of %s(...) that returns an affine approximation (AffineScalarFunc object), if its result depends on variables (Variable objects). Otherwise, returns a simple constant (when @@ -563,7 +576,9 @@ def f_with_affine_output(*args, **kwargs): uncertainties.Variable objects will be incorrect. Original documentation: - %s""" % (f.__name__, f.__doc__))(f_with_affine_output) + %s""" + % (f.__name__, f.__doc__) + )(f_with_affine_output) # It is easier to work with f_with_affine_output, which represents # a wrapped version of 'f', when it bears the same name as 'f': @@ -573,12 +588,12 @@ def f_with_affine_output(*args, **kwargs): return f_with_affine_output - # Step constant for numerical derivatives in # partial_derivative(). Value chosen to as to get better numerical # results: STEP_SIZE = sqrt(sys.float_info.epsilon) + # !! 
It would be possible to split the partial derivative calculation # into two functions: one for positional arguments (case of integer # arg_ref) and one for keyword arguments (case of string @@ -619,7 +634,7 @@ def partial_derivative_of_f(*args, **kwargs): # The step is relative to the parameter being varied, so that # shifting it does not suffer from finite precision limitations: - step = STEP_SIZE*abs(args_with_var[arg_ref]) + step = STEP_SIZE * abs(args_with_var[arg_ref]) if not step: # Arbitrary, but "small" with respect to 1: step = STEP_SIZE @@ -631,14 +646,14 @@ def partial_derivative_of_f(*args, **kwargs): else: shifted_f_plus = f(*args_with_var, **kwargs) - args_with_var[arg_ref] -= 2*step # Optimization: only 1 list copy + args_with_var[arg_ref] -= 2 * step # Optimization: only 1 list copy if change_kwargs: shifted_f_minus = f(*args, **args_with_var) else: shifted_f_minus = f(*args_with_var, **kwargs) - return (shifted_f_plus - shifted_f_minus)/2/step + return (shifted_f_plus - shifted_f_minus) / 2 / step return partial_derivative_of_f @@ -659,6 +674,7 @@ def partial_derivative_of_f(*args, **kwargs): # as explained in the main documentation, it is possible to give a # useful meaning to the comparison operators, in these cases. + def eq_on_aff_funcs(self, y_with_uncert): """ __eq__ operator, assuming that both self and y_with_uncert are @@ -667,7 +683,8 @@ def eq_on_aff_funcs(self, y_with_uncert): difference = self - y_with_uncert # Only an exact zero difference means that self and y are # equal numerically: - return not(difference._nominal_value or difference.std_dev) + return not (difference._nominal_value or difference.std_dev) + def ne_on_aff_funcs(self, y_with_uncert): """ @@ -677,6 +694,7 @@ def ne_on_aff_funcs(self, y_with_uncert): return not eq_on_aff_funcs(self, y_with_uncert) + def gt_on_aff_funcs(self, y_with_uncert): """ __gt__ operator, assuming that both self and y_with_uncert are @@ -684,14 +702,15 @@ def gt_on_aff_funcs(self, y_with_uncert): """ return self._nominal_value > y_with_uncert._nominal_value + def ge_on_aff_funcs(self, y_with_uncert): """ __ge__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ - return (gt_on_aff_funcs(self, y_with_uncert) - or eq_on_aff_funcs(self, y_with_uncert)) + return gt_on_aff_funcs(self, y_with_uncert) or eq_on_aff_funcs(self, y_with_uncert) + def lt_on_aff_funcs(self, y_with_uncert): """ @@ -700,17 +719,17 @@ def lt_on_aff_funcs(self, y_with_uncert): """ return self._nominal_value < y_with_uncert._nominal_value + def le_on_aff_funcs(self, y_with_uncert): """ __le__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ - return (lt_on_aff_funcs(self, y_with_uncert) - or eq_on_aff_funcs(self, y_with_uncert)) + return lt_on_aff_funcs(self, y_with_uncert) or eq_on_aff_funcs(self, y_with_uncert) -def add_comparative_ops(cls): +def add_comparative_ops(cls): def to_affine_scalar(x): """ Transforms x into a constant affine scalar function @@ -730,8 +749,10 @@ def to_affine_scalar(x): return cls(x, {}) # Case of lists, etc. - raise NotUpcast("%s cannot be converted to a number with" - " uncertainty" % type(x)) + raise NotUpcast( + "%s cannot be converted to a number with" " uncertainty" % type(x) + ) + cls._to_affine_scalar = to_affine_scalar def force_aff_func_args(func): @@ -791,9 +812,9 @@ def __bool__(self): # __nonzero__ works fine if subtracting the 0 float from a # vector of the linear space works as if 0 were the null # vector of that space): - return self != 0. 
# Uses the AffineScalarFunc.__ne__ function + return self != 0.0 # Uses the AffineScalarFunc.__ne__ function - cls.__bool__ = __bool__ + cls.__bool__ = __bool__ ######################################## ## Logical operators: warning: the resulting value cannot always @@ -838,8 +859,10 @@ def __bool__(self): cls.__lt__ = force_aff_func_args(lt_on_aff_funcs) cls.__le__ = force_aff_func_args(le_on_aff_funcs) + # Mathematical operations with local approximations (affine scalar # functions) + class NotUpcast(Exception): - 'Raised when an object cannot be converted to a number with uncertainty' \ No newline at end of file + "Raised when an object cannot be converted to a number with uncertainty" diff --git a/uncertainties/parsing.py b/uncertainties/parsing.py index 8d03cc84..bf176692 100644 --- a/uncertainties/parsing.py +++ b/uncertainties/parsing.py @@ -1,4 +1,4 @@ -import re +import re from uncertainties.formatting import nrmlze_superscript ############################################################################### @@ -10,7 +10,7 @@ # semantics of some representations (e.g. .1(2.) = .1+/-2, whereas # .1(2) = .1+/-0.2), so just getting the numerical value of the part # in parentheses would not be sufficient. -POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE = r'((\d*)(\.\d*)?|nan|NAN|inf|INF)' +POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE = r"((\d*)(\.\d*)?|nan|NAN|inf|INF)" # Regexp for a number with uncertainty (e.g., "-1.234(2)e-6"), where # the uncertainty is optional (in which case the uncertainty is @@ -18,7 +18,7 @@ # # !! WARNING: in Python 2, the code relies on "… % " returning # a Unicode string (even if the template is not Unicode): -NUMBER_WITH_UNCERT_RE_STR = u''' +NUMBER_WITH_UNCERT_RE_STR = """ ([+-])? # Sign %s # Main number (?:\\(%s\\))? # Optional uncertainty @@ -26,28 +26,36 @@ (?:[eE]|\\s*×\\s*10) (.*) )? # Optional exponent - ''' % (POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE, - POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE) + """ % ( + POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE, + POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE, +) NUMBER_WITH_UNCERT_RE_MATCH = re.compile( - u"%s$" % NUMBER_WITH_UNCERT_RE_STR, re.VERBOSE).match + "%s$" % NUMBER_WITH_UNCERT_RE_STR, re.VERBOSE +).match # Number with uncertainty with a factored exponent (e.g., of the form # (... +/- ...)e10): this is a loose matching, so as to accommodate # for multiple formats: -NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH = re.compile(u''' +NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH = re.compile( + """ \\( (?P.*) \\) (?:[eE]|\\s*×\\s*10) (?P.*) - $''', re.VERBOSE).match + $""", + re.VERBOSE, +).match + class NotParenUncert(ValueError): - ''' + """ Raised when a string representing an exact number or a number with an uncertainty indicated between parentheses was expected but not found. - ''' + """ + def parse_error_in_parentheses(representation): # !!!! The code seems to handle superscript exponents, but the @@ -71,29 +79,31 @@ def parse_error_in_parentheses(representation): # The 'main' part is the nominal value, with 'int'eger part, and # 'dec'imal part. The 'uncert'ainty is similarly broken into its # integer and decimal parts. - (sign, main, _, main_dec, uncert, uncert_int, uncert_dec, - exponent) = match.groups() + (sign, main, _, main_dec, uncert, uncert_int, uncert_dec, exponent) = ( + match.groups() + ) else: - raise NotParenUncert("Unparsable number representation: '%s'." - " See the documentation of ufloat_fromstr()." - % representation) + raise NotParenUncert( + "Unparsable number representation: '%s'." 
+ " See the documentation of ufloat_fromstr()." % representation + ) # Global exponent: if exponent: - factor = 10.**nrmlze_superscript(exponent) + factor = 10.0 ** nrmlze_superscript(exponent) else: factor = 1 # Nominal value: - value = float((sign or '')+main)*factor + value = float((sign or "") + main) * factor if uncert is None: # No uncertainty was found: an uncertainty of 1 on the last # digit is assumed: - uncert_int = '1' # The other parts of the uncertainty are None + uncert_int = "1" # The other parts of the uncertainty are None # Do we have a fully explicit uncertainty? - if uncert_dec is not None or uncert in {'nan', 'NAN', 'inf', 'INF'}: + if uncert_dec is not None or uncert in {"nan", "NAN", "inf", "INF"}: uncert_value = float(uncert) else: # uncert_int represents an uncertainty on the last digits: @@ -103,20 +113,22 @@ def parse_error_in_parentheses(representation): if main_dec is None: num_digits_after_period = 0 else: - num_digits_after_period = len(main_dec)-1 + num_digits_after_period = len(main_dec) - 1 - uncert_value = int(uncert_int)/10.**num_digits_after_period + uncert_value = int(uncert_int) / 10.0**num_digits_after_period # We apply the exponent to the uncertainty as well: uncert_value *= factor return (value, uncert_value) + # Regexp for catching the two variable parts of -1.2×10⁻¹²: -PRETTY_PRINT_MATCH = re.compile(u'(.*?)\\s*×\\s*10(.*)').match +PRETTY_PRINT_MATCH = re.compile("(.*?)\\s*×\\s*10(.*)").match + def to_float(value_str): - ''' + """ Converts a string representing a float to a float. The usual valid Python float() representations are correctly @@ -126,7 +138,7 @@ def to_float(value_str): converted. ValueError is raised if no float can be obtained. - ''' + """ try: return float(value_str) @@ -137,18 +149,22 @@ def to_float(value_str): match = PRETTY_PRINT_MATCH(value_str) if match: try: - return float(match.group(1))*10.**nrmlze_superscript(match.group(2)) + return float(match.group(1)) * 10.0 ** nrmlze_superscript(match.group(2)) except ValueError: - raise ValueError('Mantissa or exponent incorrect in pretty-print' - ' form %s' % value_str) + raise ValueError( + "Mantissa or exponent incorrect in pretty-print" " form %s" % value_str + ) else: - raise ValueError('No valid Python float or pretty-print form' - ' recognized in %s' % value_str) + raise ValueError( + "No valid Python float or pretty-print form" " recognized in %s" % value_str + ) cannot_parse_ufloat_msg_pat = ( - 'Cannot parse %s: see the documentation for ufloat_fromstr() for a' - ' list of accepted formats') + "Cannot parse %s: see the documentation for ufloat_fromstr() for a" + " list of accepted formats" +) + # The following function is not exposed because it can in effect be # obtained by doing x = ufloat_fromstr(representation) and reading @@ -171,7 +187,7 @@ def str_to_number_with_uncert(representation): # The "p" format can add parentheses around the whole printed result: we # remove them: - if representation.startswith('(') and representation.endswith(')'): + if representation.startswith("(") and representation.endswith(")"): representation = representation[1:-1] match = NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH(representation) @@ -180,33 +196,30 @@ def str_to_number_with_uncert(representation): # calculated: if match: - # We have a form with a factored exponent: (1.23 +/- 0.01)e10, # etc. 
- exp_value_str = match.group('exp_value') + exp_value_str = match.group("exp_value") try: exponent = nrmlze_superscript(exp_value_str) except ValueError: raise ValueError(cannot_parse_ufloat_msg_pat % representation) - factor = 10.**exponent + factor = 10.0**exponent - representation = match.group('simple_num_with_uncert') + representation = match.group("simple_num_with_uncert") else: factor = 1 # No global exponential factor - match = re.match(u'(.*)(?:\\+/-|±)(.*)', representation) + match = re.match("(.*)(?:\\+/-|±)(.*)", representation) if match: - (nom_value, uncert) = match.groups() try: # Simple form 1234.45+/-1.2 or 1234.45±1.2, or 1.23e-10+/-1e-23 # or -1.2×10⁻¹²±1e23: - parsed_value = (to_float(nom_value)*factor, - to_float(uncert)*factor) + parsed_value = (to_float(nom_value) * factor, to_float(uncert) * factor) except ValueError: raise ValueError(cannot_parse_ufloat_msg_pat % representation) diff --git a/uncertainties/umath.py b/uncertainties/umath.py index d9424d05..92d81367 100644 --- a/uncertainties/umath.py +++ b/uncertainties/umath.py @@ -1,4 +1,4 @@ -''' +""" Mathematical operations that generalize many operations from the standard math module so that they also work on numbers with uncertainties. @@ -33,7 +33,6 @@ This software is released under a dual license. (1) The BSD license. (2) Any other license, as long as it is obtained from the original -author.''' +author.""" -from .umath_core import * -from .umath_core import __all__ # For a correct help(umath) +from .umath_core import * # noqa diff --git a/uncertainties/umath_core.py b/uncertainties/umath_core.py index fde92fe0..373f71fb 100644 --- a/uncertainties/umath_core.py +++ b/uncertainties/umath_core.py @@ -19,8 +19,7 @@ # Local modules import uncertainties.core as uncert_core -from uncertainties.core import (to_affine_scalar, AffineScalarFunc, - LinearCombination) +from uncertainties.core import to_affine_scalar, AffineScalarFunc, LinearCombination ############################################################################### @@ -59,7 +58,7 @@ # Functions with numerical derivatives: # # !! Python2.7+: {..., ...} -num_deriv_funcs = set(['fmod', 'gamma', 'lgamma']) +num_deriv_funcs = set(["fmod", "gamma", "lgamma"]) # Functions are by definition locally constant (on real # numbers): their value does not depend on the uncertainty (because @@ -71,21 +70,19 @@ # comparisons (==, >, etc.). # # !! Python 2.7+: {..., ...} -locally_cst_funcs = set(['ceil', 'floor', 'isinf', 'isnan', 'trunc']) +locally_cst_funcs = set(["ceil", "floor", "isinf", "isnan", "trunc"]) # Functions that do not belong in many_scalars_to_scalar_funcs, but # that have a version that handles uncertainties. These functions are # also not in numpy (see unumpy/core.py). non_std_wrapped_funcs = [] + # Function that copies the relevant attributes from generalized # functions from the math module: # This is a copy&paste job from the functools module, changing # the default arugment for assigned -def wraps(wrapper, - wrapped, - assigned=('__doc__',), - updated=('__dict__',)): +def wraps(wrapper, wrapped, assigned=("__doc__",), updated=("__dict__",)): """Update a wrapper function to look like the wrapped function. wrapper -- function to be updated @@ -123,12 +120,13 @@ def wraps(wrapper, # results when differentiated analytically, because of the loss of # precision in numerical calculations. -#def log_1arg_der(x): +# def log_1arg_der(x): # """ # Derivative of log(x) (1-argument form). 
# """ # return 1/x + def log_der0(*args): """ Derivative of math.log() with respect to its first argument. @@ -136,89 +134,94 @@ def log_der0(*args): Works whether 1 or 2 arguments are given. """ if len(args) == 1: - return 1/args[0] + return 1 / args[0] else: - return 1/args[0]/math.log(args[1]) # 2-argument form + return 1 / args[0] / math.log(args[1]) # 2-argument form # The following version goes about as fast: ## A 'try' is used for the most common case because it is fast when no ## exception is raised: - #try: + # try: # return log_1arg_der(*args) # Argument number check - #except TypeError: + # except TypeError: # return 1/args[0]/math.log(args[1]) # 2-argument form -def _deriv_copysign(x,y): + +def _deriv_copysign(x, y): if x >= 0: return math.copysign(1, y) else: return -math.copysign(1, y) + def _deriv_fabs(x): if x >= 0: return 1 else: return -1 + def _deriv_pow_0(x, y): if y == 0: - return 0. + return 0.0 elif x != 0 or y % 1 == 0: - return y*math.pow(x, y-1) + return y * math.pow(x, y - 1) else: - return float('nan') + return float("nan") + def _deriv_pow_1(x, y): if x == 0 and y > 0: - return 0. + return 0.0 else: return math.log(x) * math.pow(x, y) -erf_coef = 2/math.sqrt(math.pi) # Optimization for erf() + +erf_coef = 2 / math.sqrt(math.pi) # Optimization for erf() fixed_derivatives = { # In alphabetical order, here: - 'acos': [lambda x: -1/math.sqrt(1-x**2)], - 'acosh': [lambda x: 1/math.sqrt(x**2-1)], - 'asin': [lambda x: 1/math.sqrt(1-x**2)], - 'asinh': [lambda x: 1/math.sqrt(1+x**2)], - 'atan': [lambda x: 1/(1+x**2)], - 'atan2': [lambda y, x: x/(x**2+y**2), # Correct for x == 0 - lambda y, x: -y/(x**2+y**2)], # Correct for x == 0 - 'atanh': [lambda x: 1/(1-x**2)], - 'copysign': [_deriv_copysign, - lambda x, y: 0], - 'cos': [lambda x: -math.sin(x)], - 'cosh': [math.sinh], - 'degrees': [lambda x: math.degrees(1)], - 'erf': [lambda x: math.exp(-x**2)*erf_coef], - 'erfc': [lambda x: -math.exp(-x**2)*erf_coef], - 'exp': [math.exp], - 'expm1': [math.exp], - 'fabs': [_deriv_fabs], - 'hypot': [lambda x, y: x/math.hypot(x, y), - lambda x, y: y/math.hypot(x, y)], - 'log': [log_der0, - lambda x, y: -math.log(x, y)/y/math.log(y)], - 'log10': [lambda x: 1/x/math.log(10)], - 'log1p': [lambda x: 1/(1+x)], - 'pow': [_deriv_pow_0, _deriv_pow_1], - 'radians': [lambda x: math.radians(1)], - 'sin': [math.cos], - 'sinh': [math.cosh], - 'sqrt': [lambda x: 0.5/math.sqrt(x)], - 'tan': [lambda x: 1+math.tan(x)**2], - 'tanh': [lambda x: 1-math.tanh(x)**2] - } + "acos": [lambda x: -1 / math.sqrt(1 - x**2)], + "acosh": [lambda x: 1 / math.sqrt(x**2 - 1)], + "asin": [lambda x: 1 / math.sqrt(1 - x**2)], + "asinh": [lambda x: 1 / math.sqrt(1 + x**2)], + "atan": [lambda x: 1 / (1 + x**2)], + "atan2": [ + lambda y, x: x / (x**2 + y**2), # Correct for x == 0 + lambda y, x: -y / (x**2 + y**2), + ], # Correct for x == 0 + "atanh": [lambda x: 1 / (1 - x**2)], + "copysign": [_deriv_copysign, lambda x, y: 0], + "cos": [lambda x: -math.sin(x)], + "cosh": [math.sinh], + "degrees": [lambda x: math.degrees(1)], + "erf": [lambda x: math.exp(-(x**2)) * erf_coef], + "erfc": [lambda x: -math.exp(-(x**2)) * erf_coef], + "exp": [math.exp], + "expm1": [math.exp], + "fabs": [_deriv_fabs], + "hypot": [lambda x, y: x / math.hypot(x, y), lambda x, y: y / math.hypot(x, y)], + "log": [log_der0, lambda x, y: -math.log(x, y) / y / math.log(y)], + "log10": [lambda x: 1 / x / math.log(10)], + "log1p": [lambda x: 1 / (1 + x)], + "pow": [_deriv_pow_0, _deriv_pow_1], + "radians": [lambda x: math.radians(1)], + "sin": [math.cos], + 
"sinh": [math.cosh], + "sqrt": [lambda x: 0.5 / math.sqrt(x)], + "tan": [lambda x: 1 + math.tan(x) ** 2], + "tanh": [lambda x: 1 - math.tanh(x) ** 2], +} # Many built-in functions in the math module are wrapped with a # version which is uncertainty aware: this_module = sys.modules[__name__] + def wrap_locally_cst_func(func): - ''' + """ Return a function that returns the same arguments as func, but after converting any AffineScalarFunc object to its nominal value. @@ -226,19 +229,22 @@ def wrap_locally_cst_func(func): constant: the uncertainties should have no role in the result (since they are supposed to keep the function linear and hence, here, constant). - ''' + """ + def wrapped_func(*args, **kwargs): args_float = map(uncert_core.nominal_value, args) # !! In Python 2.7+, dictionary comprehension: {argname:...} kwargs_float = dict( (arg_name, uncert_core.nominal_value(value)) - for (arg_name, value) in kwargs.items()) + for (arg_name, value) in kwargs.items() + ) return func(*args_float, **kwargs_float) + return wrapped_func + # for (name, attr) in vars(math).items(): for name in dir(math): - if name in fixed_derivatives: # Priority to functions in fixed_derivatives derivatives = fixed_derivatives[name] elif name in num_deriv_funcs: @@ -258,7 +264,8 @@ def wrapped_func(*args, **kwargs): # that cannot be calculated indicates a non-defined derivative # (the derivatives in fixed_derivatives must be written this way): wrapped_func = uncert_core.wrap( - func, map(uncert_core.nan_if_exception, derivatives)) + func, map(uncert_core.nan_if_exception, derivatives) + ) # !! The same effect could be achieved with globals()[...] = ... setattr(this_module, name, wraps(wrapped_func, func)) @@ -286,13 +293,14 @@ def wrapped_func(*args, **kwargs): # For drop-in compatibility with the math module: factorial = math.factorial -non_std_wrapped_funcs.append('factorial') +non_std_wrapped_funcs.append("factorial") # We wrap math.fsum original_func = math.fsum # For optimization purposes + # The function below exists so that temporary variables do not # pollute the module namespace: def wrapped_fsum(): @@ -304,17 +312,16 @@ def wrapped_fsum(): # The fsum function is flattened, in order to use the # wrap() wrapper: - flat_fsum = lambda *args: original_func(args) + flat_fsum = lambda *args: original_func(args) # noqa + + flat_fsum_wrap = uncert_core.wrap(flat_fsum, itertools.repeat(lambda *args: 1)) - flat_fsum_wrap = uncert_core.wrap( - flat_fsum, itertools.repeat(lambda *args: 1)) + return wraps(lambda arg_list: flat_fsum_wrap(*arg_list), original_func) - return wraps(lambda arg_list: flat_fsum_wrap(*arg_list), - original_func) # !!!!!!!! Documented? fsum = wrapped_fsum() -non_std_wrapped_funcs.append('fsum') +non_std_wrapped_funcs.append("fsum") ########## @@ -325,6 +332,7 @@ def wrapped_fsum(): # ! The arguments have the same names as in the math module # documentation, so that the docstrings are consistent with them. 
+ @uncert_core.set_doc(math.modf.__doc__) def modf(x): """ @@ -348,7 +356,10 @@ def modf(x): # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: return (frac_part, int_part) -many_scalars_to_scalar_funcs.append('modf') + + +many_scalars_to_scalar_funcs.append("modf") + @uncert_core.set_doc(math.ldexp.__doc__) def ldexp(x, i): @@ -361,7 +372,8 @@ def ldexp(x, i): if aff_func._linear_part: return AffineScalarFunc( math.ldexp(aff_func.nominal_value, i), - LinearCombination([(2**i, aff_func._linear_part)])) + LinearCombination([(2**i, aff_func._linear_part)]), + ) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: @@ -372,7 +384,10 @@ def ldexp(x, i): # value of x coerced to a difference type [int->float, for # instance]): return math.ldexp(x, i) -many_scalars_to_scalar_funcs.append('ldexp') + + +many_scalars_to_scalar_funcs.append("ldexp") + @uncert_core.set_doc(math.frexp.__doc__) def frexp(x): @@ -395,15 +410,19 @@ def frexp(x): # With frexp(x) = (m, e), x = m*2**e, so m = x*2**-e # and therefore dm/dx = 2**-e (as e in an integer that # does not vary when x changes): - LinearCombination([2**-exponent, aff_func._linear_part])), + LinearCombination([2**-exponent, aff_func._linear_part]), + ), # The exponent is an integer and is supposed to be # continuous (errors must be small): - exponent) + exponent, + ) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: return math.frexp(x) -non_std_wrapped_funcs.append('frexp') + + +non_std_wrapped_funcs.append("frexp") ############################################################################### # Exported functions: diff --git a/uncertainties/unumpy/__init__.py b/uncertainties/unumpy/__init__.py index f8fe976f..1d1073b4 100644 --- a/uncertainties/unumpy/__init__.py +++ b/uncertainties/unumpy/__init__.py @@ -70,11 +70,11 @@ # Local modules: -from .core import * -from . import ulinalg # Local sub-module +from .core import * # noqa +from . import ulinalg # noqa Local sub-module # __all__ is set so that pydoc shows all important functions: -__all__ = core.__all__ +__all__ = core.__all__ # noqa # "import numpy" makes numpy.linalg available. This behavior is # copied here, for maximum compatibility: -__all__.append('ulinalg') +__all__.append("ulinalg") diff --git a/uncertainties/unumpy/core.py b/uncertainties/unumpy/core.py index 861339fc..87e39ed1 100644 --- a/uncertainties/unumpy/core.py +++ b/uncertainties/unumpy/core.py @@ -27,14 +27,14 @@ __all__ = [ # Factory functions: - 'uarray', 'umatrix', - + "uarray", + "umatrix", # Utilities: - 'nominal_values', 'std_devs', - + "nominal_values", + "std_devs", # Classes: - 'matrix' - ] + "matrix", +] ############################################################################### # Utilities: @@ -57,14 +57,21 @@ to_nominal_values = numpy.vectorize( uncert_core.nominal_value, otypes=[float], # Because vectorize() has side effects (dtype setting) - doc=("Return the nominal value of the numbers with uncertainties contained" - " in a NumPy (or unumpy) array (this includes matrices).")) + doc=( + "Return the nominal value of the numbers with uncertainties contained" + " in a NumPy (or unumpy) array (this includes matrices)." 
+ ), +) to_std_devs = numpy.vectorize( uncert_core.std_dev, otypes=[float], # Because vectorize() has side effects (dtype setting) - doc=("Return the standard deviation of the numbers with uncertainties" - " contained in a NumPy array, or zero for other objects.")) + doc=( + "Return the standard deviation of the numbers with uncertainties" + " contained in a NumPy array, or zero for other objects." + ), +) + def unumpy_to_numpy_matrix(arr): """ @@ -77,6 +84,7 @@ def unumpy_to_numpy_matrix(arr): else: return arr + def nominal_values(arr): """ Return the nominal values of the numbers in NumPy array arr. @@ -93,6 +101,7 @@ class from this module) are passed through untouched (because a return unumpy_to_numpy_matrix(to_nominal_values(arr)) + def std_devs(arr): """ Return the standard deviations of the numbers in NumPy array arr. @@ -109,8 +118,10 @@ class from this module) are passed through untouched (because a return unumpy_to_numpy_matrix(to_std_devs(arr)) + ############################################################################### + def derivative(u, var): """ Return the derivative of u along var, if u is an @@ -121,9 +132,10 @@ def derivative(u, var): try: return u.derivatives[var] except KeyError: - return 0. + return 0.0 else: - return 0. + return 0.0 + def wrap_array_func(func): # !!! This function is not used in the code, except in the tests. @@ -148,7 +160,8 @@ def wrap_array_func(func): and which returns a NumPy array. """ - @uncert_core.set_doc("""\ + @uncert_core.set_doc( + """\ Version of %s(...) that works even when its first argument is a NumPy array that contains numbers with uncertainties. @@ -158,7 +171,9 @@ def wrap_array_func(func): uncert_core.Variable objects will be incorrect. Original documentation: - %s""" % (func.__name__, func.__doc__)) + %s""" + % (func.__name__, func.__doc__) + ) def wrapped_func(arr, *args, **kwargs): # Nominal value: arr_nominal_value = nominal_values(arr) @@ -191,7 +206,6 @@ def wrapped_func(arr, *args, **kwargs): # in the case of the pseudo-inverse). derivatives = numpy.vectorize(lambda _: {})(func_nominal_value) for var in variables: - # A basic assumption of this package is that the user # guarantees that uncertainties cover a zone where # evaluated functions are linear enough. Thus, numerical @@ -214,7 +228,7 @@ def wrapped_func(arr, *args, **kwargs): # for the evaluation of the derivative, though: we set the # minimum variable shift. - shift_var = max(var._std_dev/1e5, 1e-8*abs(var._nominal_value)) + shift_var = max(var._std_dev / 1e5, 1e-8 * abs(var._nominal_value)) # An exceptional case is that of var being exactly zero. # In this case, an arbitrary shift is used for the # numerical calculation of the derivative. 
The resulting @@ -225,27 +239,29 @@ def wrapped_func(arr, *args, **kwargs): shift_var = 1e-8 # Shift of all the elements of arr when var changes by shift_var: - shift_arr = array_derivative(arr, var)*shift_var + shift_arr = array_derivative(arr, var) * shift_var # Origin value of array arr when var is shifted by shift_var: shifted_arr_values = arr_nominal_value + shift_arr func_shifted = func(shifted_arr_values, *args, **kwargs) - numerical_deriv = (func_shifted-func_nominal_value)/shift_var + numerical_deriv = (func_shifted - func_nominal_value) / shift_var # Update of the list of variables and associated # derivatives, for each element: - for (derivative_dict, derivative_value) in ( - zip(derivatives.flat, numerical_deriv.flat)): - + for derivative_dict, derivative_value in zip( + derivatives.flat, numerical_deriv.flat + ): if derivative_value: derivative_dict[var] = derivative_value # numbers with uncertainties are built from the result: return numpy.vectorize(uncert_core.AffineScalarFunc)( func_nominal_value, - numpy.vectorize(uncert_core.LinearCombination)(derivatives)) + numpy.vectorize(uncert_core.LinearCombination)(derivatives), + ) - wrapped_func = uncert_core.set_doc("""\ + wrapped_func = uncert_core.set_doc( + """\ Version of %s(...) that works even when its first argument is a NumPy array that contains numbers with uncertainties. @@ -255,7 +271,9 @@ def wrapped_func(arr, *args, **kwargs): uncert_core.Variable objects will be incorrect. Original documentation: - %s""" % (func.__name__, func.__doc__))(wrapped_func) + %s""" + % (func.__name__, func.__doc__) + )(wrapped_func) # It is easier to work with wrapped_func, which represents a # wrapped version of 'func', when it bears the same name as @@ -264,9 +282,11 @@ def wrapped_func(arr, *args, **kwargs): return wrapped_func + ############################################################################### # Arrays + def uarray(nominal_values, std_devs=None): """ Return a NumPy array of numbers with uncertainties @@ -283,17 +303,20 @@ def uarray(nominal_values, std_devs=None): """ if std_devs is None: # Obsolete, single tuple argument call - raise TypeError('uarray() should be called with two arguments.') + raise TypeError("uarray() should be called with two arguments.") - return (numpy.vectorize( + return numpy.vectorize( # ! Looking up uncert_core.Variable beforehand through # '_Variable = uncert_core.Variable' does not result in a # significant speed up: - lambda v, s: uncert_core.Variable(v, s), otypes=[object]) - (nominal_values, std_devs)) + lambda v, s: uncert_core.Variable(v, s), + otypes=[object], + )(nominal_values, std_devs) + ############################################################################### + def array_derivative(array_like, var): """ Return the derivative of the given array with respect to the @@ -307,12 +330,15 @@ def array_derivative(array_like, var): var -- Variable object. 
""" - return numpy.vectorize(lambda u: derivative(u, var), - # The type is set because an - # integer derivative should not - # set the output type of the - # array: - otypes=[float])(array_like) + return numpy.vectorize( + lambda u: derivative(u, var), + # The type is set because an + # integer derivative should not + # set the output type of the + # array: + otypes=[float], + )(array_like) + def func_with_deriv_to_uncert_func(func_with_derivatives): # This function is used for instance for the calculation of the @@ -396,7 +422,9 @@ def wrapped_func(array_like, *args, **kwargs): array_nominal, type(array_like), (array_derivative(array_version, var) for var in variables), - *args, **kwargs) + *args, + **kwargs, + ) func_nominal_value = next(func_then_derivs) @@ -408,10 +436,9 @@ def wrapped_func(array_like, *args, **kwargs): # Calculation of the derivatives of the result with respect to # the variables. - derivatives = ( - numpy.array( - [{} for _ in range(func_nominal_value.size)], dtype=object) - .reshape(func_nominal_value.shape)) + derivatives = numpy.array( + [{} for _ in range(func_nominal_value.size)], dtype=object + ).reshape(func_nominal_value.shape) # Memory-efficient approach. A memory-hungry approach would # be to calculate the matrix derivatives will respect to all @@ -420,13 +447,12 @@ def wrapped_func(array_like, *args, **kwargs): # progressively build the matrix of derivatives, by # progressively adding the derivatives with respect to # successive variables. - for (var, deriv_wrt_var) in zip(variables, - func_then_derivs): - + for var, deriv_wrt_var in zip(variables, func_then_derivs): # Update of the list of variables and associated # derivatives, for each element: - for (derivative_dict, derivative_value) in zip( - derivatives.flat, deriv_wrt_var.flat): + for derivative_dict, derivative_value in zip( + derivatives.flat, deriv_wrt_var.flat + ): if derivative_value: derivative_dict[var] = derivative_value @@ -434,7 +460,8 @@ def wrapped_func(array_like, *args, **kwargs): # result: result = numpy.vectorize(uncert_core.AffineScalarFunc)( func_nominal_value, - numpy.vectorize(uncert_core.LinearCombination)(derivatives)) + numpy.vectorize(uncert_core.LinearCombination)(derivatives), + ) # NumPy matrices that contain numbers with uncertainties are # better as unumpy matrices: @@ -445,8 +472,10 @@ def wrapped_func(array_like, *args, **kwargs): return wrapped_func + ########## Matrix inverse + def inv_with_derivatives(arr, input_type, derivatives): """ Defines the matrix inverse and its derivatives. @@ -471,8 +500,10 @@ def inv_with_derivatives(arr, input_type, derivatives): derivative_mat = numpy.asmatrix(derivative) yield -inverse_mat * derivative_mat * inverse_mat + inv = func_with_deriv_to_uncert_func(inv_with_derivatives) -inv.__doc__ = """\ +inv.__doc__ = ( + """\ Version of numpy.linalg.inv that works with array-like objects that contain numbers with uncertainties. @@ -483,10 +514,13 @@ def inv_with_derivatives(arr, input_type, derivatives): Original documentation: %s - """ % numpy.linalg.inv.__doc__ + """ + % numpy.linalg.inv.__doc__ +) ########## Matrix pseudo-inverse + def pinv_with_derivatives(arr, input_type, derivatives, rcond): """ Defines the matrix pseudo-inverse and its derivatives. @@ -518,28 +552,28 @@ def pinv_with_derivatives(arr, input_type, derivatives, rcond): # http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse # Shortcuts. 
All the following factors should be numpy.matrix objects: - PA = arr*inverse_mat - AP = inverse_mat*arr - factor21 = inverse_mat*inverse_mat.H - factor22 = numpy.eye(arr.shape[0])-PA - factor31 = numpy.eye(arr.shape[1])-AP - factor32 = inverse_mat.H*inverse_mat + PA = arr * inverse_mat + AP = inverse_mat * arr + factor21 = inverse_mat * inverse_mat.H + factor22 = numpy.eye(arr.shape[0]) - PA + factor31 = numpy.eye(arr.shape[1]) - AP + factor32 = inverse_mat.H * inverse_mat # Successive derivatives of the inverse: for derivative in derivatives: derivative_mat = numpy.asmatrix(derivative) - term1 = -inverse_mat*derivative_mat*inverse_mat + term1 = -inverse_mat * derivative_mat * inverse_mat derivative_mat_H = derivative_mat.H - term2 = factor21*derivative_mat_H*factor22 - term3 = factor31*derivative_mat_H*factor32 - yield term1+term2+term3 + term2 = factor21 * derivative_mat_H * factor22 + term3 = factor31 * derivative_mat_H * factor32 + yield term1 + term2 + term3 + # Default rcond argument for the generalization of numpy.linalg.pinv: # # Most common modern case first: -try: - pinv_default = ( - inspect.signature(numpy.linalg.pinv).parameters["rcond"].default) +try: + pinv_default = inspect.signature(numpy.linalg.pinv).parameters["rcond"].default except AttributeError: # No inspect.signature() before Python 3.3 try: # In numpy 1.17+, pinv is wrapped using a decorator which unfortunately @@ -552,10 +586,13 @@ def pinv_with_derivatives(arr, input_type, derivatives, rcond): pinv_with_uncert = func_with_deriv_to_uncert_func(pinv_with_derivatives) + def pinv(array_like, rcond=pinv_default): return pinv_with_uncert(array_like, rcond) -pinv = uncert_core.set_doc(""" + +pinv = uncert_core.set_doc( + """ Version of numpy.linalg.pinv that works with array-like objects that contain numbers with uncertainties. @@ -566,10 +603,13 @@ def pinv(array_like, rcond=pinv_default): Original documentation: %s - """ % numpy.linalg.pinv.__doc__)(pinv) + """ + % numpy.linalg.pinv.__doc__ +)(pinv) ########## Matrix class + class matrix(numpy.matrix): # The name of this class is the same as NumPy's, which is why it # does not follow PEP 8. @@ -594,8 +634,7 @@ def getI(self): m, n = self.shape return (inv if m == n else pinv)(self) - I = numpy.matrix.I.getter(getI) - + I = numpy.matrix.I.getter(getI) # noqa # !!! The following function is not in the official documentation # of the module. Maybe this is because arrays with uncertainties @@ -616,6 +655,7 @@ def nominal_values(self): def std_devs(self): return numpy.matrix(std_devs(self)) + def umatrix(nominal_values, std_devs=None): """ Constructs a matrix that contains numbers with uncertainties. @@ -628,12 +668,14 @@ def umatrix(nominal_values, std_devs=None): """ if std_devs is None: # Obsolete, single tuple argument call - raise TypeError('umatrix() should be called with two arguments.') + raise TypeError("umatrix() should be called with two arguments.") return uarray(nominal_values, std_devs).view(matrix) + ############################################################################### + def define_vectorized_funcs(): """ Defines vectorized versions of functions from uncertainties.umath_core. 
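For reference, a minimal sketch of the element-wise functions this routine creates (values are only illustrative); they apply the umath wrappers across arrays built with uarray():

    from uncertainties import unumpy

    arr = unumpy.uarray([0.0, 1.0], [0.01, 0.02])
    res = unumpy.cos(arr)              # object array of AffineScalarFunc
    print(unumpy.nominal_values(res))  # [cos(0.0), cos(1.0)]
    print(unumpy.std_devs(res))        # |sin(x)| * std_dev, element-wise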
@@ -645,20 +687,24 @@ def define_vectorized_funcs(): this_module = sys.modules[__name__] # NumPy does not always use the same function names as the math # module: - func_name_translations = dict([ - (f_name, 'arc'+f_name[1:]) - for f_name in ['acos', 'acosh', 'asin', 'atan', 'atan2', 'atanh']]) + func_name_translations = dict( + [ + (f_name, "arc" + f_name[1:]) + for f_name in ["acos", "acosh", "asin", "atan", "atan2", "atanh"] + ] + ) new_func_names = [ func_name_translations.get(function_name, function_name) # The functions from umath_core.non_std_wrapped_funcs # (available from umath) are normally not in # NumPy, so they are not included here: - for function_name in umath_core.many_scalars_to_scalar_funcs] - - for (function_name, unumpy_name) in zip( - umath_core.many_scalars_to_scalar_funcs, new_func_names): + for function_name in umath_core.many_scalars_to_scalar_funcs + ] + for function_name, unumpy_name in zip( + umath_core.many_scalars_to_scalar_funcs, new_func_names + ): # ! The newly defined functions (uncertainties.unumpy.cos, etc.) # do not behave exactly like their NumPy equivalent (numpy.cos, # etc.): cos(0) gives an array() and not a @@ -673,7 +719,8 @@ def define_vectorized_funcs(): # return an array with a boolean data type (instead of # object), which allows the result to be used with NumPy's # boolean indexing. - {} if function_name in umath_core.locally_cst_funcs + {} + if function_name in umath_core.locally_cst_funcs # If by any chance a function returns, in a particular # case, an integer instead of a number with uncertainty, # side-effects in vectorize() would fix the resulting @@ -681,22 +728,28 @@ def define_vectorized_funcs(): # vectorize(), at least in NumPy around 2010 maybe, # decided about the output data type by looking at the # type of first element only). - else {'otypes': [object]}) + else {"otypes": [object]} + ) setattr( - this_module, unumpy_name, + this_module, + unumpy_name, #!!!! For umath_core.locally_cst_funcs, would it make sense # to optimize this by using instead the equivalent (? see # above) vectorized NumPy function on the nominal values? - numpy.vectorize(func, - doc="""\ + numpy.vectorize( + func, + doc="""\ Vectorized version of umath.%s. Original documentation: -%s""" % (function_name, func.__doc__), - **otypes)) - +%s""" + % (function_name, func.__doc__), + **otypes, + ), + ) __all__.append(unumpy_name) + define_vectorized_funcs() diff --git a/uncertainties/unumpy/ulinalg.py b/uncertainties/unumpy/ulinalg.py index 747c231a..407621b3 100644 --- a/uncertainties/unumpy/ulinalg.py +++ b/uncertainties/unumpy/ulinalg.py @@ -5,10 +5,8 @@ (c) 2010-2016 by Eric O. LEBIGOT (EOL) . """ -from uncertainties import __author__ from uncertainties.unumpy.core import inv, pinv # This module cannot import unumpy because unumpy imports this module. -__all__ = ['inv', 'pinv'] - +__all__ = ["inv", "pinv"]
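For reference, a minimal sketch tying the ulinalg exports to the unumpy matrix class defined earlier in the diff (values are only illustrative; the matrix I property dispatches to these same inv()/pinv() wrappers):

    from uncertainties import unumpy
    from uncertainties.unumpy import ulinalg

    m = unumpy.umatrix([[1.0, 2.0], [3.0, 4.0]],
                       [[0.1, 0.1], [0.1, 0.1]])
    m_inv = ulinalg.inv(m)        # inverse with propagated uncertainties
    print(m_inv.nominal_values)   # numpy.matrix of nominal values
    print(m_inv.std_devs)         # numpy.matrix of standard deviations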