diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000000..668e3a4f3610
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,45 @@
+[flake8]
+extend-ignore =
+    E203, # whitespace before ':' (currently conflicts with black formatting)
+    E501, # line too long
+    F401, # module imported but unused
+    F403, # 'from module import *' used; unable to detect undefined names
+    F405, # name may be undefined, or defined from star imports: module
+    F821, # undefined name name
+    W505, # doc line too long (105 > 80 characters)
+    D100, # missing docstring in public module
+    D101, # missing docstring in public class
+    D102, # missing docstring in public method
+    D103, # missing docstring in public function
+    D104, # missing docstring in public package
+    D105, # missing docstring in magic method
+    D107, # missing docstring in __init__
+    D202, # no blank lines allowed after function docstring
+    D400, # first line should end with a period
+    D401, # first line should be in imperative mood
+    D414 # section has no content
+
+per-file-ignores =
+    # module level import not at top of file:
+    __init__.py: E402
+    # do not use bare 'except':
+    dpnp/random/dpnp_iface_random.py: E722, B001
+    # function definition does not bind loop variable
+    tests/third_party/cupy/sorting_tests/test_count.py: B023
+    # 1 blank line required between summary line and description
+    dpnp/dpnp_iface.py: D205
+    tests/tests_perf/math_tests/test_trigonometric.py: D205
+
+filename = *.py, *.pyx
+max_line_length = 80
+max-doc-length = 80
+show-source = True
+
+exclude =
+    .git,
+    __pycache__,
+    version.py
+
+# Print detailed statistic if any issue detected
+count = True
+statistics = True
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index 723347913ac3..151897882438 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -9,6 +9,8 @@ jobs:
   pre-commit:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
-      - uses: pre-commit/action@v2.0.0
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v3
+        with:
+          python-version: '3.9'
+      - uses: pre-commit/action@v3.0.0
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 76b1a74376d6..f1337e340c42 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,8 +2,58 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
 - repo: https://github.com/PyCQA/bandit
-  rev: '1.7.0'
+  rev: '1.7.4'
   hooks:
   - id: bandit
     pass_filenames: false
     args: ["-r", "dpnp", "-lll"]
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.3.0
+  hooks:
+    - id: check-case-conflict
+    - id: check-toml
+    - id: end-of-file-fixer
+    - id: fix-byte-order-marker
+    - id: trailing-whitespace
+- repo: https://github.com/pre-commit/pygrep-hooks
+  rev: v1.9.0
+  hooks:
+    - id: python-check-blanket-type-ignore
+    - id: python-check-mock-methods
+    - id: python-no-eval
+    - id: python-no-log-warn
+    - id: python-use-type-annotations
+    - id: rst-backticks
+    - id: rst-directive-colons
+    - id: rst-inline-touching-normal
+- repo: https://github.com/psf/black
+  rev: 22.8.0
+  hooks:
+    - id: black
+      args: ["--check", "--diff", "--color"]
+      language_version: python3.9
+- repo: https://github.com/pycqa/isort
+  rev: 5.10.1
+  hooks:
+    - id: isort
+      name: isort (python)
+      args: ["--check-only", "--diff"]
+      additional_dependencies: [colorama]
+    - id: isort
+      name: isort (cython)
+      types: [cython]
+      args: ["--check-only", "--diff"]
+      additional_dependencies: [colorama]
+- repo: https://gitlab.com/pycqa/flake8
+  rev: 5.0.4
+  hooks:
+    - id: flake8
+      args: ["--config=.flake8"]
+      additional_dependencies:
+        - flake8-docstrings==1.6.0
+        - flake8-bugbear==22.8.23
+- repo: https://github.com/pocc/pre-commit-hooks
+  rev: v1.3.5
+  hooks:
+    - id: clang-format
+      args: ["-i"]
diff --git a/LICENSE.txt b/LICENSE.txt
index 5bebac93a3aa..37447a0fd127 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,22 +1,22 @@
-Copyright (c) 2016-2020, Intel Corporation
+Copyright (c) 2016-2022, Intel Corporation
 All rights reserved.

-Redistribution and use in source and binary forms, with or without 
+Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
-- Redistributions of source code must retain the above copyright notice, 
+- Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
-- Redistributions in binary form must reproduce the above copyright notice, 
-  this list of conditions and the following disclaimer in the documentation 
+- Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/benchmarks/benchmarks/bench_elementwise.py b/benchmarks/benchmarks/bench_elementwise.py index 05a1174fe412..466fbb62cec1 100644 --- a/benchmarks/benchmarks/bench_elementwise.py +++ b/benchmarks/benchmarks/bench_elementwise.py @@ -1,5 +1,7 @@ import numpy + import dpnp + from .common import Benchmark @@ -8,12 +10,13 @@ # but looks like first execution has additional overheads # (need to be investigated) class Elementwise(Benchmark): - executors = {'dpnp': dpnp, 'numpy': numpy} - params = [['dpnp', 'numpy'], - [2**16, 2**20, 2**24], - ['float64', 'float32', 'int64', 'int32'] - ] - param_names = ['executor', 'size', 'dtype'] + executors = {"dpnp": dpnp, "numpy": numpy} + params = [ + ["dpnp", "numpy"], + [2**16, 2**20, 2**24], + ["float64", "float32", "int64", "int32"], + ] + param_names = ["executor", "size", "dtype"] def setup(self, executor, size, dtype): self.np = self.executors[executor] diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 69cc9b21edfd..90ccf4c4ccb5 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -1,14 +1,17 @@ -from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1 - import numpy + import dpnp +from .common import TYPES1, Benchmark, get_indexes_rand, get_squares_ + class Eindot(Benchmark): - params = [[dpnp, numpy], - [16, 32, 64, 128, 256, 512, 1024], - ['float64', 'float32', 'int64', 'int32']] - param_names = ['executor', 'size', 'dtype'] + params = [ + [dpnp, numpy], + [16, 32, 64, 128, 256, 512, 1024], + ["float64", "float32", "int64", "int32"], + ] + param_names = ["executor", "size", "dtype"] def setup(self, np, size, dtype): dt = getattr(np, dtype) @@ -45,13 +48,13 @@ def time_dot_trans_atc_a(self, np, *args): np.dot(self.atc, self.a) def time_einsum_i_ij_j(self, np, *args): - np.einsum('i,ij,j', self.d, self.b, self.c) + np.einsum("i,ij,j", self.d, self.b, self.c) def time_einsum_ij_jk_a_b(self, np, *args): - np.einsum('ij,jk', self.a, self.b) + np.einsum("ij,jk", self.a, self.b) def time_einsum_ijk_jil_kl(self, np, *args): - np.einsum('ijk,jil->kl', self.a3, self.b3) + np.einsum("ijk,jil->kl", self.a3, self.b3) def time_inner_trans_a_a(self, np, *args): np.inner(self.a, self.a) @@ -82,20 +85,19 @@ def time_tensordot_a_b_axes_1_0_0_1(self, np, *args): class Linalg(Benchmark): - params = [[dpnp, numpy], - ['svd', 'pinv', 'det', 'norm'], - TYPES1] - param_names = ['executor', 'op', 'type'] + params = [[dpnp, numpy], ["svd", "pinv", "det", "norm"], TYPES1] + param_names = ["executor", "op", "type"] def setup(self, np, op, typename): - np.seterr(all='ignore') + np.seterr(all="ignore") self.func = getattr(np.linalg, op) - if op == 'cholesky': + if op == "cholesky": # we need a positive definite - self.a = np.dot(get_squares_()[typename], - get_squares_()[typename].T) + self.a = np.dot( + get_squares_()[typename], get_squares_()[typename].T + ) else: self.a = get_squares_()[typename] @@ -111,37 +113,38 @@ def time_op(self, np, op, typename): class Lstsq(Benchmark): params = [dpnp, numpy] - param_names = ['executor'] + param_names = ["executor"] def setup(self, np): - self.a = get_squares_()['float64'] + self.a = get_squares_()["float64"] self.b = get_indexes_rand()[:100].astype(np.float64) def time_numpy_linalg_lstsq_a__b_float64(self, np): np.linalg.lstsq(self.a, self.b, rcond=-1) + # class Einsum(Benchmark): - # param_names = ['dtype'] - # params = [[np.float64]] - # def setup(self, dtype): - # self.a = np.arange(2900, dtype=dtype) - # self.b = 
np.arange(3000, dtype=dtype) - # self.c = np.arange(24000, dtype=dtype).reshape(20, 30, 40) - # self.c1 = np.arange(1200, dtype=dtype).reshape(30, 40) - # self.d = np.arange(10000, dtype=dtype).reshape(10,100,10) - - # #outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two - # def time_einsum_outer(self, dtype): - # np.einsum("i,j", self.a, self.b, optimize=True) - - # # multiply(a, b):trigger sum_of_products_contig_two - # def time_einsum_multiply(self, dtype): - # np.einsum("..., ...", self.c1, self.c , optimize=True) - - # # sum and multiply:trigger sum_of_products_contig_stride0_outstride0_two - # def time_einsum_sum_mul(self, dtype): - # np.einsum(",i...->", 300, self.d, optimize=True) - - # # sum and multiply:trigger sum_of_products_stride0_contig_outstride0_two - # def time_einsum_sum_mul2(self, dtype): - # np.einsum("i...,->", self.d, 300, optimize=True) +# param_names = ['dtype'] +# params = [[np.float64]] +# def setup(self, dtype): +# self.a = np.arange(2900, dtype=dtype) +# self.b = np.arange(3000, dtype=dtype) +# self.c = np.arange(24000, dtype=dtype).reshape(20, 30, 40) +# self.c1 = np.arange(1200, dtype=dtype).reshape(30, 40) +# self.d = np.arange(10000, dtype=dtype).reshape(10,100,10) + +# #outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two +# def time_einsum_outer(self, dtype): +# np.einsum("i,j", self.a, self.b, optimize=True) + +# # multiply(a, b):trigger sum_of_products_contig_two +# def time_einsum_multiply(self, dtype): +# np.einsum("..., ...", self.c1, self.c , optimize=True) + +# # sum and multiply:trigger sum_of_products_contig_stride0_outstride0_two +# def time_einsum_sum_mul(self, dtype): +# np.einsum(",i...->", 300, self.d, optimize=True) + +# # sum and multiply:trigger sum_of_products_stride0_contig_outstride0_two +# def time_einsum_sum_mul2(self, dtype): +# np.einsum("i...,->", self.d, 300, optimize=True) diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py index dfa702191dc5..97d4205b62d1 100644 --- a/benchmarks/benchmarks/bench_random.py +++ b/benchmarks/benchmarks/bench_random.py @@ -1,13 +1,15 @@ import numpy + import dpnp + from .common import Benchmark # asv run --python=python --quick --bench Sample class Sample(Benchmark): - executors = {'dpnp': dpnp, 'numpy': numpy} - params = [['dpnp', 'numpy'], [2**16, 2**20, 2**24]] - param_names = ['executor', 'size'] + executors = {"dpnp": dpnp, "numpy": numpy} + params = [["dpnp", "numpy"], [2**16, 2**20, 2**24]] + param_names = ["executor", "size"] def setup(self, executor, size): self.executor = self.executors[executor] diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index 3c17f7dd14f8..451c68ffc34f 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -1,6 +1,7 @@ -import numpy import random +import numpy + # Various pre-crafted datasets/variables for testing # !!! Must not be changed -- only appended !!! 
# while testing numpy we better not rely on numpy to produce random @@ -16,13 +17,18 @@ # a set of interesting types to test TYPES1 = [ - 'int16', 'float16', - 'int32', 'float32', - 'int64', 'float64', 'complex64', - 'longfloat', 'complex128', + "int16", + "float16", + "int32", + "float32", + "int64", + "float64", + "complex64", + "longfloat", + "complex128", ] -if 'complex256' in numpy.typeDict: - TYPES1.append('complex256') +if "complex256" in numpy.typeDict: + TYPES1.append("complex256") def memoize(func): @@ -32,6 +38,7 @@ def wrapper(): if not result: result.append(func()) return result[0] + return wrapper @@ -39,6 +46,7 @@ def wrapper(): # replicate 10 times to speed up initial imports of this helper # and generate some redundancy + @memoize def get_values(): rnd = numpy.random.RandomState(1) @@ -49,14 +57,15 @@ def get_values(): @memoize def get_squares(): values = get_values() - squares = {t: numpy.array(values, - dtype=getattr(numpy, t)).reshape((nx, ny)) - for t in TYPES1} + squares = { + t: numpy.array(values, dtype=getattr(numpy, t)).reshape((nx, ny)) + for t in TYPES1 + } # adjust complex ones to have non-degenerated imagery part -- use # original data transposed for that for t, v in squares.items(): - if t.startswith('complex'): + if t.startswith("complex"): v += v.T * 1j return squares @@ -90,8 +99,8 @@ def get_indexes(): def get_indexes_rand(): rnd = random.Random(1) - indexes_rand = get_indexes().tolist() # copy - rnd.shuffle(indexes_rand) # in-place shuffle + indexes_rand = get_indexes().tolist() # copy + rnd.shuffle(indexes_rand) # in-place shuffle indexes_rand = numpy.array(indexes_rand) return indexes_rand diff --git a/benchmarks/pytest_benchmark/README.md b/benchmarks/pytest_benchmark/README.md index 154860e39eae..d3c7478509a3 100644 --- a/benchmarks/pytest_benchmark/README.md +++ b/benchmarks/pytest_benchmark/README.md @@ -20,7 +20,7 @@ pytest-benchmark compare results.json --csv=results.csv --group-by='name' ``` ## Optional: creating histogram -Note: make sure that `pytest-benchmark[histogram]` installed +Note: make sure that `pytest-benchmark[histogram]` installed ```bash # example pip install pytest-benchmark[histogram] diff --git a/benchmarks/pytest_benchmark/test_random.py b/benchmarks/pytest_benchmark/test_random.py index 61f46f95ba41..ccb4ef39745d 100644 --- a/benchmarks/pytest_benchmark/test_random.py +++ b/benchmarks/pytest_benchmark/test_random.py @@ -1,8 +1,7 @@ - # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -26,10 +25,10 @@ # THE POSSIBILITY OF SUCH DAMAGE. 
# ***************************************************************************** +import numpy as np import pytest import dpnp -import numpy as np ROUNDS = 30 ITERATIONS = 4 @@ -37,36 +36,82 @@ NNUMBERS = 2**26 -@pytest.mark.parametrize("function", [dpnp.random.beta, np.random.beta], - ids=["dpnp", "numpy"]) +@pytest.mark.parametrize( + "function", [dpnp.random.beta, np.random.beta], ids=["dpnp", "numpy"] +) def test_beta(benchmark, function): - result = benchmark.pedantic(target=function, args=(4.0, 5.0, NNUMBERS,), - rounds=ROUNDS, iterations=ITERATIONS) + benchmark.pedantic( + target=function, + args=( + 4.0, + 5.0, + NNUMBERS, + ), + rounds=ROUNDS, + iterations=ITERATIONS, + ) -@pytest.mark.parametrize("function", [dpnp.random.exponential, np.random.exponential], - ids=["dpnp", "numpy"]) +@pytest.mark.parametrize( + "function", + [dpnp.random.exponential, np.random.exponential], + ids=["dpnp", "numpy"], +) def test_exponential(benchmark, function): - result = benchmark.pedantic(target=function, args=(4.0, NNUMBERS,), - rounds=ROUNDS, iterations=ITERATIONS) + benchmark.pedantic( + target=function, + args=( + 4.0, + NNUMBERS, + ), + rounds=ROUNDS, + iterations=ITERATIONS, + ) -@pytest.mark.parametrize("function", [dpnp.random.gamma, np.random.gamma], - ids=["dpnp", "numpy"]) +@pytest.mark.parametrize( + "function", [dpnp.random.gamma, np.random.gamma], ids=["dpnp", "numpy"] +) def test_gamma(benchmark, function): - result = benchmark.pedantic(target=function, args=(2.0, 4.0, NNUMBERS,), - rounds=ROUNDS, iterations=ITERATIONS) + benchmark.pedantic( + target=function, + args=( + 2.0, + 4.0, + NNUMBERS, + ), + rounds=ROUNDS, + iterations=ITERATIONS, + ) -@pytest.mark.parametrize("function", [dpnp.random.normal, np.random.normal], - ids=["dpnp", "numpy"]) +@pytest.mark.parametrize( + "function", [dpnp.random.normal, np.random.normal], ids=["dpnp", "numpy"] +) def test_normal(benchmark, function): - result = benchmark.pedantic(target=function, args=(0.0, 1.0, NNUMBERS,), - rounds=ROUNDS, iterations=ITERATIONS) + benchmark.pedantic( + target=function, + args=( + 0.0, + 1.0, + NNUMBERS, + ), + rounds=ROUNDS, + iterations=ITERATIONS, + ) -@pytest.mark.parametrize("function", [dpnp.random.uniform, np.random.uniform], - ids=["dpnp", "numpy"]) +@pytest.mark.parametrize( + "function", [dpnp.random.uniform, np.random.uniform], ids=["dpnp", "numpy"] +) def test_uniform(benchmark, function): - result = benchmark.pedantic(target=function, args=(0.0, 1.0, NNUMBERS,), - rounds=ROUNDS, iterations=ITERATIONS) + benchmark.pedantic( + target=function, + args=( + 0.0, + 1.0, + NNUMBERS, + ), + rounds=ROUNDS, + iterations=ITERATIONS, + ) diff --git a/doc/Makefile b/doc/Makefile index a580a4e46f0b..22f09a08be2f 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -17,4 +17,4 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/_templates/autosummary/class.rst b/doc/_templates/autosummary/class.rst index e129f15ee818..90e3fe72754b 100644 --- a/doc/_templates/autosummary/class.rst +++ b/doc/_templates/autosummary/class.rst @@ -50,4 +50,4 @@ {% for item in attributes %} .. 
autoattribute:: {{ item }} {%- endfor %} -{% endif %} {% endblock %} \ No newline at end of file +{% endif %} {% endblock %} diff --git a/doc/comparison_generator.py b/doc/comparison_generator.py index ae601745a3d2..35190bec2f27 100644 --- a/doc/comparison_generator.py +++ b/doc/comparison_generator.py @@ -19,7 +19,7 @@ def calc_totals(base_mod, ref_mods, cls): def get_functions(obj): funcs = [] for n, _ in inspect.getmembers(obj): - if n in ['test']: + if n in ["test"]: continue if not callable(getattr(obj, n)): continue @@ -27,7 +27,7 @@ def get_functions(obj): continue if not n[0].islower(): continue - if n.startswith('__'): + if n.startswith("__"): continue funcs.append(n) @@ -38,21 +38,21 @@ def import_mod(mod, cls): obj = importlib.import_module(mod) if cls: obj = getattr(obj, cls) - return obj, ':meth:`{}.{}.{{}}`'.format(mod, cls) + return obj, ":meth:`{}.{}.{{}}`".format(mod, cls) else: # ufunc is not a function - return obj, ':obj:`{}.{{}}`'.format(mod) + return obj, ":obj:`{}.{{}}`".format(mod) def generate_totals(base_mod, ref_mods, base_type, ref_types, cls): all_types = [base_type] + ref_types - header = ', '.join('**{} Total**'.format(t) for t in all_types) - header = ' {}'.format(header) + header = ", ".join("**{} Total**".format(t) for t in all_types) + header = " {}".format(header) totals = calc_totals(base_mod, ref_mods, cls) - cells = ', '.join(str(t) for t in totals) - total = ' {}'.format(cells) + cells = ", ".join(str(t) for t in totals) + total = " {}".format(cells) return [header, total] @@ -61,7 +61,7 @@ def generate_comparison_rst(base_mod, ref_mods, base_type, ref_types, cls): base_obj, base_fmt = import_mod(base_mod, cls) base_funcs = get_functions(base_obj) - header = ', '.join([base_type] + ref_types) + header = ", ".join([base_type] + ref_types) rows = [] for f in sorted(base_funcs): @@ -72,25 +72,29 @@ def generate_comparison_rst(base_mod, ref_mods, base_type, ref_types, cls): ref_obj, ref_fmt = import_mod(ref_mod, cls) ref_funcs = get_functions(ref_obj) - ref_cell = r'\-' + ref_cell = r"\-" if f in ref_funcs: ref_cell = ref_fmt.format(f) ref_cells.append(ref_cell) - cells = ', '.join([base_cell] + ref_cells) - line = ' {}'.format(cells) + cells = ", ".join([base_cell] + ref_cells) + line = " {}".format(cells) rows.append(line) totals = generate_totals(base_mod, ref_mods, base_type, ref_types, cls) - return ['.. csv-table::', ' :header: {}'.format(header), ''] + rows + totals + return ( + [".. 
csv-table::", " :header: {}".format(header), ""] + rows + totals + ) def section(header, base_mod, ref_mods, base_type, ref_types, cls=None): - comparison_rst = generate_comparison_rst(base_mod, ref_mods, base_type, ref_types, cls) + comparison_rst = generate_comparison_rst( + base_mod, ref_mods, base_type, ref_types, cls + ) - return [header, '~' * len(header), ''] + comparison_rst + [''] + return [header, "~" * len(header), ""] + comparison_rst + [""] def generate_totals_numbers(header, base_mod, ref_mods, cls=None): @@ -106,61 +110,74 @@ def generate_totals_numbers(header, base_mod, ref_mods, cls=None): totals = [header] + calc_totals(base_mod, ref_mods, cls) - cells = ', '.join(str(t) for t in totals) - total = ' {}'.format(cells) + cells = ", ".join(str(t) for t in totals) + total = " {}".format(cells) return total, counter_funcs def generate_table_numbers(base_mod, ref_mods, base_type, ref_types, cls=None): - all_types = ['Name'] + [base_type] + ref_types - header = ', '.join('**{}**'.format(t) for t in all_types) - header = ' {}'.format(header) + all_types = ["Name"] + [base_type] + ref_types + header = ", ".join("**{}**".format(t) for t in all_types) + header = " {}".format(header) rows = [] counters_funcs = [] totals = [] - totals_, counters_funcs_ = generate_totals_numbers('Module-Level', base_mod, ref_mods) + totals_, counters_funcs_ = generate_totals_numbers( + "Module-Level", base_mod, ref_mods + ) totals.append(totals_) counters_funcs.append(counters_funcs_) - cells = ', '.join(str(t) for t in totals) - total = ' {}'.format(cells) + cells = ", ".join(str(t) for t in totals) + total = " {}".format(cells) rows.append(total) totals = [] - totals_, counters_funcs_ = generate_totals_numbers('Multi-Dimensional Array', base_mod, ref_mods, cls='ndarray') + totals_, counters_funcs_ = generate_totals_numbers( + "Multi-Dimensional Array", base_mod, ref_mods, cls="ndarray" + ) totals.append(totals_) counters_funcs.append(counters_funcs_) - cells = ', '.join(str(t) for t in totals) - total = ' {}'.format(cells) + cells = ", ".join(str(t) for t in totals) + total = " {}".format(cells) rows.append(total) totals = [] - totals_, counters_funcs_ = generate_totals_numbers('Linear Algebra', base_mod + '.linalg', - [m + '.linalg' for m in ref_mods]) + totals_, counters_funcs_ = generate_totals_numbers( + "Linear Algebra", + base_mod + ".linalg", + [m + ".linalg" for m in ref_mods], + ) totals.append(totals_) counters_funcs.append(counters_funcs_) - cells = ', '.join(str(t) for t in totals) - total = ' {}'.format(cells) + cells = ", ".join(str(t) for t in totals) + total = " {}".format(cells) rows.append(total) totals = [] - totals_, counters_funcs_ = generate_totals_numbers('Discrete Fourier Transform', base_mod + '.fft', - [m + '.fft' for m in ref_mods]) + totals_, counters_funcs_ = generate_totals_numbers( + "Discrete Fourier Transform", + base_mod + ".fft", + [m + ".fft" for m in ref_mods], + ) totals.append(totals_) counters_funcs.append(counters_funcs_) - cells = ', '.join(str(t) for t in totals) - total = ' {}'.format(cells) + cells = ", ".join(str(t) for t in totals) + total = " {}".format(cells) rows.append(total) totals = [] - totals_, counters_funcs_ = generate_totals_numbers('Random Sampling', base_mod + '.random', - [m + '.random' for m in ref_mods]) + totals_, counters_funcs_ = generate_totals_numbers( + "Random Sampling", + base_mod + ".random", + [m + ".random" for m in ref_mods], + ) totals.append(totals_) counters_funcs.append(counters_funcs_) - cells = ', '.join(str(t) for t in 
totals) - total = ' {}'.format(cells) + cells = ", ".join(str(t) for t in totals) + total = " {}".format(cells) rows.append(total) counter_functions = [] @@ -168,16 +185,16 @@ def generate_table_numbers(base_mod, ref_mods, base_type, ref_types, cls=None): counter = 0 for j in range(len(counters_funcs)): counter += counters_funcs[j][i] - counter_functions.append('{}'.format(counter)) + counter_functions.append("{}".format(counter)) - summary = ['Total'] + counter_functions - cells = ', '.join(str(t) for t in summary) - summary_total = ' {}'.format(cells) + summary = ["Total"] + counter_functions + cells = ", ".join(str(t) for t in summary) + summary_total = " {}".format(cells) rows.append(summary_total) - comparison_rst = ['.. csv-table::', ''] + [header] + rows + comparison_rst = [".. csv-table::", ""] + [header] + rows - return ['Summary', '~' * len('Summary'), ''] + comparison_rst + [''] + return ["Summary", "~" * len("Summary"), ""] + comparison_rst + [""] def generate(): @@ -187,47 +204,64 @@ def generate(): try: import dpnp - ref_mods += ['dpnp'] - ref_types += ['DPNP'] - ref_vers = ['DPNP(v{})'.format(dpnp.__version__)] + + ref_mods += ["dpnp"] + ref_types += ["DPNP"] + ref_vers = ["DPNP(v{})".format(dpnp.__version__)] except ImportError as err: print(f"DOCBUILD: Can't load DPNP module with error={err}") try: import cupy - ref_mods += ['cupy'] - ref_types += ['CuPy'] - ref_vers += ['CuPy(v{})'.format(cupy.__version__)] + + ref_mods += ["cupy"] + ref_types += ["CuPy"] + ref_vers += ["CuPy(v{})".format(cupy.__version__)] except ImportError as err: print(f"DOCBUILD: Can't load CuPy module with error={err}") try: import numpy - base_mod = 'numpy' # TODO: Why string? - base_type = 'NumPy' - base_ver = '{}(v{})'.format(base_type, numpy.__version__) + + base_mod = "numpy" # TODO: Why string? 
+ base_type = "NumPy" + base_ver = "{}(v{})".format(base_type, numpy.__version__) except ImportError as err: print(f"DOCBUILD: Can't load {base_type} module with error={err}") - header = ' / '.join([base_ver] + ref_vers) + ' APIs' - buf = ['**{}**'.format(header), ''] + header = " / ".join([base_ver] + ref_vers) + " APIs" + buf = ["**{}**".format(header), ""] - buf += generate_table_numbers( - base_mod, ref_mods, base_type, ref_types) + buf += generate_table_numbers(base_mod, ref_mods, base_type, ref_types) + buf += section("Module-Level", base_mod, ref_mods, base_type, ref_types) buf += section( - 'Module-Level', - base_mod, ref_mods, base_type, ref_types) + "Multi-Dimensional Array", + base_mod, + ref_mods, + base_type, + ref_types, + cls="ndarray", + ) buf += section( - 'Multi-Dimensional Array', - base_mod, ref_mods, base_type, ref_types, cls='ndarray') + "Linear Algebra", + base_mod + ".linalg", + [m + ".linalg" for m in ref_mods], + base_type, + ref_types, + ) buf += section( - 'Linear Algebra', - base_mod + '.linalg', [m + '.linalg' for m in ref_mods], base_type, ref_types) + "Discrete Fourier Transform", + base_mod + ".fft", + [m + ".fft" for m in ref_mods], + base_type, + ref_types, + ) buf += section( - 'Discrete Fourier Transform', - base_mod + '.fft', [m + '.fft' for m in ref_mods], base_type, ref_types) - buf += section( - 'Random Sampling', - base_mod + '.random', [m + '.random' for m in ref_mods], base_type, ref_types) - - return '\n'.join(buf) + "Random Sampling", + base_mod + ".random", + [m + ".random" for m in ref_mods], + base_type, + ref_types, + ) + + return "\n".join(buf) diff --git a/doc/conf.py b/doc/conf.py index cd7c9a1002e2..803bf9f33317 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -15,25 +15,25 @@ import os import sys - sys.path.insert(0, os.path.abspath('.')) + sys.path.insert(0, os.path.abspath(".")) import comparison_generator # Generate comparison table. -with open('reference/comparison_table.rst.inc', 'w') as fd: +with open("reference/comparison_table.rst.inc", "w") as fd: fd.write(comparison_generator.generate()) # -- Project information ----------------------------------------------------- -project = 'dpnp' -copyright = '2020, Intel' -author = 'Intel' +project = "dpnp" +copyright = "2020, Intel" +author = "Intel" # The short X.Y version -version = '0.10' +version = "0.10" # The full version, including alpha/beta/rc tags -release = '0.10.1' +release = "0.10.1" # -- General configuration --------------------------------------------------- @@ -46,27 +46,27 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.viewcode', - 'sphinx.ext.githubpages', - 'sphinx.ext.intersphinx', - 'sphinx.ext.napoleon', - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", + "sphinx.ext.intersphinx", + "sphinx.ext.napoleon", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
@@ -78,10 +78,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # -- Options for HTML output ------------------------------------------------- @@ -116,13 +116,20 @@ # 'searchbox.html']``. # # html_sidebars = {} -html_sidebars = {'**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']} +html_sidebars = { + "**": [ + "globaltoc.html", + "relations.html", + "sourcelink.html", + "searchbox.html", + ] +} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'dpnpdoc' +htmlhelp_basename = "dpnpdoc" # -- Options for LaTeX output ------------------------------------------------ @@ -131,15 +138,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -149,8 +153,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'dpnp.tex', 'dpnp Documentation', - 'Intel', 'manual'), + (master_doc, "dpnp.tex", "dpnp Documentation", "Intel", "manual"), ] @@ -158,10 +161,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'dpnp', 'dpnp Documentation', - [author], 1) -] +man_pages = [(master_doc, "dpnp", "dpnp Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -170,9 +170,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'dpnp', 'dpnp Documentation', - author, 'dpnp', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "dpnp", + "dpnp Documentation", + author, + "dpnp", + "One line description of project.", + "Miscellaneous", + ), ] @@ -183,9 +189,9 @@ autosummary_generate = True intersphinx_mapping = { - 'python': ('https://docs.python.org/3/', None), - 'numpy': ('https://docs.scipy.org/doc/numpy/', None), - 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), + "python": ("https://docs.python.org/3/", None), + "numpy": ("https://docs.scipy.org/doc/numpy/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), } # If true, `todo` and `todoList` produce output, else they produce nothing. @@ -194,4 +200,4 @@ # Napoleon settings napoleon_use_ivar = True napoleon_include_special_with_doc = True -napoleon_custom_sections = ['limitations'] +napoleon_custom_sections = ["limitations"] diff --git a/doc/reference/linalg.rst b/doc/reference/linalg.rst index 564ba4324479..00c1332c9acb 100644 --- a/doc/reference/linalg.rst +++ b/doc/reference/linalg.rst @@ -9,7 +9,7 @@ Matrix and vector products .. 
autosummary:: :toctree: generated/ :nosignatures: - + dpnp.cross dpnp.dot dpnp.linalg.multi_dot diff --git a/doc/reference/math.rst b/doc/reference/math.rst index 634ccadc8f5c..dee52ba710f8 100644 --- a/doc/reference/math.rst +++ b/doc/reference/math.rst @@ -182,4 +182,4 @@ Miscellaneous dpnp.blackman dpnp.hamming dpnp.hanning - dpnp.kaiser \ No newline at end of file + dpnp.kaiser diff --git a/dpnp/__init__.py b/dpnp/__init__.py index 843c3c111a6b..848c464fcbd2 100644 --- a/dpnp/__init__.py +++ b/dpnp/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -25,9 +25,11 @@ # ***************************************************************************** import os + mypath = os.path.dirname(os.path.realpath(__file__)) import dpctl + dpctlpath = os.path.dirname(dpctl.__file__) os.environ["PATH"] += os.pathsep + mypath + os.pathsep + dpctlpath @@ -35,11 +37,9 @@ from dpnp.dpnp_array import dpnp_array as ndarray from dpnp.dpnp_flatiter import flatiter as flatiter - from dpnp.dpnp_iface import * from dpnp.dpnp_iface import __all__ as _iface__all__ from dpnp.dpnp_iface_types import * from dpnp.version import __version__ - __all__ = _iface__all__ diff --git a/dpnp/backend/include/dpnp_gen_1arg_1type_tbl.hpp b/dpnp/backend/include/dpnp_gen_1arg_1type_tbl.hpp index f5ee23d755f2..19db411c3b15 100644 --- a/dpnp/backend/include/dpnp_gen_1arg_1type_tbl.hpp +++ b/dpnp/backend/include/dpnp_gen_1arg_1type_tbl.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -102,8 +102,6 @@ MACRO_1ARG_1TYPE_OP(dpnp_recip_c, MACRO_1ARG_1TYPE_OP(dpnp_sign_c, sycl::sign((double)input_elem), q.submit(kernel_func)) // no sycl::sign for int and long -MACRO_1ARG_1TYPE_OP(dpnp_square_c, - input_elem* input_elem, - oneapi::mkl::vm::sqr(q, input1_size, input1_data, result)) +MACRO_1ARG_1TYPE_OP(dpnp_square_c, input_elem* input_elem, oneapi::mkl::vm::sqr(q, input1_size, input1_data, result)) #undef MACRO_1ARG_1TYPE_OP diff --git a/dpnp/backend/include/dpnp_gen_1arg_2type_tbl.hpp b/dpnp/backend/include/dpnp_gen_1arg_2type_tbl.hpp index 71caeef9111a..ab628a4321db 100644 --- a/dpnp/backend/include/dpnp_gen_1arg_2type_tbl.hpp +++ b/dpnp/backend/include/dpnp_gen_1arg_2type_tbl.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -90,83 +90,33 @@ #endif -MACRO_1ARG_2TYPES_OP(dpnp_acos_c, - sycl::acos(input_elem), - oneapi::mkl::vm::acos(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_acosh_c, - sycl::acosh(input_elem), - oneapi::mkl::vm::acosh(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_asin_c, - sycl::asin(input_elem), - oneapi::mkl::vm::asin(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_asinh_c, - sycl::asinh(input_elem), - oneapi::mkl::vm::asinh(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_atan_c, - sycl::atan(input_elem), - oneapi::mkl::vm::atan(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_atanh_c, - sycl::atanh(input_elem), - oneapi::mkl::vm::atanh(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_cbrt_c, - sycl::cbrt(input_elem), - oneapi::mkl::vm::cbrt(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_ceil_c, - sycl::ceil(input_elem), - oneapi::mkl::vm::ceil(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_acos_c, sycl::acos(input_elem), oneapi::mkl::vm::acos(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_acosh_c, sycl::acosh(input_elem), oneapi::mkl::vm::acosh(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_asin_c, sycl::asin(input_elem), oneapi::mkl::vm::asin(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_asinh_c, sycl::asinh(input_elem), oneapi::mkl::vm::asinh(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_atan_c, sycl::atan(input_elem), oneapi::mkl::vm::atan(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_atanh_c, sycl::atanh(input_elem), oneapi::mkl::vm::atanh(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_cbrt_c, sycl::cbrt(input_elem), oneapi::mkl::vm::cbrt(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_ceil_c, sycl::ceil(input_elem), oneapi::mkl::vm::ceil(q, input1_size, input1_data, result)) MACRO_1ARG_2TYPES_OP(dpnp_copyto_c, input_elem, q.submit(kernel_func)) -MACRO_1ARG_2TYPES_OP(dpnp_cos_c, - sycl::cos(input_elem), - oneapi::mkl::vm::cos(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_cosh_c, - sycl::cosh(input_elem), - oneapi::mkl::vm::cosh(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_cos_c, sycl::cos(input_elem), oneapi::mkl::vm::cos(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_cosh_c, sycl::cosh(input_elem), oneapi::mkl::vm::cosh(q, input1_size, input1_data, result)) MACRO_1ARG_2TYPES_OP(dpnp_degrees_c, sycl::degrees(input_elem), q.submit(kernel_func)) -MACRO_1ARG_2TYPES_OP(dpnp_exp2_c, - sycl::exp2(input_elem), - oneapi::mkl::vm::exp2(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_exp_c, - sycl::exp(input_elem), - oneapi::mkl::vm::exp(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_expm1_c, - sycl::expm1(input_elem), - oneapi::mkl::vm::expm1(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_fabs_c, - sycl::fabs(input_elem), - oneapi::mkl::vm::abs(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_floor_c, - sycl::floor(input_elem), - oneapi::mkl::vm::floor(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_log10_c, - sycl::log10(input_elem), - oneapi::mkl::vm::log10(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_log1p_c, - sycl::log1p(input_elem), - oneapi::mkl::vm::log1p(q, input1_size, input1_data, 
result)) -MACRO_1ARG_2TYPES_OP(dpnp_log2_c, - sycl::log2(input_elem), - oneapi::mkl::vm::log2(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_log_c, - sycl::log(input_elem), - oneapi::mkl::vm::ln(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_exp2_c, sycl::exp2(input_elem), oneapi::mkl::vm::exp2(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_exp_c, sycl::exp(input_elem), oneapi::mkl::vm::exp(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_expm1_c, sycl::expm1(input_elem), oneapi::mkl::vm::expm1(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_fabs_c, sycl::fabs(input_elem), oneapi::mkl::vm::abs(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_floor_c, sycl::floor(input_elem), oneapi::mkl::vm::floor(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_log10_c, sycl::log10(input_elem), oneapi::mkl::vm::log10(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_log1p_c, sycl::log1p(input_elem), oneapi::mkl::vm::log1p(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_log2_c, sycl::log2(input_elem), oneapi::mkl::vm::log2(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_log_c, sycl::log(input_elem), oneapi::mkl::vm::ln(q, input1_size, input1_data, result)) MACRO_1ARG_2TYPES_OP(dpnp_radians_c, sycl::radians(input_elem), q.submit(kernel_func)) -MACRO_1ARG_2TYPES_OP(dpnp_sin_c, - sycl::sin(input_elem), - oneapi::mkl::vm::sin(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_sinh_c, - sycl::sinh(input_elem), - oneapi::mkl::vm::sinh(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_sqrt_c, - sycl::sqrt(input_elem), - oneapi::mkl::vm::sqrt(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_tan_c, - sycl::tan(input_elem), - oneapi::mkl::vm::tan(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_tanh_c, - sycl::tanh(input_elem), - oneapi::mkl::vm::tanh(q, input1_size, input1_data, result)) -MACRO_1ARG_2TYPES_OP(dpnp_trunc_c, - sycl::trunc(input_elem), - oneapi::mkl::vm::trunc(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_sin_c, sycl::sin(input_elem), oneapi::mkl::vm::sin(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_sinh_c, sycl::sinh(input_elem), oneapi::mkl::vm::sinh(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_sqrt_c, sycl::sqrt(input_elem), oneapi::mkl::vm::sqrt(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_tan_c, sycl::tan(input_elem), oneapi::mkl::vm::tan(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_tanh_c, sycl::tanh(input_elem), oneapi::mkl::vm::tanh(q, input1_size, input1_data, result)) +MACRO_1ARG_2TYPES_OP(dpnp_trunc_c, sycl::trunc(input_elem), oneapi::mkl::vm::trunc(q, input1_size, input1_data, result)) #undef MACRO_1ARG_2TYPES_OP diff --git a/dpnp/backend/include/dpnp_iface.hpp b/dpnp/backend/include/dpnp_iface.hpp index 42c05f0fd61d..5f08246c8f1e 100644 --- a/dpnp/backend/include/dpnp_iface.hpp +++ b/dpnp/backend/include/dpnp_iface.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -260,11 +260,8 @@ INP_DLLEXPORT void dpnp_full_c(void* array_in, void* result, const size_t size); * @param [in] dep_event_vec_ref Reference to vector of SYCL events. 
*/ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_full_like_c(DPCTLSyclQueueRef q_ref, - void* array_in, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_full_like_c( + DPCTLSyclQueueRef q_ref, void* array_in, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_full_like_c(void* array_in, void* result, size_t size); @@ -517,11 +514,8 @@ INP_DLLEXPORT void dpnp_cross_c(void* result_out, * */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_cumprod_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_cumprod_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_cumprod_c(void* array1_in, void* result1, size_t size); @@ -538,11 +532,8 @@ INP_DLLEXPORT void dpnp_cumprod_c(void* array1_in, void* result1, size_t size); * */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_cumsum_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_cumsum_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_cumsum_c(void* array1_in, void* result1, size_t size); @@ -935,11 +926,8 @@ INP_DLLEXPORT void dpnp_eye_c(void* result, int k, const shape_elem_type* res_sh * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_argsort_c(DPCTLSyclQueueRef q_ref, - void* array, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_argsort_c( + DPCTLSyclQueueRef q_ref, void* array, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_argsort_c(void* array, void* result, size_t size); @@ -982,11 +970,8 @@ INP_DLLEXPORT void dpnp_searchsorted_c( * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_sort_c(DPCTLSyclQueueRef q_ref, - void* array, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_sort_c( + DPCTLSyclQueueRef q_ref, void* array, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_sort_c(void* array, void* result, size_t size); @@ -1242,11 +1227,8 @@ INP_DLLEXPORT void dpnp_identity_c(void* result1, const size_t n); * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_initval_c(DPCTLSyclQueueRef q_ref, - void* result1, - void* value, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_initval_c( + DPCTLSyclQueueRef q_ref, void* result1, void* value, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_initval_c(void* result1, void* value, size_t size); @@ -1428,11 +1410,8 @@ INP_DLLEXPORT void dpnp_min_c(void* array, * @param [in] dep_event_vec_ref Reference to vector of SYCL events. 
*/ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_argmax_c(DPCTLSyclQueueRef q_ref, - void* array, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_argmax_c( + DPCTLSyclQueueRef q_ref, void* array, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_argmax_c(void* array, void* result, size_t size); @@ -1448,11 +1427,8 @@ INP_DLLEXPORT void dpnp_argmax_c(void* array, void* result, size_t size); * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_argmin_c(DPCTLSyclQueueRef q_ref, - void* array, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_argmin_c( + DPCTLSyclQueueRef q_ref, void* array, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_argmin_c(void* array, void* result, size_t size); @@ -1694,11 +1670,8 @@ INP_DLLEXPORT void dpnp_var_c(void* array, * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_invert_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef dpnp_invert_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_invert_c(void* array1_in, void* result, size_t size); @@ -1944,10 +1917,8 @@ INP_DLLEXPORT void dpnp_modf_c(void* array1_in, void* result1_out, void* result2 * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_ones_c(DPCTLSyclQueueRef q_ref, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef + dpnp_ones_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_ones_c(void* result, size_t size); @@ -1962,10 +1933,8 @@ INP_DLLEXPORT void dpnp_ones_c(void* result, size_t size); * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_ones_like_c(DPCTLSyclQueueRef q_ref, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef + dpnp_ones_like_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_ones_like_c(void* result, size_t size); @@ -2133,10 +2102,8 @@ INP_DLLEXPORT void * @param [in] dep_event_vec_ref Reference to vector of SYCL events. */ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_zeros_c(DPCTLSyclQueueRef q_ref, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef + dpnp_zeros_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_zeros_c(void* result, size_t size); @@ -2151,10 +2118,8 @@ INP_DLLEXPORT void dpnp_zeros_c(void* result, size_t size); * @param [in] dep_event_vec_ref Reference to vector of SYCL events. 
*/ template -INP_DLLEXPORT DPCTLSyclEventRef dpnp_zeros_like_c(DPCTLSyclQueueRef q_ref, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref); +INP_DLLEXPORT DPCTLSyclEventRef + dpnp_zeros_like_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref); template INP_DLLEXPORT void dpnp_zeros_like_c(void* result, size_t size); diff --git a/dpnp/backend/include/dpnp_iface_fft.hpp b/dpnp/backend/include/dpnp_iface_fft.hpp index 47d9a7b952b0..718a619773ed 100644 --- a/dpnp/backend/include/dpnp_iface_fft.hpp +++ b/dpnp/backend/include/dpnp_iface_fft.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -88,7 +88,6 @@ INP_DLLEXPORT void dpnp_fft_fft_c(const void* array1_in, size_t inverse, const size_t norm); - /** * @ingroup BACKEND_FFT_API * @brief 1D discrete Fourier Transform. diff --git a/dpnp/backend/include/dpnp_iface_random.hpp b/dpnp/backend/include/dpnp_iface_random.hpp index e6396b9e0aa2..5372cb177258 100644 --- a/dpnp/backend/include/dpnp_iface_random.hpp +++ b/dpnp/backend/include/dpnp_iface_random.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -350,8 +350,8 @@ INP_DLLEXPORT DPCTLSyclEventRef dpnp_rng_multinomial_c(DPCTLSyclQueueRef q_ref, const DPCTLEventVectorRef dep_event_vec_ref); template -INP_DLLEXPORT void dpnp_rng_multinomial_c( - void* result, const int ntrial, const double* p_in, const size_t p_size, const size_t size); +INP_DLLEXPORT void + dpnp_rng_multinomial_c(void* result, const int ntrial, const double* p_in, const size_t p_size, const size_t size); /** * @ingroup BACKEND_RANDOM_API diff --git a/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp b/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp index 3b7f362d51a5..ded102d03e9e 100644 --- a/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. 
 //
 // Redistribution and use in source and binary forms, with or without
@@ -83,12 +83,7 @@ void dpnp_arange_c(size_t start, size_t step, void* result1, size_t size)
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_arange_c<_DataType>(q_ref,
-                                                           start,
-                                                           step,
-                                                           result1,
-                                                           size,
-                                                           dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref = dpnp_arange_c<_DataType>(q_ref, start, step, result1, size, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

@@ -96,12 +91,8 @@ template
 void (*dpnp_arange_default_c)(size_t, size_t, void*, size_t) = dpnp_arange_c<_DataType>;

 template
-DPCTLSyclEventRef (*dpnp_arange_ext_c)(DPCTLSyclQueueRef,
-                                       size_t,
-                                       size_t,
-                                       void*,
-                                       size_t,
-                                       const DPCTLEventVectorRef) = dpnp_arange_c<_DataType>;
+DPCTLSyclEventRef (*dpnp_arange_ext_c)(DPCTLSyclQueueRef, size_t, size_t, void*, size_t, const DPCTLEventVectorRef) =
+    dpnp_arange_c<_DataType>;

 template
 DPCTLSyclEventRef dpnp_diag_c(DPCTLSyclQueueRef q_ref,
@@ -123,8 +114,8 @@ DPCTLSyclEventRef dpnp_diag_c(DPCTLSyclQueueRef q_ref,
     const size_t input1_size = std::accumulate(shape, shape + ndim, 1, std::multiplies<shape_elem_type>());
     const size_t result_size = std::accumulate(res_shape, res_shape + res_ndim, 1, std::multiplies<shape_elem_type>());

-    DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref,v_in, input1_size, true);
-    DPNPC_ptr_adapter<_DataType> result_ptr(q_ref,result1, result_size, true, true);
+    DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref, v_in, input1_size, true);
+    DPNPC_ptr_adapter<_DataType> result_ptr(q_ref, result1, result_size, true, true);
     _DataType* v = input1_ptr.get_ptr();
     _DataType* result = result_ptr.get_ptr();
@@ -161,26 +152,14 @@ void dpnp_diag_c(void* v_in,
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_diag_c<_DataType>(q_ref,
-                                                         v_in,
-                                                         result1,
-                                                         k,
-                                                         shape,
-                                                         res_shape,
-                                                         ndim,
-                                                         res_ndim,
-                                                         dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref =
+        dpnp_diag_c<_DataType>(q_ref, v_in, result1, k, shape, res_shape, ndim, res_ndim, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

 template
-void (*dpnp_diag_default_c)(void*,
-                            void*,
-                            const int,
-                            shape_elem_type*,
-                            shape_elem_type*,
-                            const size_t,
-                            const size_t) = dpnp_diag_c<_DataType>;
+void (*dpnp_diag_default_c)(void*, void*, const int, shape_elem_type*, shape_elem_type*, const size_t, const size_t) =
+    dpnp_diag_c<_DataType>;

 template
 DPCTLSyclEventRef (*dpnp_diag_ext_c)(DPCTLSyclQueueRef,
@@ -219,7 +198,7 @@ DPCTLSyclEventRef dpnp_eye_c(DPCTLSyclQueueRef q_ref,
     size_t result_size = res_shape[0] * res_shape[1];

-    DPNPC_ptr_adapter<_DataType> result_ptr(q_ref,result1, result_size, true, true);
+    DPNPC_ptr_adapter<_DataType> result_ptr(q_ref, result1, result_size, true, true);
     _DataType* result = result_ptr.get_ptr();

     int diag_val_;
@@ -251,11 +230,7 @@ void dpnp_eye_c(void* result1, int k, const shape_elem_type* res_shape)
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_eye_c<_DataType>(q_ref,
-                                                        result1,
-                                                        k,
-                                                        res_shape,
-                                                        dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref = dpnp_eye_c<_DataType>(q_ref, result1, k, res_shape, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

@@ -263,11 +238,8 @@ template
 void (*dpnp_eye_default_c)(void*, int, const shape_elem_type*) = dpnp_eye_c<_DataType>;

 template
-DPCTLSyclEventRef (*dpnp_eye_ext_c)(DPCTLSyclQueueRef,
-                                    void*,
-                                    int,
-                                    const shape_elem_type*,
-                                    const DPCTLEventVectorRef) = dpnp_eye_c<_DataType>;
+DPCTLSyclEventRef (*dpnp_eye_ext_c)(DPCTLSyclQueueRef, void*, int, const shape_elem_type*, const DPCTLEventVectorRef) =
+    dpnp_eye_c<_DataType>;

 template
 DPCTLSyclEventRef dpnp_full_c(DPCTLSyclQueueRef q_ref,
@@ -284,11 +256,7 @@ void dpnp_full_c(void* array_in, void* result, const size_t size)
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_full_c<_DataType>(q_ref,
-                                                         array_in,
-                                                         result,
-                                                         size,
-                                                         dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref = dpnp_full_c<_DataType>(q_ref, array_in, result, size, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

@@ -296,11 +264,8 @@ template
 void (*dpnp_full_default_c)(void*, void*, const size_t) = dpnp_full_c<_DataType>;

 template
-DPCTLSyclEventRef (*dpnp_full_ext_c)(DPCTLSyclQueueRef,
-                                     void*,
-                                     void*,
-                                     const size_t,
-                                     const DPCTLEventVectorRef) = dpnp_full_c<_DataType>;
+DPCTLSyclEventRef (*dpnp_full_ext_c)(DPCTLSyclQueueRef, void*, void*, const size_t, const DPCTLEventVectorRef) =
+    dpnp_full_c<_DataType>;

 template
 DPCTLSyclEventRef dpnp_full_like_c(DPCTLSyclQueueRef q_ref,
@@ -317,11 +282,7 @@ void dpnp_full_like_c(void* array_in, void* result, const size_t size)
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_full_like_c<_DataType>(q_ref,
-                                                              array_in,
-                                                              result,
-                                                              size,
-                                                              dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref = dpnp_full_like_c<_DataType>(q_ref, array_in, result, size, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

@@ -329,20 +290,15 @@ template
 void (*dpnp_full_like_default_c)(void*, void*, const size_t) = dpnp_full_like_c<_DataType>;

 template
-DPCTLSyclEventRef (*dpnp_full_like_ext_c)(DPCTLSyclQueueRef,
-                                          void*,
-                                          void*,
-                                          const size_t,
-                                          const DPCTLEventVectorRef) = dpnp_full_like_c<_DataType>;
+DPCTLSyclEventRef (*dpnp_full_like_ext_c)(DPCTLSyclQueueRef, void*, void*, const size_t, const DPCTLEventVectorRef) =
+    dpnp_full_like_c<_DataType>;

 template
 class dpnp_identity_c_kernel;

 template
-DPCTLSyclEventRef dpnp_identity_c(DPCTLSyclQueueRef q_ref,
-                                  void* result1,
-                                  const size_t n,
-                                  const DPCTLEventVectorRef dep_event_vec_ref)
+DPCTLSyclEventRef
+    dpnp_identity_c(DPCTLSyclQueueRef q_ref, void* result1, const size_t n, const DPCTLEventVectorRef dep_event_vec_ref)
 {
     // avoid warning unused variable
     (void)dep_event_vec_ref;
@@ -382,10 +338,7 @@ void dpnp_identity_c(void* result1, const size_t n)
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_identity_c<_DataType>(q_ref,
-                                                             result1,
-                                                             n,
-                                                             dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref = dpnp_identity_c<_DataType>(q_ref, result1, n, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

@@ -393,19 +346,15 @@ template
 void (*dpnp_identity_default_c)(void*, const size_t) = dpnp_identity_c<_DataType>;

 template
-DPCTLSyclEventRef (*dpnp_identity_ext_c)(DPCTLSyclQueueRef,
-                                         void*,
-                                         const size_t,
-                                         const DPCTLEventVectorRef) = dpnp_identity_c<_DataType>;
+DPCTLSyclEventRef (*dpnp_identity_ext_c)(DPCTLSyclQueueRef, void*, const size_t, const DPCTLEventVectorRef) =
+    dpnp_identity_c<_DataType>;

 template
 class dpnp_ones_c_kernel;

 template
-DPCTLSyclEventRef dpnp_ones_c(DPCTLSyclQueueRef q_ref,
-                              void* result,
-                              size_t size,
-                              const DPCTLEventVectorRef dep_event_vec_ref)
+DPCTLSyclEventRef
+    dpnp_ones_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref)
 {
     sycl::queue q = *(reinterpret_cast<sycl::queue*>(q_ref));
@@ -425,10 +374,7 @@ void dpnp_ones_c(void* result, size_t size)
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_ones_c<_DataType>(q_ref,
-                                                         result,
-                                                         size,
-                                                         dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref = dpnp_ones_c<_DataType>(q_ref, result, size, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

@@ -436,16 +382,12 @@ template
 void (*dpnp_ones_default_c)(void*, size_t) = dpnp_ones_c<_DataType>;

 template
-DPCTLSyclEventRef (*dpnp_ones_ext_c)(DPCTLSyclQueueRef,
-                                     void*,
-                                     size_t,
-                                     const DPCTLEventVectorRef) = dpnp_ones_c<_DataType>;
+DPCTLSyclEventRef (*dpnp_ones_ext_c)(DPCTLSyclQueueRef, void*, size_t, const DPCTLEventVectorRef) =
+    dpnp_ones_c<_DataType>;

 template
-DPCTLSyclEventRef dpnp_ones_like_c(DPCTLSyclQueueRef q_ref,
-                                   void* result,
-                                   size_t size,
-                                   const DPCTLEventVectorRef dep_event_vec_ref)
+DPCTLSyclEventRef
+    dpnp_ones_like_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref)
 {
     return dpnp_ones_c<_DataType>(q_ref, result, size, dep_event_vec_ref);
 }
@@ -455,10 +397,7 @@ void dpnp_ones_like_c(void* result, size_t size)
 {
     DPCTLSyclQueueRef q_ref = reinterpret_cast<DPCTLSyclQueueRef>(&DPNP_QUEUE);
     DPCTLEventVectorRef dep_event_vec_ref = nullptr;
-    DPCTLSyclEventRef event_ref = dpnp_ones_like_c<_DataType>(q_ref,
-                                                              result,
-                                                              size,
-                                                              dep_event_vec_ref);
+    DPCTLSyclEventRef event_ref = dpnp_ones_like_c<_DataType>(q_ref, result, size, dep_event_vec_ref);
     DPCTLEvent_WaitAndThrow(event_ref);
 }

@@ -466,10 +405,8 @@ template
 void (*dpnp_ones_like_default_c)(void*, size_t) = dpnp_ones_like_c<_DataType>;

 template
-DPCTLSyclEventRef (*dpnp_ones_like_ext_c)(DPCTLSyclQueueRef,
-                                          void*,
-                                          size_t,
-                                          const DPCTLEventVectorRef) = dpnp_ones_like_c<_DataType>;
+DPCTLSyclEventRef (*dpnp_ones_like_ext_c)(DPCTLSyclQueueRef, void*, size_t, const DPCTLEventVectorRef) =
+    dpnp_ones_like_c<_DataType>;

 template
 DPCTLSyclEventRef dpnp_ptp_c(DPCTLSyclQueueRef q_ref,
@@ -509,8 +446,8 @@ DPCTLSyclEventRef dpnp_ptp_c(DPCTLSyclQueueRef q_ref,
     sycl::queue q = *(reinterpret_cast<sycl::queue*>(q_ref));

-    DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref,input1_in, input_size, true);
-    DPNPC_ptr_adapter<_DataType> result_ptr(q_ref,result1_out, result_size, false, true);
+    DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref, input1_in, input_size, true);
+    DPNPC_ptr_adapter<_DataType> result_ptr(q_ref, result1_out, result_size, false, true);
     _DataType* arr = input1_ptr.get_ptr();
     _DataType* result = result_ptr.get_ptr();
@@ -524,22 +461,24 @@ DPCTLSyclEventRef dpnp_ptp_c(DPCTLSyclQueueRef q_ref,
         reinterpret_cast(sycl::malloc_shared(result_ndim * sizeof(shape_elem_type), q));
     get_shape_offsets_inkernel(result_shape, result_ndim, _strides);
-    e3_ref = dpnp_subtract_c<_DataType, _DataType, _DataType>(q_ref, result,
-                                                              result_size,
-                                                              result_ndim,
-                                                              result_shape,
-                                                              result_strides,
-                                                              max_arr,
-                                                              result_size,
-                                                              result_ndim,
-                                                              result_shape,
-                                                              _strides,
-                                                              min_arr,
-                                                              result_size,
-                                                              result_ndim,
-                                                              result_shape,
-                                                              _strides,
-                                                              NULL, NULL);
+    e3_ref = dpnp_subtract_c<_DataType, _DataType, _DataType>(q_ref,
+                                                              result,
+                                                              result_size,
+                                                              result_ndim,
+                                                              result_shape,
+                                                              result_strides,
+                                                              max_arr,
+                                                              result_size,
+                                                              result_ndim,
+                                                              result_shape,
+                                                              _strides,
+                                                              min_arr,
+                                                              result_size,
+                                                              result_ndim,
+                                                              result_shape,
+                                                              _strides,
+                                                              NULL,
+                                                              NULL);

     DPCTLEvent_Wait(e1_ref);
DPCTLEvent_Wait(e2_ref); @@ -638,8 +577,8 @@ DPCTLSyclEventRef dpnp_vander_c(DPCTLSyclQueueRef q_ref, sycl::queue q = *(reinterpret_cast(q_ref)); - DPNPC_ptr_adapter<_DataType_input> input1_ptr(q_ref,array1_in, size_in, true); - DPNPC_ptr_adapter<_DataType_output> result_ptr(q_ref,result1, size_in * N, true, true); + DPNPC_ptr_adapter<_DataType_input> input1_ptr(q_ref, array1_in, size_in, true); + DPNPC_ptr_adapter<_DataType_output> result_ptr(q_ref, result1, size_in * N, true, true); const _DataType_input* array_in = input1_ptr.get_ptr(); _DataType_output* result = result_ptr.get_ptr(); @@ -690,22 +629,14 @@ void dpnp_vander_c(const void* array1_in, void* result1, const size_t size_in, c { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_vander_c<_DataType_input, _DataType_output>(q_ref, - array1_in, - result1, - size_in, - N, - increasing, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_vander_c<_DataType_input, _DataType_output>( + q_ref, array1_in, result1, size_in, N, increasing, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_vander_default_c)(const void*, - void*, - const size_t, - const size_t, - const int) = dpnp_vander_c<_DataType_input, _DataType_output>; +void (*dpnp_vander_default_c)(const void*, void*, const size_t, const size_t, const int) = + dpnp_vander_c<_DataType_input, _DataType_output>; template DPCTLSyclEventRef (*dpnp_vander_ext_c)(DPCTLSyclQueueRef, @@ -746,7 +677,7 @@ DPCTLSyclEventRef dpnp_trace_c(DPCTLSyclQueueRef q_ref, sycl::queue q = *(reinterpret_cast(q_ref)); - DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref,array1_in, size * last_dim); + DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref, array1_in, size * last_dim); const _DataType* input = input1_ptr.get_ptr(); _ResultType* result = reinterpret_cast<_ResultType*>(result_in); @@ -780,20 +711,14 @@ void dpnp_trace_c(const void* array1_in, void* result_in, const shape_elem_type* { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_trace_c<_DataType, _ResultType>(q_ref, - array1_in, - result_in, - shape_, - ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_trace_c<_DataType, _ResultType>(q_ref, array1_in, result_in, shape_, ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_trace_default_c)(const void*, - void*, - const shape_elem_type*, - const size_t) = dpnp_trace_c<_DataType, _ResultType>; +void (*dpnp_trace_default_c)(const void*, void*, const shape_elem_type*, const size_t) = + dpnp_trace_c<_DataType, _ResultType>; template DPCTLSyclEventRef (*dpnp_trace_ext_c)(DPCTLSyclQueueRef, @@ -867,28 +792,16 @@ void dpnp_tri_c(void* result1, const size_t N, const size_t M, const int k) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_tri_c<_DataType>(q_ref, - result1, - N, - M, - k, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_tri_c<_DataType>(q_ref, result1, N, M, k, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_tri_default_c)(void*, - const size_t, - const size_t, - const int) = dpnp_tri_c<_DataType>; +void (*dpnp_tri_default_c)(void*, const size_t, const size_t, const int) = dpnp_tri_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_tri_ext_c)(DPCTLSyclQueueRef, - void*, - const size_t, - const size_t, 
- const int, - const DPCTLEventVectorRef) = dpnp_tri_c<_DataType>; +DPCTLSyclEventRef (*dpnp_tri_ext_c)( + DPCTLSyclQueueRef, void*, const size_t, const size_t, const int, const DPCTLEventVectorRef) = dpnp_tri_c<_DataType>; template DPCTLSyclEventRef dpnp_tril_c(DPCTLSyclQueueRef q_ref, @@ -935,8 +848,8 @@ DPCTLSyclEventRef dpnp_tril_c(DPCTLSyclQueueRef q_ref, sycl::queue q = *(reinterpret_cast(q_ref)); - DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref,array_in, input_size, true); - DPNPC_ptr_adapter<_DataType> result_ptr(q_ref,result1, res_size, true, true); + DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref, array_in, input_size, true); + DPNPC_ptr_adapter<_DataType> result_ptr(q_ref, result1, res_size, true, true); _DataType* array_m = input1_ptr.get_ptr(); _DataType* result = result_ptr.get_ptr(); @@ -1018,26 +931,14 @@ void dpnp_tril_c(void* array_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_tril_c<_DataType>(q_ref, - array_in, - result1, - k, - shape, - res_shape, - ndim, - res_ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_tril_c<_DataType>(q_ref, array_in, result1, k, shape, res_shape, ndim, res_ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_tril_default_c)(void*, - void*, - const int, - shape_elem_type*, - shape_elem_type*, - const size_t, - const size_t) = dpnp_tril_c<_DataType>; +void (*dpnp_tril_default_c)(void*, void*, const int, shape_elem_type*, shape_elem_type*, const size_t, const size_t) = + dpnp_tril_c<_DataType>; template DPCTLSyclEventRef (*dpnp_tril_ext_c)(DPCTLSyclQueueRef, @@ -1095,8 +996,8 @@ DPCTLSyclEventRef dpnp_triu_c(DPCTLSyclQueueRef q_ref, sycl::queue q = *(reinterpret_cast(q_ref)); - DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref,array_in, input_size, true); - DPNPC_ptr_adapter<_DataType> result_ptr(q_ref,result1, res_size, true, true); + DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref, array_in, input_size, true); + DPNPC_ptr_adapter<_DataType> result_ptr(q_ref, result1, res_size, true, true); _DataType* array_m = input1_ptr.get_ptr(); _DataType* result = result_ptr.get_ptr(); @@ -1178,26 +1079,14 @@ void dpnp_triu_c(void* array_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_triu_c<_DataType>(q_ref, - array_in, - result1, - k, - shape, - res_shape, - ndim, - res_ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_triu_c<_DataType>(q_ref, array_in, result1, k, shape, res_shape, ndim, res_ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_triu_default_c)(void*, - void*, - const int, - shape_elem_type*, - shape_elem_type*, - const size_t, - const size_t) = dpnp_triu_c<_DataType>; +void (*dpnp_triu_default_c)(void*, void*, const int, shape_elem_type*, shape_elem_type*, const size_t, const size_t) = + dpnp_triu_c<_DataType>; template DPCTLSyclEventRef (*dpnp_triu_ext_c)(DPCTLSyclQueueRef, @@ -1211,10 +1100,8 @@ DPCTLSyclEventRef (*dpnp_triu_ext_c)(DPCTLSyclQueueRef, const DPCTLEventVectorRef) = dpnp_triu_c<_DataType>; template -DPCTLSyclEventRef dpnp_zeros_c(DPCTLSyclQueueRef q_ref, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef + dpnp_zeros_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { sycl::queue q = *(reinterpret_cast(q_ref)); @@ -1234,10 +1121,7 @@ void 
dpnp_zeros_c(void* result, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_zeros_c<_DataType>(q_ref, - result, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_zeros_c<_DataType>(q_ref, result, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1245,16 +1129,12 @@ template void (*dpnp_zeros_default_c)(void*, size_t) = dpnp_zeros_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_zeros_ext_c)(DPCTLSyclQueueRef, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_zeros_c<_DataType>; +DPCTLSyclEventRef (*dpnp_zeros_ext_c)(DPCTLSyclQueueRef, void*, size_t, const DPCTLEventVectorRef) = + dpnp_zeros_c<_DataType>; template -DPCTLSyclEventRef dpnp_zeros_like_c(DPCTLSyclQueueRef q_ref, - void* result, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef + dpnp_zeros_like_c(DPCTLSyclQueueRef q_ref, void* result, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { return dpnp_zeros_c<_DataType>(q_ref, result, size, dep_event_vec_ref); } @@ -1264,10 +1144,7 @@ void dpnp_zeros_like_c(void* result, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_zeros_like_c<_DataType>(q_ref, - result, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_zeros_like_c<_DataType>(q_ref, result, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1275,10 +1152,8 @@ template void (*dpnp_zeros_like_default_c)(void*, size_t) = dpnp_zeros_like_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_zeros_like_ext_c)(DPCTLSyclQueueRef, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_zeros_like_c<_DataType>; +DPCTLSyclEventRef (*dpnp_zeros_like_ext_c)(DPCTLSyclQueueRef, void*, size_t, const DPCTLEventVectorRef) = + dpnp_zeros_like_c<_DataType>; void func_map_init_arraycreation(func_map_t& fmap) { @@ -1317,32 +1192,30 @@ void func_map_init_arraycreation(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_FULL][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_full_default_c}; fmap[DPNPFuncName::DPNP_FN_FULL][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_full_default_c}; fmap[DPNPFuncName::DPNP_FN_FULL][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_full_default_c}; - fmap[DPNPFuncName::DPNP_FN_FULL][eft_C128][eft_C128] = {eft_C128, - (void*)dpnp_full_default_c>}; + fmap[DPNPFuncName::DPNP_FN_FULL][eft_C128][eft_C128] = {eft_C128, (void*)dpnp_full_default_c>}; fmap[DPNPFuncName::DPNP_FN_FULL_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_full_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_full_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_full_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_full_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_EXT][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_full_ext_c}; - fmap[DPNPFuncName::DPNP_FN_FULL_EXT][eft_C128][eft_C128] = {eft_C128, - (void*)dpnp_full_ext_c>}; + fmap[DPNPFuncName::DPNP_FN_FULL_EXT][eft_C128][eft_C128] = {eft_C128, (void*)dpnp_full_ext_c>}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_full_like_default_c}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_full_like_default_c}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_full_like_default_c}; 
fmap[DPNPFuncName::DPNP_FN_FULL_LIKE][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_full_like_default_c}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_full_like_default_c}; - fmap[DPNPFuncName::DPNP_FN_FULL_LIKE][eft_C128][eft_C128] = { - eft_C128, (void*)dpnp_full_like_default_c>}; + fmap[DPNPFuncName::DPNP_FN_FULL_LIKE][eft_C128][eft_C128] = {eft_C128, + (void*)dpnp_full_like_default_c>}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_full_like_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_full_like_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_full_like_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_full_like_ext_c}; fmap[DPNPFuncName::DPNP_FN_FULL_LIKE_EXT][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_full_like_ext_c}; - fmap[DPNPFuncName::DPNP_FN_FULL_LIKE_EXT][eft_C128][eft_C128] = { - eft_C128, (void*)dpnp_full_like_ext_c>}; + fmap[DPNPFuncName::DPNP_FN_FULL_LIKE_EXT][eft_C128][eft_C128] = {eft_C128, + (void*)dpnp_full_like_ext_c>}; fmap[DPNPFuncName::DPNP_FN_IDENTITY][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_identity_default_c}; fmap[DPNPFuncName::DPNP_FN_IDENTITY][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_identity_default_c}; @@ -1365,32 +1238,30 @@ void func_map_init_arraycreation(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_ONES][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_ones_default_c}; fmap[DPNPFuncName::DPNP_FN_ONES][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_ones_default_c}; fmap[DPNPFuncName::DPNP_FN_ONES][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_ones_default_c}; - fmap[DPNPFuncName::DPNP_FN_ONES][eft_C128][eft_C128] = {eft_C128, - (void*)dpnp_ones_default_c>}; + fmap[DPNPFuncName::DPNP_FN_ONES][eft_C128][eft_C128] = {eft_C128, (void*)dpnp_ones_default_c>}; fmap[DPNPFuncName::DPNP_FN_ONES_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_ones_ext_c}; fmap[DPNPFuncName::DPNP_FN_ONES_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_ones_ext_c}; fmap[DPNPFuncName::DPNP_FN_ONES_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_ones_ext_c}; fmap[DPNPFuncName::DPNP_FN_ONES_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_ones_ext_c}; fmap[DPNPFuncName::DPNP_FN_ONES_EXT][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_ones_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ONES_EXT][eft_C128][eft_C128] = {eft_C128, - (void*)dpnp_ones_ext_c>}; + fmap[DPNPFuncName::DPNP_FN_ONES_EXT][eft_C128][eft_C128] = {eft_C128, (void*)dpnp_ones_ext_c>}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_ones_like_default_c}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_ones_like_default_c}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_ones_like_default_c}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_ones_like_default_c}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_ones_like_default_c}; - fmap[DPNPFuncName::DPNP_FN_ONES_LIKE][eft_C128][eft_C128] = { - eft_C128, (void*)dpnp_ones_like_default_c>}; + fmap[DPNPFuncName::DPNP_FN_ONES_LIKE][eft_C128][eft_C128] = {eft_C128, + (void*)dpnp_ones_like_default_c>}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_ones_like_ext_c}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_ones_like_ext_c}; 
fmap[DPNPFuncName::DPNP_FN_ONES_LIKE_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_ones_like_ext_c}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_ones_like_ext_c}; fmap[DPNPFuncName::DPNP_FN_ONES_LIKE_EXT][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_ones_like_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ONES_LIKE_EXT][eft_C128][eft_C128] = { - eft_C128, (void*)dpnp_ones_like_ext_c>}; + fmap[DPNPFuncName::DPNP_FN_ONES_LIKE_EXT][eft_C128][eft_C128] = {eft_C128, + (void*)dpnp_ones_like_ext_c>}; fmap[DPNPFuncName::DPNP_FN_PTP][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_ptp_default_c}; fmap[DPNPFuncName::DPNP_FN_PTP][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_ptp_default_c}; diff --git a/dpnp/backend/kernels/dpnp_krnl_bitwise.cpp b/dpnp/backend/kernels/dpnp_krnl_bitwise.cpp index b64670be4e09..9c00481bd364 100644 --- a/dpnp/backend/kernels/dpnp_krnl_bitwise.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_bitwise.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -35,11 +35,8 @@ template class dpnp_invert_c_kernel; template -DPCTLSyclEventRef dpnp_invert_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_invert_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -78,11 +75,7 @@ void dpnp_invert_c(void* array1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_invert_c<_DataType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_invert_c<_DataType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -90,11 +83,8 @@ template void (*dpnp_invert_default_c)(void*, void*, size_t) = dpnp_invert_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_invert_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_invert_c<_DataType>; +DPCTLSyclEventRef (*dpnp_invert_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_invert_c<_DataType>; static void func_map_init_bitwise_1arg_1type(func_map_t& fmap) { diff --git a/dpnp/backend/kernels/dpnp_krnl_common.cpp b/dpnp/backend/kernels/dpnp_krnl_common.cpp index 7ae9127041a0..a4f57e56266f 100644 --- a/dpnp/backend/kernels/dpnp_krnl_common.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_common.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -91,11 +91,8 @@ void dpnp_astype_c(const void* array1_in, void* result1, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_astype_c<_DataType, _ResultType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_astype_c<_DataType, _ResultType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -103,11 +100,8 @@ template void (*dpnp_astype_default_c)(const void*, void*, const size_t) = dpnp_astype_c<_DataType, _ResultType>; template -DPCTLSyclEventRef (*dpnp_astype_ext_c)(DPCTLSyclQueueRef, - const void*, - void*, - const size_t, - const DPCTLEventVectorRef) = dpnp_astype_c<_DataType, _ResultType>; +DPCTLSyclEventRef (*dpnp_astype_ext_c)(DPCTLSyclQueueRef, const void*, void*, const size_t, const DPCTLEventVectorRef) = + dpnp_astype_c<_DataType, _ResultType>; template class dpnp_dot_c_kernel; @@ -329,69 +323,69 @@ DPCTLSyclEventRef dpnp_dot_c(DPCTLSyclQueueRef q_ref, // there is a difference of behavior with trans and sizes params in previous version of GEMM // only new version is supported, in case of old version computation goes in common way #if INTEL_MKL_VERSION >= 20210004 - // is mat1 F-contiguous, C-contiguous - bool mat1_f_contig = ( - ((ext_input1_shape[0] == 1) || (ext_input1_strides[0] == 1)) && - ((ext_input1_shape[1] == 1) || (ext_input1_strides[1] == ext_input1_shape[0]))); - bool mat1_c_contig = ( - ((ext_input1_shape[1] == 1) || (ext_input1_strides[1] == 1)) && - ((ext_input1_shape[0] == 1) || (ext_input1_strides[0] == ext_input1_shape[1]))); - // is mat2 F-contiguous, C-contiguous - bool mat2_f_contig = ( - ((ext_input2_shape[0] == 1) || (ext_input2_strides[0] == 1)) && - ((ext_input2_shape[1] == 1) || (ext_input2_strides[1] == ext_input2_shape[0]))); - bool mat2_c_contig = ( - ((ext_input2_shape[1] == 1) || (ext_input2_strides[1] == 1)) && - ((ext_input2_shape[0] == 1) || (ext_input2_strides[0] == ext_input2_shape[1]))); - - if ((mat1_f_contig || mat1_c_contig) && (mat2_f_contig || mat2_c_contig)) { - oneapi::mkl::transpose trans1 = - (mat1_f_contig && !mat1_c_contig) ? oneapi::mkl::transpose::trans : oneapi::mkl::transpose::nontrans; - oneapi::mkl::transpose trans2 = - (mat2_f_contig && !mat2_c_contig) ? oneapi::mkl::transpose::trans : oneapi::mkl::transpose::nontrans; - - const size_t size_m = ext_input1_shape[0]; - const size_t size_n = ext_input2_shape[1]; - const size_t size_k = ext_input1_shape[1]; - - const std::int64_t lda = - trans1 == oneapi::mkl::transpose::nontrans ? ext_input1_strides[0] : ext_input1_strides[1]; - const std::int64_t ldb = - trans2 == oneapi::mkl::transpose::nontrans ? ext_input2_strides[0] : ext_input2_strides[1]; - - // definition of ldc will be another for result with non-standard (c-contiguous) strides - // const std::int64_t ldc = result_strides[0] == 1 ? 
result_strides[1] : result_strides[0]; - const std::int64_t ldc = size_n; - - try { - sycl::event event = mkl_blas_rm::gemm(q, - trans1, - trans2, - size_m, - size_n, - size_k, - _DataType_output(1), // alpha - input1, - lda, - input2, - ldb, - _DataType_output(0), // beta - result, - ldc); - event.wait(); - delete[] ext_input1_shape; - delete[] ext_input1_strides; - delete[] ext_input2_shape; - delete[] ext_input2_strides; - delete[] ext_result_shape; - - return event_ref; - } catch (const std::exception &e) { - // do nothing, proceed to general case - } + // is mat1 F-contiguous, C-contiguous + bool mat1_f_contig = (((ext_input1_shape[0] == 1) || (ext_input1_strides[0] == 1)) && + ((ext_input1_shape[1] == 1) || (ext_input1_strides[1] == ext_input1_shape[0]))); + bool mat1_c_contig = (((ext_input1_shape[1] == 1) || (ext_input1_strides[1] == 1)) && + ((ext_input1_shape[0] == 1) || (ext_input1_strides[0] == ext_input1_shape[1]))); + // is mat2 F-contiguous, C-contiguous + bool mat2_f_contig = (((ext_input2_shape[0] == 1) || (ext_input2_strides[0] == 1)) && + ((ext_input2_shape[1] == 1) || (ext_input2_strides[1] == ext_input2_shape[0]))); + bool mat2_c_contig = (((ext_input2_shape[1] == 1) || (ext_input2_strides[1] == 1)) && + ((ext_input2_shape[0] == 1) || (ext_input2_strides[0] == ext_input2_shape[1]))); + + if ((mat1_f_contig || mat1_c_contig) && (mat2_f_contig || mat2_c_contig)) + { + oneapi::mkl::transpose trans1 = (mat1_f_contig && !mat1_c_contig) ? oneapi::mkl::transpose::trans + : oneapi::mkl::transpose::nontrans; + oneapi::mkl::transpose trans2 = (mat2_f_contig && !mat2_c_contig) ? oneapi::mkl::transpose::trans + : oneapi::mkl::transpose::nontrans; + + const size_t size_m = ext_input1_shape[0]; + const size_t size_n = ext_input2_shape[1]; + const size_t size_k = ext_input1_shape[1]; + + const std::int64_t lda = + trans1 == oneapi::mkl::transpose::nontrans ? ext_input1_strides[0] : ext_input1_strides[1]; + const std::int64_t ldb = + trans2 == oneapi::mkl::transpose::nontrans ? ext_input2_strides[0] : ext_input2_strides[1]; + + // definition of ldc will be another for result with non-standard (c-contiguous) strides + // const std::int64_t ldc = result_strides[0] == 1 ? 
result_strides[1] : result_strides[0]; + const std::int64_t ldc = size_n; + + try + { + sycl::event event = mkl_blas_rm::gemm(q, + trans1, + trans2, + size_m, + size_n, + size_k, + _DataType_output(1), // alpha + input1, + lda, + input2, + ldb, + _DataType_output(0), // beta + result, + ldc); + event.wait(); + delete[] ext_input1_shape; + delete[] ext_input1_strides; + delete[] ext_input2_shape; + delete[] ext_input2_strides; + delete[] ext_result_shape; + + return event_ref; + } + catch (const std::exception& e) + { + // do nothing, proceed to general case + } #endif - } - } + } + } } std::vector dot_events; @@ -513,7 +507,8 @@ DPCTLSyclEventRef (*dpnp_dot_ext_c)(DPCTLSyclQueueRef, const size_t, const shape_elem_type*, const shape_elem_type*, - const DPCTLEventVectorRef) = dpnp_dot_c<_DataType_output, _DataType_input1, _DataType_input2>; + const DPCTLEventVectorRef) = + dpnp_dot_c<_DataType_output, _DataType_input1, _DataType_input2>; template DPCTLSyclEventRef dpnp_eig_c(DPCTLSyclQueueRef q_ref, @@ -559,8 +554,8 @@ DPCTLSyclEventRef dpnp_eig_c(DPCTLSyclQueueRef q_ref, const std::int64_t lda = std::max(1UL, size); - const std::int64_t scratchpad_size = mkl_lapack::syevd_scratchpad_size( - q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, size, lda); + const std::int64_t scratchpad_size = + mkl_lapack::syevd_scratchpad_size(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, size, lda); // https://github.com/IntelPython/dpnp/issues/1005 // Test tests/test_linalg.py::test_eig_arange raises 2 issues in dpnp_eig_c on CPU @@ -607,12 +602,8 @@ void dpnp_eig_c(const void* array_in, void* result1, void* result2, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_eig_c<_DataType, _ResultType>(q_ref, - array_in, - result1, - result2, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_eig_c<_DataType, _ResultType>(q_ref, array_in, result1, result2, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -620,12 +611,8 @@ template void (*dpnp_eig_default_c)(const void*, void*, void*, size_t) = dpnp_eig_c<_DataType, _ResultType>; template -DPCTLSyclEventRef (*dpnp_eig_ext_c)(DPCTLSyclQueueRef, - const void*, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_eig_c<_DataType, _ResultType>; +DPCTLSyclEventRef (*dpnp_eig_ext_c)(DPCTLSyclQueueRef, const void*, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_eig_c<_DataType, _ResultType>; template DPCTLSyclEventRef dpnp_eigvals_c(DPCTLSyclQueueRef q_ref, @@ -668,8 +655,8 @@ DPCTLSyclEventRef dpnp_eigvals_c(DPCTLSyclQueueRef q_ref, const std::int64_t lda = std::max(1UL, size); - const std::int64_t scratchpad_size = mkl_lapack::syevd_scratchpad_size( - q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, size, lda); + const std::int64_t scratchpad_size = + mkl_lapack::syevd_scratchpad_size(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, size, lda); double* scratchpad = reinterpret_cast(sycl::malloc_shared(scratchpad_size * sizeof(double), q)); @@ -701,11 +688,8 @@ void dpnp_eigvals_c(const void* array_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_eigvals_c<_DataType, _ResultType>(q_ref, - array_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_eigvals_c<_DataType, _ResultType>(q_ref, array_in, result1, size, dep_event_vec_ref); 
DPCTLEvent_WaitAndThrow(event_ref); } @@ -713,21 +697,15 @@ template void (*dpnp_eigvals_default_c)(const void*, void*, size_t) = dpnp_eigvals_c<_DataType, _ResultType>; template -DPCTLSyclEventRef (*dpnp_eigvals_ext_c)(DPCTLSyclQueueRef, - const void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_eigvals_c<_DataType, _ResultType>; +DPCTLSyclEventRef (*dpnp_eigvals_ext_c)(DPCTLSyclQueueRef, const void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_eigvals_c<_DataType, _ResultType>; template class dpnp_initval_c_kernel; template -DPCTLSyclEventRef dpnp_initval_c(DPCTLSyclQueueRef q_ref, - void* result1, - void* value, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_initval_c( + DPCTLSyclQueueRef q_ref, void* result1, void* value, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -768,24 +746,16 @@ void dpnp_initval_c(void* result1, void* value, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_initval_c<_DataType>(q_ref, - result1, - value, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_initval_c<_DataType>(q_ref, result1, value, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); - } template void (*dpnp_initval_default_c)(void*, void*, size_t) = dpnp_initval_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_initval_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_initval_c<_DataType>; +DPCTLSyclEventRef (*dpnp_initval_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_initval_c<_DataType>; template class dpnp_matmul_c_kernel; @@ -1041,71 +1011,39 @@ void func_map_init_linalg(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_ASTYPE_EXT][eft_C128][eft_C128] = { eft_C128, (void*)dpnp_astype_ext_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_LNG] = {eft_LNG, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_FLT] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_INT] = {eft_LNG, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_FLT] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_INT] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_LNG] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_INT] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_dot_default_c}; - fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_default_c}; - - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_INT] = {eft_INT, - 
(void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_LNG] = {eft_LNG, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_FLT] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_INT] = {eft_LNG, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_FLT] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_INT] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_LNG] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_INT] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_dot_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_FLT] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_FLT] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_INT] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_dot_default_c}; + fmap[DPNPFuncName::DPNP_FN_DOT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_dot_default_c}; + + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_FLT] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_FLT] = {eft_DBL, (void*)dpnp_dot_ext_c}; + 
fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_INT] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_dot_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DOT_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_dot_ext_c}; fmap[DPNPFuncName::DPNP_FN_EIG][eft_INT][eft_INT] = {eft_DBL, (void*)dpnp_eig_default_c}; fmap[DPNPFuncName::DPNP_FN_EIG][eft_LNG][eft_LNG] = {eft_DBL, (void*)dpnp_eig_default_c}; diff --git a/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp b/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp index 63b6195e7889..c868d7e020e4 100644 --- a/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_elemwise.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -743,16 +743,14 @@ static void func_map_init_elemwise_1arg_1type(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_COPY][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_copy_c_default}; fmap[DPNPFuncName::DPNP_FN_COPY][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_copy_c_default}; fmap[DPNPFuncName::DPNP_FN_COPY][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_copy_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPY][eft_C128][eft_C128] = {eft_C128, - (void*)dpnp_copy_c_default>}; + fmap[DPNPFuncName::DPNP_FN_COPY][eft_C128][eft_C128] = {eft_C128, (void*)dpnp_copy_c_default>}; fmap[DPNPFuncName::DPNP_FN_COPY_EXT][eft_BLN][eft_BLN] = {eft_BLN, (void*)dpnp_copy_c_ext}; fmap[DPNPFuncName::DPNP_FN_COPY_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_copy_c_ext}; fmap[DPNPFuncName::DPNP_FN_COPY_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_copy_c_ext}; fmap[DPNPFuncName::DPNP_FN_COPY_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_copy_c_ext}; fmap[DPNPFuncName::DPNP_FN_COPY_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_copy_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPY_EXT][eft_C128][eft_C128] = {eft_C128, - (void*)dpnp_copy_c_ext>}; + fmap[DPNPFuncName::DPNP_FN_COPY_EXT][eft_C128][eft_C128] = {eft_C128, (void*)dpnp_copy_c_ext>}; fmap[DPNPFuncName::DPNP_FN_ERF][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_erf_c_default}; fmap[DPNPFuncName::DPNP_FN_ERF][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_erf_c_default}; @@ -919,8 +917,8 @@ static void func_map_init_elemwise_1arg_1type(func_map_t& fmap) { \ DPNPC_id<_DataType_input1>* input1_it; \ const size_t input1_it_size_in_bytes = sizeof(DPNPC_id<_DataType_input1>); \ - input1_it = reinterpret_cast*>(dpnp_memory_alloc_c(q_ref, \ - input1_it_size_in_bytes)); \ + input1_it = \ + reinterpret_cast*>(dpnp_memory_alloc_c(q_ref, input1_it_size_in_bytes)); \ new (input1_it) \ DPNPC_id<_DataType_input1>(q_ref, input1_data, input1_shape_data, input1_strides_data, input1_ndim); \ \ @@ -928,8 +926,8 @@ static void func_map_init_elemwise_1arg_1type(func_map_t& fmap) \ 
DPNPC_id<_DataType_input2>* input2_it; \ const size_t input2_it_size_in_bytes = sizeof(DPNPC_id<_DataType_input2>); \ - input2_it = reinterpret_cast*>(dpnp_memory_alloc_c(q_ref, \ - input2_it_size_in_bytes)); \ + input2_it = \ + reinterpret_cast*>(dpnp_memory_alloc_c(q_ref, input2_it_size_in_bytes)); \ new (input2_it) \ DPNPC_id<_DataType_input2>(q_ref, input2_data, input2_shape_data, input2_strides_data, input2_ndim); \ \ @@ -1046,26 +1044,25 @@ static void func_map_init_elemwise_1arg_1type(func_map_t& fmap) { \ DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); \ DPCTLEventVectorRef dep_event_vec_ref = nullptr; \ - DPCTLSyclEventRef event_ref = __name__<_DataType_output, _DataType_input1, _DataType_input2>( \ - q_ref, \ - result_out, \ - result_size, \ - result_ndim, \ - result_shape, \ - result_strides, \ - input1_in, \ - input1_size, \ - input1_ndim, \ - input1_shape, \ - input1_strides, \ - input2_in, \ - input2_size, \ - input2_ndim, \ - input2_shape, \ - input2_strides, \ - where, \ - dep_event_vec_ref \ - ); \ + DPCTLSyclEventRef event_ref = \ + __name__<_DataType_output, _DataType_input1, _DataType_input2>(q_ref, \ + result_out, \ + result_size, \ + result_ndim, \ + result_shape, \ + result_strides, \ + input1_in, \ + input1_size, \ + input1_ndim, \ + input1_shape, \ + input1_strides, \ + input2_in, \ + input2_size, \ + input2_ndim, \ + input2_shape, \ + input2_strides, \ + where, \ + dep_event_vec_ref); \ DPCTLEvent_WaitAndThrow(event_ref); \ } \ \ @@ -1105,79 +1102,46 @@ static void func_map_init_elemwise_1arg_1type(func_map_t& fmap) const shape_elem_type*, \ const shape_elem_type*, \ const size_t*, \ - const DPCTLEventVectorRef) = __name__<_DataType_output, \ - _DataType_input1, \ - _DataType_input2>; + const DPCTLEventVectorRef) = \ + __name__<_DataType_output, _DataType_input1, _DataType_input2>; #include static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) { - fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_LNG] = {eft_LNG, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_FLT] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_INT] = {eft_LNG, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_FLT] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_INT] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_LNG] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_INT] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_add_c_default}; - fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_default}; - - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_add_c_ext}; - 
fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_LNG] = {eft_LNG, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_FLT] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_INT] = {eft_LNG, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_FLT] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_INT] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_LNG] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_INT] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_add_c_ext}; - fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_FLT] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_FLT] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_INT] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_add_c_default}; + fmap[DPNPFuncName::DPNP_FN_ADD][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_default}; + + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_FLT] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_FLT] = {eft_DBL, (void*)dpnp_add_c_ext}; + 
fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_INT] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_add_c_ext}; + fmap[DPNPFuncName::DPNP_FN_ADD_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_add_c_ext}; fmap[DPNPFuncName::DPNP_FN_ARCTAN2][eft_INT][eft_INT] = {eft_DBL, (void*)dpnp_arctan2_c_default}; @@ -1245,71 +1209,71 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_ARCTAN2_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_arctan2_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_default}; - - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - 
fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_INT] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_LNG] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; - fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_INT][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_LNG][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_default}; + + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_INT][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_LNG][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + 
fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_INT] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_LNG] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; + fmap[DPNPFuncName::DPNP_FN_COPYSIGN_EXT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_copysign_c_ext}; fmap[DPNPFuncName::DPNP_FN_DIVIDE][eft_INT][eft_INT] = {eft_DBL, (void*)dpnp_divide_c_default}; @@ -1331,8 +1295,7 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) (void*)dpnp_divide_c_default}; fmap[DPNPFuncName::DPNP_FN_DIVIDE][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_divide_c_default}; - fmap[DPNPFuncName::DPNP_FN_DIVIDE][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_divide_c_default}; + fmap[DPNPFuncName::DPNP_FN_DIVIDE][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_divide_c_default}; fmap[DPNPFuncName::DPNP_FN_DIVIDE][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_divide_c_default}; fmap[DPNPFuncName::DPNP_FN_DIVIDE][eft_DBL][eft_INT] = {eft_DBL, @@ -1364,8 +1327,7 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) (void*)dpnp_divide_c_ext}; fmap[DPNPFuncName::DPNP_FN_DIVIDE_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_divide_c_ext}; - fmap[DPNPFuncName::DPNP_FN_DIVIDE_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_divide_c_ext}; + fmap[DPNPFuncName::DPNP_FN_DIVIDE_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_divide_c_ext}; fmap[DPNPFuncName::DPNP_FN_DIVIDE_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_divide_c_ext}; fmap[DPNPFuncName::DPNP_FN_DIVIDE_EXT][eft_DBL][eft_INT] = {eft_DBL, @@ -1381,67 +1343,43 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) (void*)dpnp_fmod_c_default}; fmap[DPNPFuncName::DPNP_FN_FMOD][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_INT][eft_FLT] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_INT][eft_FLT] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_default}; fmap[DPNPFuncName::DPNP_FN_FMOD][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_fmod_c_default}; fmap[DPNPFuncName::DPNP_FN_FMOD][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_LNG][eft_FLT] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_INT] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_LNG] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_INT] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - 
fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_fmod_c_default}; - fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_LNG][eft_FLT] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_INT] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_fmod_c_default}; + fmap[DPNPFuncName::DPNP_FN_FMOD][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_default}; fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_fmod_c_ext}; fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_INT][eft_FLT] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_INT][eft_FLT] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_ext}; fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_fmod_c_ext}; fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_LNG][eft_FLT] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_INT] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_LNG] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_INT] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; - fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_LNG][eft_FLT] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_INT] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_LNG] = 
{eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_fmod_c_ext}; + fmap[DPNPFuncName::DPNP_FN_FMOD_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_fmod_c_ext}; fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_INT][eft_INT] = {eft_DBL, (void*)dpnp_hypot_c_default}; @@ -1463,16 +1401,13 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) (void*)dpnp_hypot_c_default}; fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_hypot_c_default}; - fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_hypot_c_default}; - fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_hypot_c_default}; + fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_hypot_c_default}; + fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_hypot_c_default}; fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_hypot_c_default}; fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_hypot_c_default}; - fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_hypot_c_default}; + fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_hypot_c_default}; fmap[DPNPFuncName::DPNP_FN_HYPOT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_hypot_c_default}; @@ -1496,16 +1431,13 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) (void*)dpnp_hypot_c_ext}; fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_hypot_c_ext}; - fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_hypot_c_ext}; - fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_hypot_c_ext}; + fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_hypot_c_ext}; + fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_hypot_c_ext}; fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_hypot_c_ext}; fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_hypot_c_ext}; - fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_hypot_c_ext}; + fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_hypot_c_ext}; fmap[DPNPFuncName::DPNP_FN_HYPOT_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_hypot_c_ext}; @@ -1641,56 +1573,56 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_MINIMUM_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_minimum_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_BLN] = { - eft_BLN, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_INT] = { - eft_INT, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_LNG] = { - eft_LNG, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_FLT] = { - eft_FLT, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_BLN] = { - eft_INT, (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_BLN] = {eft_BLN, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_INT] = {eft_INT, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_LNG] = 
{eft_LNG, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_FLT] = {eft_FLT, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_BLN][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_BLN] = {eft_INT, + (void*)dpnp_multiply_c_default}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_INT] = { eft_INT, (void*)dpnp_multiply_c_default}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_BLN] = { - eft_LNG, (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_INT][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_BLN] = {eft_LNG, + (void*)dpnp_multiply_c_default}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_multiply_c_default}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_BLN] = { - eft_FLT, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_BLN] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_INT] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_LNG] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_multiply_c_default}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_LNG][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_BLN] = {eft_FLT, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_BLN] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_INT] = {eft_DBL, + 
(void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_LNG] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_multiply_c_default}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_default}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_C64][eft_BLN] = { eft_C64, (void*)dpnp_multiply_c_default, std::complex, bool>}; @@ -1722,56 +1654,56 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_MULTIPLY][eft_C128][eft_C128] = { eft_C128, (void*)dpnp_multiply_c_default, std::complex, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_BLN] = { - eft_BLN, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_INT] = { - eft_INT, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_LNG] = { - eft_LNG, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_FLT] = { - eft_FLT, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_BLN] = { - eft_INT, (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_BLN] = {eft_BLN, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_INT] = {eft_INT, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_LNG] = {eft_LNG, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_FLT] = {eft_FLT, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_BLN][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_BLN] = {eft_INT, + (void*)dpnp_multiply_c_ext}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_INT] = { eft_INT, (void*)dpnp_multiply_c_ext}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_BLN] = { - eft_LNG, (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_INT][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_BLN] = {eft_LNG, + (void*)dpnp_multiply_c_ext}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_multiply_c_ext}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_BLN] = { - eft_FLT, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - 
fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_BLN] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_INT] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_LNG] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; - fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_LNG][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_BLN] = {eft_FLT, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_BLN] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_INT] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_LNG] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; + fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_multiply_c_ext}; fmap[DPNPFuncName::DPNP_FN_MULTIPLY_EXT][eft_C64][eft_BLN] = { eft_C64, (void*)dpnp_multiply_c_ext, std::complex, bool>}; @@ -1823,16 +1755,13 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) (void*)dpnp_power_c_default}; fmap[DPNPFuncName::DPNP_FN_POWER][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_power_c_default}; - fmap[DPNPFuncName::DPNP_FN_POWER][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_power_c_default}; - fmap[DPNPFuncName::DPNP_FN_POWER][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_power_c_default}; + fmap[DPNPFuncName::DPNP_FN_POWER][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_power_c_default}; + fmap[DPNPFuncName::DPNP_FN_POWER][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_power_c_default}; fmap[DPNPFuncName::DPNP_FN_POWER][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_power_c_default}; fmap[DPNPFuncName::DPNP_FN_POWER][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_power_c_default}; - fmap[DPNPFuncName::DPNP_FN_POWER][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_power_c_default}; + fmap[DPNPFuncName::DPNP_FN_POWER][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_power_c_default}; fmap[DPNPFuncName::DPNP_FN_POWER][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_power_c_default}; @@ -1856,16 +1785,13 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) (void*)dpnp_power_c_ext}; fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_power_c_ext}; - fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_power_c_ext}; - fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_FLT][eft_DBL] = {eft_DBL, 
- (void*)dpnp_power_c_ext}; + fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_power_c_ext}; + fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_power_c_ext}; fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_power_c_ext}; fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_power_c_ext}; - fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_power_c_ext}; + fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_power_c_ext}; fmap[DPNPFuncName::DPNP_FN_POWER_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_power_c_ext}; @@ -1873,67 +1799,67 @@ static void func_map_init_elemwise_2arg_3type(func_map_t& fmap) eft_INT, (void*)dpnp_subtract_c_default}; fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_INT][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_INT][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_default}; fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_subtract_c_default}; fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_LNG][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_INT] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_LNG] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_subtract_c_default}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_LNG][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_INT] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_LNG] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_subtract_c_default}; + 
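// ---------------------------------------------------------------------------
// Illustrative aside (not part of this patch): each operation is registered
// twice. The DPNP_FN_<OP> tables point at the *_c_default kernels, which run
// on the library's global DPNP_QUEUE and wait for completion before
// returning, while the DPNP_FN_<OP>_EXT tables point at *_c_ext kernels that
// take an explicit SYCL queue reference plus a dependency-event vector and
// hand back an event (see the dpnp_put_default_c / dpnp_put_ext_c typedefs
// later in this patch). Below is a minimal sketch of the two signature
// shapes; the sketch_* handle aliases are placeholders for DPCTLSyclQueueRef,
// DPCTLSyclEventRef and DPCTLEventVectorRef, not the real declarations.
#include <cstddef>

using sketch_queue_ref = void*;        // stands in for DPCTLSyclQueueRef
using sketch_event_ref = void*;        // stands in for DPCTLSyclEventRef
using sketch_event_vec_ref = void*;    // stands in for DPCTLEventVectorRef

// legacy/default form: synchronous, global queue, nothing returned
using sketch_default_fn = void (*)(void* result, const void* lhs,
                                   const void* rhs, std::size_t size);

// _EXT form: caller supplies the queue and dependencies, receives an event
using sketch_ext_fn = sketch_event_ref (*)(sketch_queue_ref q, void* result,
                                           const void* lhs, const void* rhs,
                                           std::size_t size,
                                           sketch_event_vec_ref deps);
// ---------------------------------------------------------------------------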
fmap[DPNPFuncName::DPNP_FN_SUBTRACT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_default}; fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_INT][eft_INT] = { eft_INT, (void*)dpnp_subtract_c_ext}; fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_INT][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_INT][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_subtract_c_ext}; fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_LNG][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_INT] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_LNG] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; - fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_LNG][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_INT] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_LNG] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; + fmap[DPNPFuncName::DPNP_FN_SUBTRACT_EXT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_subtract_c_ext}; return; } diff --git a/dpnp/backend/kernels/dpnp_krnl_fft.cpp b/dpnp/backend/kernels/dpnp_krnl_fft.cpp index 23f612012720..27a91450920a 100644 --- a/dpnp/backend/kernels/dpnp_krnl_fft.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_fft.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -81,9 +81,8 @@ void dpnp_fft_fft_sycl_c(DPCTLSyclQueueRef q_ref, shape_elem_type* input_shape_offsets = reinterpret_cast(sycl::malloc_shared(shape_size * sizeof(shape_elem_type), queue)); // must be a thread local storage. - shape_elem_type* axis_iterator = - reinterpret_cast(sycl::malloc_shared(result_size * shape_size * sizeof(shape_elem_type), - queue)); + shape_elem_type* axis_iterator = reinterpret_cast( + sycl::malloc_shared(result_size * shape_size * sizeof(shape_elem_type), queue)); get_shape_offsets_inkernel(output_shape, shape_size, output_shape_offsets); get_shape_offsets_inkernel(input_shape, shape_size, input_shape_offsets); @@ -203,14 +202,22 @@ void dpnp_fft_fft_mathlib_cmplx_to_cmplx_c(DPCTLSyclQueueRef q_ref, double backward_scale = 1.; double forward_scale = 1.; - if (norm == 0) { // norm = "backward" + if (norm == 0) + { // norm = "backward" backward_scale = 1. / shift; - } else if (norm == 1) { // norm = "forward" + } + else if (norm == 1) + { // norm = "forward" forward_scale = 1. / shift; - } else { // norm = "ortho" - if (inverse) { + } + else + { // norm = "ortho" + if (inverse) + { backward_scale = 1. / sqrt(shift); - } else { + } + else + { forward_scale = 1. / sqrt(shift); } } @@ -225,10 +232,14 @@ void dpnp_fft_fft_mathlib_cmplx_to_cmplx_c(DPCTLSyclQueueRef q_ref, std::vector fft_events; fft_events.reserve(n_iter); - for (size_t i = 0; i < n_iter; ++i) { - if (inverse) { + for (size_t i = 0; i < n_iter; ++i) + { + if (inverse) + { fft_events.push_back(mkl_dft::compute_backward(desc, array_1 + i * shift, result + i * shift)); - } else { + } + else + { fft_events.push_back(mkl_dft::compute_forward(desc, array_1 + i * shift, result + i * shift)); } } @@ -276,19 +287,30 @@ void dpnp_fft_fft_mathlib_real_to_cmplx_c(DPCTLSyclQueueRef q_ref, double backward_scale = 1.; double forward_scale = 1.; - if (norm == 0) { // norm = "backward" - if (inverse) { + if (norm == 0) + { // norm = "backward" + if (inverse) + { forward_scale = 1. / result_shift; - } else { + } + else + { backward_scale = 1. / result_shift; } - } else if (norm == 1) { // norm = "forward" - if (inverse) { + } + else if (norm == 1) + { // norm = "forward" + if (inverse) + { backward_scale = 1. / result_shift; - } else { + } + else + { forward_scale = 1. / result_shift; } - } else { // norm = "ortho" + } + else + { // norm = "ortho" forward_scale = 1. 
/ sqrt(result_shift); } @@ -301,13 +323,15 @@ void dpnp_fft_fft_mathlib_real_to_cmplx_c(DPCTLSyclQueueRef q_ref, std::vector fft_events; fft_events.reserve(n_iter); - for (size_t i = 0; i < n_iter; ++i) { + for (size_t i = 0; i < n_iter; ++i) + { fft_events.push_back(mkl_dft::compute_forward(desc, array_1 + i * input_shift, result + i * result_shift * 2)); } sycl::event::wait(fft_events); - if (real) { // the output size of the rfft function is input_size/2 + 1 so we don't need to fill the second half of the output + if (real) + { // the output size of the rfft function is input_size/2 + 1 so we don't need to fill the second half of the output return; } @@ -322,21 +346,28 @@ void dpnp_fft_fft_mathlib_real_to_cmplx_c(DPCTLSyclQueueRef q_ref, { size_t j = global_id[1]; { - *(reinterpret_cast*>(result) + result_shift * (i + 1) - (j + 1)) = std::conj(*(reinterpret_cast*>(result) + result_shift * i + (j + 1))); + *(reinterpret_cast*>(result) + result_shift * (i + 1) - (j + 1)) = + std::conj( + *(reinterpret_cast*>(result) + result_shift * i + (j + 1))); } } }; auto kernel_func = [&](sycl::handler& cgh) { - cgh.parallel_for>( + cgh.parallel_for< + class dpnp_fft_fft_mathlib_real_to_cmplx_c_kernel<_DataType_input, _DataType_output, _Descriptor_type>>( gws, kernel_parallel_for_func); }; event = queue.submit(kernel_func); event.wait(); - if (inverse) { - event = oneapi::mkl::vm::conj(queue, result_size, reinterpret_cast*>(result), reinterpret_cast*>(result)); + if (inverse) + { + event = oneapi::mkl::vm::conj(queue, + result_size, + reinterpret_cast*>(result), + reinterpret_cast*>(result)); event.wait(); } @@ -377,16 +408,34 @@ DPCTLSyclEventRef dpnp_fft_fft_c(DPCTLSyclQueueRef q_ref, std::is_same<_DataType_output, std::complex>::value) { desc_dp_cmplx_t desc(dim); - dpnp_fft_fft_mathlib_cmplx_to_cmplx_c<_DataType_input, _DataType_output, desc_dp_cmplx_t>( - q_ref, array1_in, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm); + dpnp_fft_fft_mathlib_cmplx_to_cmplx_c<_DataType_input, _DataType_output, desc_dp_cmplx_t>(q_ref, + array1_in, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm); } /* complex-to-complex, single precision */ else if constexpr (std::is_same<_DataType_input, std::complex>::value && std::is_same<_DataType_output, std::complex>::value) { desc_sp_cmplx_t desc(dim); - dpnp_fft_fft_mathlib_cmplx_to_cmplx_c<_DataType_input, _DataType_output, desc_sp_cmplx_t>( - q_ref, array1_in, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm); + dpnp_fft_fft_mathlib_cmplx_to_cmplx_c<_DataType_input, _DataType_output, desc_sp_cmplx_t>(q_ref, + array1_in, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm); } /* real-to-complex, double precision */ else if constexpr (std::is_same<_DataType_input, double>::value && @@ -394,33 +443,76 @@ DPCTLSyclEventRef dpnp_fft_fft_c(DPCTLSyclQueueRef q_ref, { desc_dp_real_t desc(dim); - dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, double, desc_dp_real_t>( - q_ref, array1_in, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm, 0); + dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, double, desc_dp_real_t>(q_ref, + array1_in, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm, + 0); } /* real-to-complex, single precision */ else if 
constexpr (std::is_same<_DataType_input, float>::value && std::is_same<_DataType_output, std::complex>::value) { desc_sp_real_t desc(dim); // try: 2 * result_size - dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, float, desc_sp_real_t>( - q_ref, array1_in, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm, 0); + dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, float, desc_sp_real_t>(q_ref, + array1_in, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm, + 0); } else if constexpr (std::is_same<_DataType_input, int32_t>::value || std::is_same<_DataType_input, int64_t>::value) { double* array1_copy = reinterpret_cast(dpnp_memory_alloc_c(input_size * sizeof(double))); - shape_elem_type* copy_strides = reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); + shape_elem_type* copy_strides = + reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); *copy_strides = 1; - shape_elem_type* copy_shape = reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); + shape_elem_type* copy_shape = + reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); *copy_shape = input_size; shape_elem_type copy_shape_size = 1; - dpnp_copyto_c<_DataType_input, double>(q_ref, array1_copy, input_size, copy_shape_size, copy_shape, copy_strides, - array1_in, input_size, copy_shape_size, copy_shape, copy_strides, NULL, dep_event_vec_ref); + dpnp_copyto_c<_DataType_input, double>(q_ref, + array1_copy, + input_size, + copy_shape_size, + copy_shape, + copy_strides, + array1_in, + input_size, + copy_shape_size, + copy_shape, + copy_strides, + NULL, + dep_event_vec_ref); desc_dp_real_t desc(dim); - dpnp_fft_fft_mathlib_real_to_cmplx_c( - q_ref, array1_copy, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm, 0); + dpnp_fft_fft_mathlib_real_to_cmplx_c(q_ref, + array1_copy, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm, + 0); dpnp_memory_free_c(q_ref, array1_copy); dpnp_memory_free_c(q_ref, copy_strides); @@ -473,15 +565,9 @@ void dpnp_fft_fft_c(const void* array1_in, } template -void (*dpnp_fft_fft_default_c)(const void*, - void*, - const shape_elem_type*, - const shape_elem_type*, - size_t, - long, - long, - size_t, - const size_t) = dpnp_fft_fft_c<_DataType_input, _DataType_output>; +void (*dpnp_fft_fft_default_c)( + const void*, void*, const shape_elem_type*, const shape_elem_type*, size_t, long, long, size_t, const size_t) = + dpnp_fft_fft_c<_DataType_input, _DataType_output>; template DPCTLSyclEventRef (*dpnp_fft_fft_ext_c)(DPCTLSyclQueueRef, @@ -496,7 +582,6 @@ DPCTLSyclEventRef (*dpnp_fft_fft_ext_c)(DPCTLSyclQueueRef, const size_t, const DPCTLEventVectorRef) = dpnp_fft_fft_c<_DataType_input, _DataType_output>; - template DPCTLSyclEventRef dpnp_fft_rfft_c(DPCTLSyclQueueRef q_ref, const void* array1_in, @@ -524,42 +609,84 @@ DPCTLSyclEventRef dpnp_fft_rfft_c(DPCTLSyclQueueRef q_ref, size_t dim = input_shape[shape_size - 1]; - if constexpr (std::is_same<_DataType_output, std::complex>::value || std::is_same<_DataType_output, std::complex>::value) { if constexpr (std::is_same<_DataType_input, double>::value && - std::is_same<_DataType_output, std::complex>::value) + std::is_same<_DataType_output, std::complex>::value) { desc_dp_real_t desc(dim); - dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, double, desc_dp_real_t>( - q_ref, 
array1_in, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm, 1); + dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, double, desc_dp_real_t>(q_ref, + array1_in, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm, + 1); } /* real-to-complex, single precision */ else if constexpr (std::is_same<_DataType_input, float>::value && std::is_same<_DataType_output, std::complex>::value) { desc_sp_real_t desc(dim); // try: 2 * result_size - dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, float, desc_sp_real_t>( - q_ref, array1_in, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm, 1); + dpnp_fft_fft_mathlib_real_to_cmplx_c<_DataType_input, float, desc_sp_real_t>(q_ref, + array1_in, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm, + 1); } else if constexpr (std::is_same<_DataType_input, int32_t>::value || std::is_same<_DataType_input, int64_t>::value) { double* array1_copy = reinterpret_cast(dpnp_memory_alloc_c(input_size * sizeof(double))); - shape_elem_type* copy_strides = reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); + shape_elem_type* copy_strides = + reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); *copy_strides = 1; - shape_elem_type* copy_shape = reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); + shape_elem_type* copy_shape = + reinterpret_cast(dpnp_memory_alloc_c(q_ref, sizeof(shape_elem_type))); *copy_shape = input_size; shape_elem_type copy_shape_size = 1; - dpnp_copyto_c<_DataType_input, double>(q_ref, array1_copy, input_size, copy_shape_size, copy_shape, copy_strides, - array1_in, input_size, copy_shape_size, copy_shape, copy_strides, NULL, dep_event_vec_ref); + dpnp_copyto_c<_DataType_input, double>(q_ref, + array1_copy, + input_size, + copy_shape_size, + copy_shape, + copy_strides, + array1_in, + input_size, + copy_shape_size, + copy_shape, + copy_strides, + NULL, + dep_event_vec_ref); desc_dp_real_t desc(dim); - dpnp_fft_fft_mathlib_real_to_cmplx_c( - q_ref, array1_copy, result_out, input_shape, result_shape, shape_size, input_size, result_size, desc, inverse, norm, 1); + dpnp_fft_fft_mathlib_real_to_cmplx_c(q_ref, + array1_copy, + result_out, + input_shape, + result_shape, + shape_size, + input_size, + result_size, + desc, + inverse, + norm, + 1); dpnp_memory_free_c(q_ref, array1_copy); dpnp_memory_free_c(q_ref, copy_strides); @@ -570,7 +697,6 @@ DPCTLSyclEventRef dpnp_fft_rfft_c(DPCTLSyclQueueRef q_ref, return event_ref; } - template void dpnp_fft_rfft_c(const void* array1_in, void* result1, @@ -599,15 +725,9 @@ void dpnp_fft_rfft_c(const void* array1_in, } template -void (*dpnp_fft_rfft_default_c)(const void*, - void*, - const shape_elem_type*, - const shape_elem_type*, - size_t, - long, - long, - size_t, - const size_t) = dpnp_fft_rfft_c<_DataType_input, _DataType_output>; +void (*dpnp_fft_rfft_default_c)( + const void*, void*, const shape_elem_type*, const shape_elem_type*, size_t, long, long, size_t, const size_t) = + dpnp_fft_rfft_c<_DataType_input, _DataType_output>; template DPCTLSyclEventRef (*dpnp_fft_rfft_ext_c)(DPCTLSyclQueueRef, @@ -620,7 +740,8 @@ DPCTLSyclEventRef (*dpnp_fft_rfft_ext_c)(DPCTLSyclQueueRef, long, size_t, const size_t, - const DPCTLEventVectorRef) = dpnp_fft_rfft_c<_DataType_input, _DataType_output>; + const DPCTLEventVectorRef) = + 
dpnp_fft_rfft_c<_DataType_input, _DataType_output>; void func_map_init_fft_func(func_map_t& fmap) { @@ -628,8 +749,8 @@ void func_map_init_fft_func(func_map_t& fmap) eft_C128, (void*)dpnp_fft_fft_default_c>}; fmap[DPNPFuncName::DPNP_FN_FFT_FFT][eft_LNG][eft_LNG] = { eft_C128, (void*)dpnp_fft_fft_default_c>}; - fmap[DPNPFuncName::DPNP_FN_FFT_FFT][eft_FLT][eft_FLT] = { - eft_C64, (void*)dpnp_fft_fft_default_c>}; + fmap[DPNPFuncName::DPNP_FN_FFT_FFT][eft_FLT][eft_FLT] = {eft_C64, + (void*)dpnp_fft_fft_default_c>}; fmap[DPNPFuncName::DPNP_FN_FFT_FFT][eft_DBL][eft_DBL] = { eft_C128, (void*)dpnp_fft_fft_default_c>}; fmap[DPNPFuncName::DPNP_FN_FFT_FFT][eft_C64][eft_C64] = { diff --git a/dpnp/backend/kernels/dpnp_krnl_indexing.cpp b/dpnp/backend/kernels/dpnp_krnl_indexing.cpp index 5cde013b69f8..068307b5e16b 100644 --- a/dpnp/backend/kernels/dpnp_krnl_indexing.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_indexing.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -99,14 +99,8 @@ void dpnp_choose_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_choose_c<_DataType1, _DataType2>(q_ref, - result1, - array1_in, - choices1, - size, - choices_size, - choice_size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_choose_c<_DataType1, _DataType2>( + q_ref, result1, array1_in, choices1, size, choices_size, choice_size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -114,14 +108,9 @@ template void (*dpnp_choose_default_c)(void*, void*, void**, size_t, size_t, size_t) = dpnp_choose_c<_DataType1, _DataType2>; template -DPCTLSyclEventRef (*dpnp_choose_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - void**, - size_t, - size_t, - size_t, - const DPCTLEventVectorRef) = dpnp_choose_c<_DataType1, _DataType2>; +DPCTLSyclEventRef (*dpnp_choose_ext_c)( + DPCTLSyclQueueRef, void*, void*, void**, size_t, size_t, size_t, const DPCTLEventVectorRef) = + dpnp_choose_c<_DataType1, _DataType2>; template DPCTLSyclEventRef dpnp_diag_indices_c(DPCTLSyclQueueRef q_ref, @@ -137,10 +126,7 @@ void dpnp_diag_indices_c(void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_diag_indices_c<_DataType>(q_ref, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_diag_indices_c<_DataType>(q_ref, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -148,10 +134,8 @@ template void (*dpnp_diag_indices_default_c)(void*, size_t) = dpnp_diag_indices_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_diag_indices_ext_c)(DPCTLSyclQueueRef, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_diag_indices_c<_DataType>; +DPCTLSyclEventRef (*dpnp_diag_indices_ext_c)(DPCTLSyclQueueRef, void*, size_t, const DPCTLEventVectorRef) = + dpnp_diag_indices_c<_DataType>; template DPCTLSyclEventRef dpnp_diagonal_c(DPCTLSyclQueueRef q_ref, @@ -289,15 +273,8 @@ void dpnp_diagonal_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_diagonal_c<_DataType>(q_ref, - array1_in, - input1_size, - result1, - offset, - shape, - res_shape, - res_ndim, 
- dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_diagonal_c<_DataType>( + q_ref, array1_in, input1_size, result1, offset, shape, res_shape, res_ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -379,12 +356,8 @@ void dpnp_fill_diagonal_c(void* array1_in, void* val_in, shape_elem_type* shape, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_fill_diagonal_c<_DataType>(q_ref, - array1_in, - val_in, - shape, - ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_fill_diagonal_c<_DataType>(q_ref, array1_in, val_in, shape, ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -466,24 +439,14 @@ void dpnp_nonzero_c(const void* in_array1, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_nonzero_c<_DataType>(q_ref, - in_array1, - result1, - result_size, - shape, - ndim, - j, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_nonzero_c<_DataType>(q_ref, in_array1, result1, result_size, shape, ndim, j, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_nonzero_default_c)(const void*, - void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t) = dpnp_nonzero_c<_DataType>; +void (*dpnp_nonzero_default_c)(const void*, void*, const size_t, const shape_elem_type*, const size_t, const size_t) = + dpnp_nonzero_c<_DataType>; template DPCTLSyclEventRef (*dpnp_nonzero_ext_c)(DPCTLSyclQueueRef, @@ -547,13 +510,8 @@ void dpnp_place_c(void* arr_in, long* mask_in, void* vals_in, const size_t arr_s { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_place_c<_DataType>(q_ref, - arr_in, - mask_in, - vals_in, - arr_size, - vals_size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_place_c<_DataType>(q_ref, arr_in, mask_in, vals_in, arr_size, vals_size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -622,34 +580,19 @@ void dpnp_put_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_put_c<_DataType, _IndecesType, _ValueType>(q_ref, - array1_in, - ind_in, - v_in, - size, - size_ind, - size_v, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_put_c<_DataType, _IndecesType, _ValueType>( + q_ref, array1_in, ind_in, v_in, size, size_ind, size_v, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_put_default_c)(void*, - void*, - void*, - const size_t, - const size_t, - const size_t) = dpnp_put_c<_DataType, _IndecesType, _ValueType>; +void (*dpnp_put_default_c)(void*, void*, void*, const size_t, const size_t, const size_t) = + dpnp_put_c<_DataType, _IndecesType, _ValueType>; template -DPCTLSyclEventRef (*dpnp_put_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - void*, - const size_t, - const size_t, - const size_t, - const DPCTLEventVectorRef) = dpnp_put_c<_DataType, _IndecesType, _ValueType>; +DPCTLSyclEventRef (*dpnp_put_ext_c)( + DPCTLSyclQueueRef, void*, void*, void*, const size_t, const size_t, const size_t, const DPCTLEventVectorRef) = + dpnp_put_c<_DataType, _IndecesType, _ValueType>; template DPCTLSyclEventRef dpnp_put_along_axis_c(DPCTLSyclQueueRef q_ref, @@ -765,7 +708,7 @@ DPCTLSyclEventRef dpnp_put_along_axis_c(DPCTLSyclQueueRef q_ref, } } - // FIXME: computed, but 
unused. Commented out per compiler warning + // FIXME: computed, but unused. Commented out per compiler warning // size_t source_idx = 0; // for (size_t i = 0; i < static_cast(ndim); ++i) // { @@ -848,28 +791,14 @@ void dpnp_put_along_axis_c(void* arr_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_put_along_axis_c<_DataType>(q_ref, - arr_in, - indices_in, - values_in, - axis, - shape, - ndim, - size_indices, - values_size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_put_along_axis_c<_DataType>( + q_ref, arr_in, indices_in, values_in, axis, shape, ndim, size_indices, values_size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_put_along_axis_default_c)(void*, - long*, - void*, - size_t, - const shape_elem_type*, - size_t, - size_t, - size_t) = dpnp_put_along_axis_c<_DataType>; +void (*dpnp_put_along_axis_default_c)(void*, long*, void*, size_t, const shape_elem_type*, size_t, size_t, size_t) = + dpnp_put_along_axis_c<_DataType>; template DPCTLSyclEventRef (*dpnp_put_along_axis_ext_c)(DPCTLSyclQueueRef, @@ -926,13 +855,8 @@ void dpnp_take_c(void* array1_in, const size_t array1_size, void* indices1, void { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_take_c<_DataType, _IndecesType>(q_ref, - array1_in, - array1_size, - indices1, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_take_c<_DataType, _IndecesType>(q_ref, array1_in, array1_size, indices1, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } @@ -969,23 +893,15 @@ void func_map_init_indexing_func(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_CHOOSE_EXT][eft_LNG][eft_FLT] = {eft_FLT, (void*)dpnp_choose_ext_c}; fmap[DPNPFuncName::DPNP_FN_CHOOSE_EXT][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_choose_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_diag_indices_default_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_diag_indices_default_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_diag_indices_default_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_diag_indices_default_c}; - - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_diag_indices_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_diag_indices_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_diag_indices_ext_c}; - fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_diag_indices_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_diag_indices_default_c}; + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_diag_indices_default_c}; + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_diag_indices_default_c}; + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_diag_indices_default_c}; + + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_diag_indices_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_LNG][eft_LNG] = {eft_LNG, 
(void*)dpnp_diag_indices_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_diag_indices_ext_c}; + fmap[DPNPFuncName::DPNP_FN_DIAG_INDICES_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_diag_indices_ext_c}; fmap[DPNPFuncName::DPNP_FN_DIAGONAL][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_diagonal_default_c}; fmap[DPNPFuncName::DPNP_FN_DIAGONAL][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_diagonal_default_c}; @@ -1001,8 +917,7 @@ void func_map_init_indexing_func(func_map_t& fmap) (void*)dpnp_fill_diagonal_default_c}; fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_fill_diagonal_default_c}; - fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_fill_diagonal_default_c}; + fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_fill_diagonal_default_c}; fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_fill_diagonal_default_c}; @@ -1010,8 +925,7 @@ void func_map_init_indexing_func(func_map_t& fmap) (void*)dpnp_fill_diagonal_ext_c}; fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_fill_diagonal_ext_c}; - fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_fill_diagonal_ext_c}; + fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_fill_diagonal_ext_c}; fmap[DPNPFuncName::DPNP_FN_FILL_DIAGONAL_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_fill_diagonal_ext_c}; @@ -1035,23 +949,15 @@ void func_map_init_indexing_func(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_PLACE_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_place_ext_c}; fmap[DPNPFuncName::DPNP_FN_PLACE_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_place_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PUT][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_put_default_c}; - fmap[DPNPFuncName::DPNP_FN_PUT][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_put_default_c}; - fmap[DPNPFuncName::DPNP_FN_PUT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_put_default_c}; - fmap[DPNPFuncName::DPNP_FN_PUT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_put_default_c}; - - fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_put_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_LNG][eft_LNG] = {eft_LNG, - (void*)dpnp_put_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_put_ext_c}; - fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_put_ext_c}; + fmap[DPNPFuncName::DPNP_FN_PUT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_put_default_c}; + fmap[DPNPFuncName::DPNP_FN_PUT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_put_default_c}; + fmap[DPNPFuncName::DPNP_FN_PUT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_put_default_c}; + fmap[DPNPFuncName::DPNP_FN_PUT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_put_default_c}; + + fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_put_ext_c}; + fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_put_ext_c}; + fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_put_ext_c}; + fmap[DPNPFuncName::DPNP_FN_PUT_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_put_ext_c}; fmap[DPNPFuncName::DPNP_FN_PUT_ALONG_AXIS][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_put_along_axis_default_c}; diff --git a/dpnp/backend/kernels/dpnp_krnl_linalg.cpp b/dpnp/backend/kernels/dpnp_krnl_linalg.cpp index 
77bdad0c6b1c..c0d01ad569c1 100644 --- a/dpnp/backend/kernels/dpnp_krnl_linalg.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_linalg.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -75,7 +75,8 @@ DPCTLSyclEventRef dpnp_cholesky_c(DPCTLSyclQueueRef q_ref, const std::int64_t scratchpad_size = mkl_lapack::potrf_scratchpad_size<_DataType>(q, oneapi::mkl::uplo::upper, n, lda); - _DataType* scratchpad = reinterpret_cast<_DataType*>(sycl::malloc_shared(scratchpad_size * sizeof(_DataType), q)); + _DataType* scratchpad = + reinterpret_cast<_DataType*>(sycl::malloc_shared(scratchpad_size * sizeof(_DataType), q)); event = mkl_lapack::potrf(q, oneapi::mkl::uplo::upper, n, in_a, lda, scratchpad, scratchpad_size); @@ -115,12 +116,8 @@ void dpnp_cholesky_c(void* array1_in, void* result1, const size_t size, const si { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_cholesky_c<_DataType>(q_ref, - array1_in, - result1, - size, - data_size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_cholesky_c<_DataType>(q_ref, array1_in, result1, size, data_size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -256,12 +253,7 @@ void dpnp_det_c(void* array1_in, void* result1, shape_elem_type* shape, size_t n { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_det_c<_DataType>(q_ref, - array1_in, - result1, - shape, - ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_det_c<_DataType>(q_ref, array1_in, result1, shape, ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -269,12 +261,8 @@ template void (*dpnp_det_default_c)(void*, void*, shape_elem_type*, size_t) = dpnp_det_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_det_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - shape_elem_type*, - size_t, - const DPCTLEventVectorRef) = dpnp_det_c<_DataType>; +DPCTLSyclEventRef (*dpnp_det_ext_c)( + DPCTLSyclQueueRef, void*, void*, shape_elem_type*, size_t, const DPCTLEventVectorRef) = dpnp_det_c<_DataType>; template DPCTLSyclEventRef dpnp_inv_c(DPCTLSyclQueueRef q_ref, @@ -398,12 +386,8 @@ void dpnp_inv_c(void* array1_in, void* result1, shape_elem_type* shape, size_t n { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_inv_c<_DataType, _ResultType>(q_ref, - array1_in, - result1, - shape, - ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_inv_c<_DataType, _ResultType>(q_ref, array1_in, result1, shape, ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -518,26 +502,14 @@ void dpnp_kron_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_kron_c<_DataType1, _DataType2, _ResultType>(q_ref, - array1_in, - array2_in, - result1, - in1_shape, - in2_shape, - res_shape, - ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_kron_c<_DataType1, _DataType2, _ResultType>( + q_ref, array1_in, array2_in, result1, in1_shape, in2_shape, res_shape, ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void 
(*dpnp_kron_default_c)(void*, - void*, - void*, - shape_elem_type*, - shape_elem_type*, - shape_elem_type*, - size_t) = dpnp_kron_c<_DataType1, _DataType2, _ResultType>; +void (*dpnp_kron_default_c)(void*, void*, void*, shape_elem_type*, shape_elem_type*, shape_elem_type*, size_t) = + dpnp_kron_c<_DataType1, _DataType2, _ResultType>; template DPCTLSyclEventRef (*dpnp_kron_ext_c)(DPCTLSyclQueueRef, @@ -609,12 +581,8 @@ void dpnp_matrix_rank_c(void* array1_in, void* result1, shape_elem_type* shape, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_matrix_rank_c<_DataType>(q_ref, - array1_in, - result1, - shape, - ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_matrix_rank_c<_DataType>(q_ref, array1_in, result1, shape, ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -683,7 +651,8 @@ DPCTLSyclEventRef dpnp_qr_c(DPCTLSyclQueueRef q_ref, event = mkl_lapack::geqrf(q, size_m, size_n, in_a, lda, tau, geqrf_scratchpad, geqrf_scratchpad_size, depends); event.wait(); - if (!depends.empty()) { + if (!depends.empty()) + { verbose_print("oneapi::mkl::lapack::geqrf", depends.front(), event); } @@ -720,7 +689,8 @@ DPCTLSyclEventRef dpnp_qr_c(DPCTLSyclQueueRef q_ref, mkl_lapack::orgqr(q, size_m, nrefl, nrefl, in_a, lda, tau, orgqr_scratchpad, orgqr_scratchpad_size, depends); event.wait(); - if (!depends.empty()) { + if (!depends.empty()) + { verbose_print("oneapi::mkl::lapack::orgqr", depends.front(), event); } @@ -744,14 +714,8 @@ void dpnp_qr_c(void* array1_in, void* result1, void* result2, void* result3, siz { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_qr_c<_InputDT, _ComputeDT>(q_ref, - array1_in, - result1, - result2, - result3, - size_m, - size_n, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_qr_c<_InputDT, _ComputeDT>(q_ref, array1_in, result1, result2, result3, size_m, size_n, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -759,14 +723,9 @@ template void (*dpnp_qr_default_c)(void*, void*, void*, void*, size_t, size_t) = dpnp_qr_c<_InputDT, _ComputeDT>; template -DPCTLSyclEventRef (*dpnp_qr_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - void*, - void*, - size_t, - size_t, - const DPCTLEventVectorRef) = dpnp_qr_c<_InputDT, _ComputeDT>; +DPCTLSyclEventRef (*dpnp_qr_ext_c)( + DPCTLSyclQueueRef, void*, void*, void*, void*, size_t, size_t, const DPCTLEventVectorRef) = + dpnp_qr_c<_InputDT, _ComputeDT>; template DPCTLSyclEventRef dpnp_svd_c(DPCTLSyclQueueRef q_ref, @@ -786,7 +745,8 @@ DPCTLSyclEventRef dpnp_svd_c(DPCTLSyclQueueRef q_ref, sycl::event event; - DPNPC_ptr_adapter<_InputDT> input1_ptr(q_ref, array1_in, size_m * size_n, true); // TODO no need this if use dpnp_copy_to() + DPNPC_ptr_adapter<_InputDT> input1_ptr( + q_ref, array1_in, size_m * size_n, true); // TODO no need this if use dpnp_copy_to() _InputDT* in_array = input1_ptr.get_ptr(); // math lib gesvd func overrides input @@ -813,8 +773,8 @@ DPCTLSyclEventRef dpnp_svd_c(DPCTLSyclQueueRef q_ref, const std::int64_t scratchpad_size = mkl_lapack::gesvd_scratchpad_size<_ComputeDT>( q, oneapi::mkl::jobsvd::vectors, oneapi::mkl::jobsvd::vectors, n, m, lda, ldvt, ldu); - _ComputeDT* scratchpad = reinterpret_cast<_ComputeDT*>(sycl::malloc_shared(scratchpad_size * sizeof(_ComputeDT), - q)); + _ComputeDT* scratchpad = + reinterpret_cast<_ComputeDT*>(sycl::malloc_shared(scratchpad_size * 
sizeof(_ComputeDT), q)); event = mkl_lapack::gesvd(q, oneapi::mkl::jobsvd::vectors, // onemkl::job jobu, @@ -843,14 +803,8 @@ void dpnp_svd_c(void* array1_in, void* result1, void* result2, void* result3, si { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_svd_c<_InputDT, _ComputeDT, _SVDT>(q_ref, - array1_in, - result1, - result2, - result3, - size_m, - size_n, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_svd_c<_InputDT, _ComputeDT, _SVDT>( + q_ref, array1_in, result1, result2, result3, size_m, size_n, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -858,14 +812,9 @@ template void (*dpnp_svd_default_c)(void*, void*, void*, void*, size_t, size_t) = dpnp_svd_c<_InputDT, _ComputeDT, _SVDT>; template -DPCTLSyclEventRef (*dpnp_svd_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - void*, - void*, - size_t, - size_t, - const DPCTLEventVectorRef) = dpnp_svd_c<_InputDT, _ComputeDT, _SVDT>; +DPCTLSyclEventRef (*dpnp_svd_ext_c)( + DPCTLSyclQueueRef, void*, void*, void*, void*, size_t, size_t, const DPCTLEventVectorRef) = + dpnp_svd_c<_InputDT, _ComputeDT, _SVDT>; void func_map_init_linalg_func(func_map_t& fmap) { @@ -886,40 +835,28 @@ void func_map_init_linalg_func(func_map_t& fmap) (void*)dpnp_kron_default_c}; fmap[DPNPFuncName::DPNP_FN_KRON][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_INT][eft_FLT] = {eft_FLT, - (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_INT][eft_FLT] = {eft_FLT, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_kron_default_c}; // fmap[DPNPFuncName::DPNP_FN_KRON][eft_INT][eft_C128] = { // eft_C128, (void*)dpnp_kron_default_c, std::complex>}; fmap[DPNPFuncName::DPNP_FN_KRON][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_kron_default_c}; fmap[DPNPFuncName::DPNP_FN_KRON][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_LNG][eft_FLT] = {eft_FLT, - (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_LNG][eft_FLT] = {eft_FLT, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_kron_default_c}; // fmap[DPNPFuncName::DPNP_FN_KRON][eft_LNG][eft_C128] = { // eft_C128, (void*)dpnp_kron_default_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_INT] = {eft_FLT, - (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_LNG] = {eft_FLT, - (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_INT] = {eft_FLT, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_LNG] = {eft_FLT, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_kron_default_c}; // fmap[DPNPFuncName::DPNP_FN_KRON][eft_FLT][eft_C128] = { // eft_C128, (void*)dpnp_kron_default_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_INT] = {eft_DBL, - 
(void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_kron_default_c}; - fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_kron_default_c}; + fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_kron_default_c}; fmap[DPNPFuncName::DPNP_FN_KRON][eft_DBL][eft_C128] = { eft_C128, (void*)dpnp_kron_default_c, std::complex>}; // fmap[DPNPFuncName::DPNP_FN_KRON][eft_C128][eft_INT] = { @@ -937,40 +874,28 @@ void func_map_init_linalg_func(func_map_t& fmap) (void*)dpnp_kron_ext_c}; fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_INT][eft_LNG] = {eft_LNG, (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_INT][eft_FLT] = {eft_FLT, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_INT][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_INT][eft_FLT] = {eft_FLT, (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_INT][eft_DBL] = {eft_DBL, (void*)dpnp_kron_ext_c}; // fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_INT][eft_C128] = { // eft_C128, (void*)dpnp_kron_ext_c, std::complex>}; fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_LNG][eft_INT] = {eft_LNG, (void*)dpnp_kron_ext_c}; fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_LNG][eft_FLT] = {eft_FLT, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_LNG][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_LNG][eft_FLT] = {eft_FLT, (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_LNG][eft_DBL] = {eft_DBL, (void*)dpnp_kron_ext_c}; // fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_LNG][eft_C128] = { // eft_C128, (void*)dpnp_kron_ext_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_INT] = {eft_FLT, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_LNG] = {eft_FLT, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_INT] = {eft_FLT, (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_LNG] = {eft_FLT, (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_kron_ext_c}; // fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_FLT][eft_C128] = { // eft_C128, (void*)dpnp_kron_ext_c, std::complex>}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_INT] = {eft_DBL, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_LNG] = {eft_DBL, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_kron_ext_c}; - fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_INT] = {eft_DBL, 
(void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_kron_ext_c}; + fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_kron_ext_c}; fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_DBL][eft_C128] = { eft_C128, (void*)dpnp_kron_ext_c, std::complex>}; // fmap[DPNPFuncName::DPNP_FN_KRON_EXT][eft_C128][eft_INT] = { diff --git a/dpnp/backend/kernels/dpnp_krnl_logic.cpp b/dpnp/backend/kernels/dpnp_krnl_logic.cpp index 109246913589..ae130afb8e4d 100644 --- a/dpnp/backend/kernels/dpnp_krnl_logic.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_logic.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -91,11 +91,8 @@ void dpnp_all_c(const void* array1_in, void* result1, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_all_c<_DataType, _ResultType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_all_c<_DataType, _ResultType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -103,11 +100,8 @@ template void (*dpnp_all_default_c)(const void*, void*, const size_t) = dpnp_all_c<_DataType, _ResultType>; template -DPCTLSyclEventRef (*dpnp_all_ext_c)(DPCTLSyclQueueRef, - const void*, - void*, - const size_t, - const DPCTLEventVectorRef) = dpnp_all_c<_DataType, _ResultType>; +DPCTLSyclEventRef (*dpnp_all_ext_c)(DPCTLSyclQueueRef, const void*, void*, const size_t, const DPCTLEventVectorRef) = + dpnp_all_c<_DataType, _ResultType>; template class dpnp_allclose_c_kernel; @@ -177,35 +171,19 @@ void dpnp_allclose_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_allclose_c<_DataType1, _DataType2, _ResultType>(q_ref, - array1_in, - array2_in, - result1, - size, - rtol_val, - atol_val, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_allclose_c<_DataType1, _DataType2, _ResultType>( + q_ref, array1_in, array2_in, result1, size, rtol_val, atol_val, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_allclose_default_c)(const void*, - const void*, - void*, - const size_t, - double, - double) = dpnp_allclose_c<_DataType1, _DataType2, _ResultType>; +void (*dpnp_allclose_default_c)(const void*, const void*, void*, const size_t, double, double) = + dpnp_allclose_c<_DataType1, _DataType2, _ResultType>; template DPCTLSyclEventRef (*dpnp_allclose_ext_c)( - DPCTLSyclQueueRef, - const void*, - const void*, - void*, - const size_t, - double, - double, - const DPCTLEventVectorRef) = dpnp_allclose_c<_DataType1, _DataType2, _ResultType>; + DPCTLSyclQueueRef, const void*, const void*, void*, const size_t, double, double, const DPCTLEventVectorRef) = + dpnp_allclose_c<_DataType1, _DataType2, _ResultType>; template class dpnp_any_c_kernel; @@ -268,11 +246,8 @@ void dpnp_any_c(const void* array1_in, void* result1, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_any_c<_DataType, 
_ResultType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_any_c<_DataType, _ResultType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -280,11 +255,8 @@ template void (*dpnp_any_default_c)(const void*, void*, const size_t) = dpnp_any_c<_DataType, _ResultType>; template -DPCTLSyclEventRef (*dpnp_any_ext_c)(DPCTLSyclQueueRef, - const void*, - void*, - const size_t, - const DPCTLEventVectorRef) = dpnp_any_c<_DataType, _ResultType>; +DPCTLSyclEventRef (*dpnp_any_ext_c)(DPCTLSyclQueueRef, const void*, void*, const size_t, const DPCTLEventVectorRef) = + dpnp_any_c<_DataType, _ResultType>; void func_map_init_logic(func_map_t& fmap) { diff --git a/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp b/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp index 8a122dbf7283..480dec8f4c0c 100644 --- a/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_manipulation.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -63,7 +63,7 @@ DPCTLSyclEventRef dpnp_repeat_c(DPCTLSyclQueueRef q_ref, sycl::queue q = *(reinterpret_cast(q_ref)); sycl::event event; - DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref,array1_in, size); + DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref, array1_in, size); const _DataType* array_in = input1_ptr.get_ptr(); _DataType* result = reinterpret_cast<_DataType*>(result1); @@ -90,12 +90,7 @@ void dpnp_repeat_c(const void* array1_in, void* result1, const size_t repeats, c { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_repeat_c<_DataType>(q_ref, - array1_in, - result1, - repeats, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_repeat_c<_DataType>(q_ref, array1_in, result1, repeats, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -137,7 +132,7 @@ DPCTLSyclEventRef dpnp_elemwise_transpose_c(DPCTLSyclQueueRef q_ref, sycl::queue q = *(reinterpret_cast(q_ref)); sycl::event event; - DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref,array1_in, size); + DPNPC_ptr_adapter<_DataType> input1_ptr(q_ref, array1_in, size); _DataType* array1 = input1_ptr.get_ptr(); _DataType* result = reinterpret_cast<_DataType*>(result1); @@ -201,15 +196,8 @@ void dpnp_elemwise_transpose_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_elemwise_transpose_c<_DataType>(q_ref, - array1_in, - input_shape, - result_shape, - permute_axes, - ndim, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_elemwise_transpose_c<_DataType>( + q_ref, array1_in, input_shape, result_shape, permute_axes, ndim, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } diff --git a/dpnp/backend/kernels/dpnp_krnl_mathematical.cpp b/dpnp/backend/kernels/dpnp_krnl_mathematical.cpp index 32f8ffe465d2..676b08b17075 100644 --- a/dpnp/backend/kernels/dpnp_krnl_mathematical.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_mathematical.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel 
Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -94,12 +94,8 @@ void dpnp_around_c(const void* input_in, void* result_out, const size_t input_si { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_around_c<_DataType>(q_ref, - input_in, - result_out, - input_size, - decimals, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_around_c<_DataType>(q_ref, input_in, result_out, input_size, decimals, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -182,11 +178,8 @@ void dpnp_elemwise_absolute_c(const void* input1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_elemwise_absolute_c<_DataType>(q_ref, - input1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_elemwise_absolute_c<_DataType>(q_ref, input1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -194,11 +187,8 @@ template void (*dpnp_elemwise_absolute_default_c)(const void*, void*, size_t) = dpnp_elemwise_absolute_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_elemwise_absolute_ext_c)(DPCTLSyclQueueRef, - const void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_elemwise_absolute_c<_DataType>; +DPCTLSyclEventRef (*dpnp_elemwise_absolute_ext_c)( + DPCTLSyclQueueRef, const void*, void*, size_t, const DPCTLEventVectorRef) = dpnp_elemwise_absolute_c<_DataType>; // template void dpnp_elemwise_absolute_c(void* array1_in, void* result1, size_t size); // template void dpnp_elemwise_absolute_c(void* array1_in, void* result1, size_t size); @@ -289,29 +279,26 @@ void (*dpnp_cross_default_c)(void*, const size_t*) = dpnp_cross_c<_DataType_output, _DataType_input1, _DataType_input2>; template -DPCTLSyclEventRef (*dpnp_cross_ext_c)( - DPCTLSyclQueueRef, - void*, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t*, - const DPCTLEventVectorRef) = dpnp_cross_c<_DataType_output, _DataType_input1, _DataType_input2>; +DPCTLSyclEventRef (*dpnp_cross_ext_c)(DPCTLSyclQueueRef, + void*, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const size_t*, + const DPCTLEventVectorRef) = + dpnp_cross_c<_DataType_output, _DataType_input1, _DataType_input2>; template class dpnp_cumprod_c_kernel; template -DPCTLSyclEventRef dpnp_cumprod_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_cumprod_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -346,11 +333,8 @@ void dpnp_cumprod_c(void* array1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_cumprod_c<_DataType_input, _DataType_output>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_cumprod_c<_DataType_input, _DataType_output>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -358,21 +342,15 @@ 
template void (*dpnp_cumprod_default_c)(void*, void*, size_t) = dpnp_cumprod_c<_DataType_input, _DataType_output>; template -DPCTLSyclEventRef (*dpnp_cumprod_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_cumprod_c<_DataType_input, _DataType_output>; +DPCTLSyclEventRef (*dpnp_cumprod_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_cumprod_c<_DataType_input, _DataType_output>; template class dpnp_cumsum_c_kernel; template -DPCTLSyclEventRef dpnp_cumsum_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_cumsum_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -407,11 +385,8 @@ void dpnp_cumsum_c(void* array1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_cumsum_c<_DataType_input, _DataType_output>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_cumsum_c<_DataType_input, _DataType_output>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -419,11 +394,8 @@ template void (*dpnp_cumsum_default_c)(void*, void*, size_t) = dpnp_cumsum_c<_DataType_input, _DataType_output>; template -DPCTLSyclEventRef (*dpnp_cumsum_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_cumsum_c<_DataType_input, _DataType_output>; +DPCTLSyclEventRef (*dpnp_cumsum_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_cumsum_c<_DataType_input, _DataType_output>; template class dpnp_ediff1d_c_kernel; @@ -480,8 +452,7 @@ DPCTLSyclEventRef dpnp_ediff1d_c(DPCTLSyclQueueRef q_ref, } }; auto kernel_func = [&](cl::sycl::handler& cgh) { - cgh.parallel_for>( - gws, kernel_parallel_for_func); + cgh.parallel_for>(gws, kernel_parallel_for_func); }; event = q.submit(kernel_func); @@ -666,50 +637,50 @@ void dpnp_floor_divide_c(void* result_out, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_floor_divide_c<_DataType_output, _DataType_input1, _DataType_input2>( - q_ref, - result_out, - input1_in, - input1_size, - input1_shape, - input1_shape_ndim, - input2_in, - input2_size, - input2_shape, - input2_shape_ndim, - where, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_floor_divide_c<_DataType_output, _DataType_input1, _DataType_input2>(q_ref, + result_out, + input1_in, + input1_size, + input1_shape, + input1_shape_ndim, + input2_in, + input2_size, + input2_shape, + input2_shape_ndim, + where, + dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_floor_divide_default_c)( - void*, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t*) = dpnp_floor_divide_c<_DataType_output, _DataType_input1, _DataType_input2>; +void (*dpnp_floor_divide_default_c)(void*, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const size_t*) = + dpnp_floor_divide_c<_DataType_output, _DataType_input1, 
_DataType_input2>; template -DPCTLSyclEventRef (*dpnp_floor_divide_ext_c)( - DPCTLSyclQueueRef, - void*, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t*, - const DPCTLEventVectorRef) = dpnp_floor_divide_c<_DataType_output, _DataType_input1, _DataType_input2>; +DPCTLSyclEventRef (*dpnp_floor_divide_ext_c)(DPCTLSyclQueueRef, + void*, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const size_t*, + const DPCTLEventVectorRef) = + dpnp_floor_divide_c<_DataType_output, _DataType_input1, _DataType_input2>; template class dpnp_modf_c_kernel; @@ -768,12 +739,8 @@ void dpnp_modf_c(void* array1_in, void* result1_out, void* result2_out, size_t s { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_modf_c<_DataType_input, _DataType_output>(q_ref, - array1_in, - result1_out, - result2_out, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_modf_c<_DataType_input, _DataType_output>( + q_ref, array1_in, result1_out, result2_out, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } @@ -782,12 +749,8 @@ template void (*dpnp_modf_default_c)(void*, void*, void*, size_t) = dpnp_modf_c<_DataType_input, _DataType_output>; template -DPCTLSyclEventRef (*dpnp_modf_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_modf_c<_DataType_input, _DataType_output>; +DPCTLSyclEventRef (*dpnp_modf_ext_c)(DPCTLSyclQueueRef, void*, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_modf_c<_DataType_input, _DataType_output>; template class dpnp_remainder_c_kernel; @@ -903,50 +866,50 @@ void dpnp_remainder_c(void* result_out, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_remainder_c<_DataType_output, _DataType_input1, _DataType_input2>( - q_ref, - result_out, - input1_in, - input1_size, - input1_shape, - input1_shape_ndim, - input2_in, - input2_size, - input2_shape, - input2_shape_ndim, - where, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_remainder_c<_DataType_output, _DataType_input1, _DataType_input2>(q_ref, + result_out, + input1_in, + input1_size, + input1_shape, + input1_shape_ndim, + input2_in, + input2_size, + input2_shape, + input2_shape_ndim, + where, + dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_remainder_default_c)( - void*, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t*) = dpnp_remainder_c<_DataType_output, _DataType_input1, _DataType_input2>; +void (*dpnp_remainder_default_c)(void*, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const size_t*) = + dpnp_remainder_c<_DataType_output, _DataType_input1, _DataType_input2>; template -DPCTLSyclEventRef (*dpnp_remainder_ext_c)( - DPCTLSyclQueueRef, - void*, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t*, - const DPCTLEventVectorRef) = 
dpnp_remainder_c<_DataType_output, _DataType_input1, _DataType_input2>; +DPCTLSyclEventRef (*dpnp_remainder_ext_c)(DPCTLSyclQueueRef, + void*, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const size_t*, + const DPCTLEventVectorRef) = + dpnp_remainder_c<_DataType_output, _DataType_input1, _DataType_input2>; template class dpnp_trapz_c_kernel; @@ -1039,36 +1002,20 @@ void dpnp_trapz_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_trapz_c<_DataType_input1, _DataType_input2, _DataType_output>(q_ref, - array1_in, - array2_in, - result1, - dx, - array1_size, - array2_size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_trapz_c<_DataType_input1, _DataType_input2, _DataType_output>( + q_ref, array1_in, array2_in, result1, dx, array1_size, array2_size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_trapz_default_c)(const void*, - const void*, - void*, - double, - size_t, - size_t) = dpnp_trapz_c<_DataType_input1, _DataType_input2, _DataType_output>; +void (*dpnp_trapz_default_c)(const void*, const void*, void*, double, size_t, size_t) = + dpnp_trapz_c<_DataType_input1, _DataType_input2, _DataType_output>; template DPCTLSyclEventRef (*dpnp_trapz_ext_c)( - DPCTLSyclQueueRef, - const void*, - const void*, - void*, - double, - size_t, - size_t, - const DPCTLEventVectorRef) = dpnp_trapz_c<_DataType_input1, _DataType_input2, _DataType_output>; + DPCTLSyclQueueRef, const void*, const void*, void*, double, size_t, size_t, const DPCTLEventVectorRef) = + dpnp_trapz_c<_DataType_input1, _DataType_input2, _DataType_output>; void func_map_init_mathematical(func_map_t& fmap) { @@ -1076,19 +1023,15 @@ void func_map_init_mathematical(func_map_t& fmap) (void*)dpnp_elemwise_absolute_default_c}; fmap[DPNPFuncName::DPNP_FN_ABSOLUTE][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_elemwise_absolute_default_c}; - fmap[DPNPFuncName::DPNP_FN_ABSOLUTE][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_elemwise_absolute_default_c}; - fmap[DPNPFuncName::DPNP_FN_ABSOLUTE][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_elemwise_absolute_default_c}; + fmap[DPNPFuncName::DPNP_FN_ABSOLUTE][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_elemwise_absolute_default_c}; + fmap[DPNPFuncName::DPNP_FN_ABSOLUTE][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_elemwise_absolute_default_c}; fmap[DPNPFuncName::DPNP_FN_ABSOLUTE_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_elemwise_absolute_ext_c}; fmap[DPNPFuncName::DPNP_FN_ABSOLUTE_EXT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_elemwise_absolute_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ABSOLUTE_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_elemwise_absolute_ext_c}; - fmap[DPNPFuncName::DPNP_FN_ABSOLUTE_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_elemwise_absolute_ext_c}; + fmap[DPNPFuncName::DPNP_FN_ABSOLUTE_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_elemwise_absolute_ext_c}; + fmap[DPNPFuncName::DPNP_FN_ABSOLUTE_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_elemwise_absolute_ext_c}; fmap[DPNPFuncName::DPNP_FN_AROUND][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_around_default_c}; fmap[DPNPFuncName::DPNP_FN_AROUND][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_around_default_c}; @@ -1120,16 +1063,13 @@ void func_map_init_mathematical(func_map_t& fmap) (void*)dpnp_cross_default_c}; fmap[DPNPFuncName::DPNP_FN_CROSS][eft_FLT][eft_LNG] = 
{eft_DBL, (void*)dpnp_cross_default_c}; - fmap[DPNPFuncName::DPNP_FN_CROSS][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_cross_default_c}; - fmap[DPNPFuncName::DPNP_FN_CROSS][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_cross_default_c}; + fmap[DPNPFuncName::DPNP_FN_CROSS][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_cross_default_c}; + fmap[DPNPFuncName::DPNP_FN_CROSS][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_cross_default_c}; fmap[DPNPFuncName::DPNP_FN_CROSS][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_cross_default_c}; fmap[DPNPFuncName::DPNP_FN_CROSS][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_cross_default_c}; - fmap[DPNPFuncName::DPNP_FN_CROSS][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_cross_default_c}; + fmap[DPNPFuncName::DPNP_FN_CROSS][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_cross_default_c}; fmap[DPNPFuncName::DPNP_FN_CROSS][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_cross_default_c}; @@ -1153,16 +1093,13 @@ void func_map_init_mathematical(func_map_t& fmap) (void*)dpnp_cross_ext_c}; fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_cross_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_cross_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_cross_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_cross_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_cross_ext_c}; fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_cross_ext_c}; fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_cross_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_cross_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_cross_ext_c}; fmap[DPNPFuncName::DPNP_FN_CROSS_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_cross_ext_c}; @@ -1276,67 +1213,67 @@ void func_map_init_mathematical(func_map_t& fmap) eft_INT, (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_remainder_default_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_INT][eft_DBL] = { eft_DBL, (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_remainder_default_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_LNG][eft_DBL] = { eft_DBL, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_remainder_default_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_remainder_default_c}; 
+ fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_remainder_default_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_remainder_default_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_DBL][eft_INT] = { eft_DBL, (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_DBL][eft_LNG] = { eft_DBL, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_remainder_default_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_remainder_default_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_remainder_default_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_remainder_default_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_INT][eft_INT] = { eft_INT, (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_INT][eft_DBL] = { eft_DBL, (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_LNG][eft_DBL] = { eft_DBL, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_DBL][eft_INT] = { eft_DBL, (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_DBL][eft_LNG] = { eft_DBL, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_remainder_ext_c}; - fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_remainder_ext_c}; + fmap[DPNPFuncName::DPNP_FN_REMAINDER_EXT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_remainder_ext_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_INT][eft_INT] = {eft_DBL, 
(void*)dpnp_trapz_default_c}; @@ -1358,16 +1295,13 @@ void func_map_init_mathematical(func_map_t& fmap) (void*)dpnp_trapz_default_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_trapz_default_c}; - fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_trapz_default_c}; - fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_trapz_default_c}; + fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_trapz_default_c}; + fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_trapz_default_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_trapz_default_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_trapz_default_c}; - fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_trapz_default_c}; + fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_trapz_default_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_trapz_default_c}; @@ -1391,16 +1325,13 @@ void func_map_init_mathematical(func_map_t& fmap) (void*)dpnp_trapz_ext_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_FLT][eft_LNG] = {eft_DBL, (void*)dpnp_trapz_ext_c}; - fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_trapz_ext_c}; - fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_FLT][eft_DBL] = {eft_DBL, - (void*)dpnp_trapz_ext_c}; + fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_trapz_ext_c}; + fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_FLT][eft_DBL] = {eft_DBL, (void*)dpnp_trapz_ext_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_DBL][eft_INT] = {eft_DBL, (void*)dpnp_trapz_ext_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_DBL][eft_LNG] = {eft_DBL, (void*)dpnp_trapz_ext_c}; - fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_DBL][eft_FLT] = {eft_DBL, - (void*)dpnp_trapz_ext_c}; + fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_DBL][eft_FLT] = {eft_DBL, (void*)dpnp_trapz_ext_c}; fmap[DPNPFuncName::DPNP_FN_TRAPZ_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_trapz_ext_c}; diff --git a/dpnp/backend/kernels/dpnp_krnl_random.cpp b/dpnp/backend/kernels/dpnp_krnl_random.cpp index 53207e67ff3e..2e314c60a5c1 100644 --- a/dpnp/backend/kernels/dpnp_krnl_random.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_random.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -112,12 +112,7 @@ void dpnp_rng_beta_c(void* result, const _DataType a, const _DataType b, const s { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_beta_c<_DataType>(q_ref, - result, - a, - b, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_beta_c<_DataType>(q_ref, result, a, b, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -177,7 +172,6 @@ DPCTLSyclEventRef dpnp_rng_binomial_c(DPCTLSyclQueueRef q_ref, event_ref = reinterpret_cast(&event_out); } return DPCTLEvent_Copy(event_ref); - } template @@ -185,12 +179,7 @@ void dpnp_rng_binomial_c(void* result, const int ntrial, const double p, const s { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_binomial_c<_DataType>(q_ref, - result, - ntrial, - p, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_binomial_c<_DataType>(q_ref, result, ntrial, p, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -206,11 +195,8 @@ DPCTLSyclEventRef (*dpnp_rng_binomial_ext_c)(DPCTLSyclQueueRef, const DPCTLEventVectorRef) = dpnp_rng_binomial_c<_DataType>; template -DPCTLSyclEventRef dpnp_rng_chisquare_c(DPCTLSyclQueueRef q_ref, - void* result, - const int df, - const size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_rng_chisquare_c( + DPCTLSyclQueueRef q_ref, void* result, const int df, const size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -239,11 +225,7 @@ void dpnp_rng_chisquare_c(void* result, const int df, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_chisquare_c<_DataType>(q_ref, - result, - df, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_chisquare_c<_DataType>(q_ref, result, df, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -251,11 +233,8 @@ template void (*dpnp_rng_chisquare_default_c)(void*, const int, const size_t) = dpnp_rng_chisquare_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_chisquare_ext_c)(DPCTLSyclQueueRef, - void*, - const int, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_chisquare_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_chisquare_ext_c)( + DPCTLSyclQueueRef, void*, const int, const size_t, const DPCTLEventVectorRef) = dpnp_rng_chisquare_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_exponential_c(DPCTLSyclQueueRef q_ref, @@ -295,11 +274,7 @@ void dpnp_rng_exponential_c(void* result, const _DataType beta, const size_t siz { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_exponential_c<_DataType>(q_ref, - result, - beta, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_exponential_c<_DataType>(q_ref, result, beta, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -308,10 +283,10 @@ void (*dpnp_rng_exponential_default_c)(void*, const _DataType, const size_t) = d template DPCTLSyclEventRef (*dpnp_rng_exponential_ext_c)(DPCTLSyclQueueRef, - void*, - const _DataType, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_exponential_c<_DataType>; + void*, + const 
_DataType, + const size_t, + const DPCTLEventVectorRef) = dpnp_rng_exponential_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_f_c(DPCTLSyclQueueRef q_ref, @@ -352,13 +327,8 @@ DPCTLSyclEventRef dpnp_rng_f_c(DPCTLSyclQueueRef q_ref, mkl_rng::gamma<_DataType> gamma_distribution2(shape, d_zero, scale); auto event_gamma_distribution2 = mkl_rng::generate(gamma_distribution2, DPNP_RNG_ENGINE, size, den); - auto event_out = mkl_vm::div(q, - size, - result1, - den, - result1, - {event_gamma_distribution1, event_gamma_distribution2}, - mkl_vm::mode::ha); + auto event_out = mkl_vm::div( + q, size, result1, den, result1, {event_gamma_distribution1, event_gamma_distribution2}, mkl_vm::mode::ha); event_out.wait(); sycl::free(den, q); @@ -371,12 +341,7 @@ void dpnp_rng_f_c(void* result, const _DataType df_num, const _DataType df_den, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_f_c<_DataType>(q_ref, - result, - df_num, - df_den, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_f_c<_DataType>(q_ref, result, df_num, df_den, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -385,19 +350,19 @@ void (*dpnp_rng_f_default_c)(void*, const _DataType, const _DataType, const size template DPCTLSyclEventRef (*dpnp_rng_f_ext_c)(DPCTLSyclQueueRef, - void*, - const _DataType, - const _DataType, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_f_c<_DataType>; + void*, + const _DataType, + const _DataType, + const size_t, + const DPCTLEventVectorRef) = dpnp_rng_f_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_gamma_c(DPCTLSyclQueueRef q_ref, - void* result, - const _DataType shape, - const _DataType scale, - const size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) + void* result, + const _DataType shape, + const _DataType scale, + const size_t size, + const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -434,12 +399,7 @@ void dpnp_rng_gamma_c(void* result, const _DataType shape, const _DataType scale { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_gamma_c<_DataType>(q_ref, - result, - shape, - scale, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_gamma_c<_DataType>(q_ref, result, shape, scale, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -489,20 +449,13 @@ void dpnp_rng_gaussian_c(void* result, const _DataType mean, const _DataType std { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_gaussian_c<_DataType>(q_ref, - result, - mean, - stddev, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_gaussian_c<_DataType>(q_ref, result, mean, stddev, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_gaussian_default_c)(void*, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_gaussian_c<_DataType>; +void (*dpnp_rng_gaussian_default_c)(void*, const _DataType, const _DataType, const size_t) = + dpnp_rng_gaussian_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_gaussian_ext_c)(DPCTLSyclQueueRef, @@ -552,11 +505,7 @@ void dpnp_rng_geometric_c(void* result, const float p, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - 
DPCTLSyclEventRef event_ref = dpnp_rng_geometric_c<_DataType>(q_ref, - result, - p, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_geometric_c<_DataType>(q_ref, result, p, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -564,11 +513,8 @@ template void (*dpnp_rng_geometric_default_c)(void*, const float, const size_t) = dpnp_rng_geometric_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_geometric_ext_c)(DPCTLSyclQueueRef, - void*, - const float, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_geometric_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_geometric_ext_c)( + DPCTLSyclQueueRef, void*, const float, const size_t, const DPCTLEventVectorRef) = dpnp_rng_geometric_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_gumbel_c(DPCTLSyclQueueRef q_ref, @@ -621,12 +567,7 @@ void dpnp_rng_gumbel_c(void* result, const double loc, const double scale, const { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_gumbel_c<_DataType>(q_ref, - result, - loc, - scale, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_gumbel_c<_DataType>(q_ref, result, loc, scale, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -682,7 +623,6 @@ DPCTLSyclEventRef dpnp_rng_hypergeometric_c(DPCTLSyclQueueRef q_ref, mkl_rng::hypergeometric<_DataType> distribution(l, s, m); event_out = mkl_rng::generate(distribution, DPNP_RNG_ENGINE, size, result1); event_ref = reinterpret_cast(&event_out); - } return DPCTLEvent_Copy(event_ref); } @@ -692,22 +632,13 @@ void dpnp_rng_hypergeometric_c(void* result, const int l, const int s, const int { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_hypergeometric_c<_DataType>(q_ref, - result, - l, - s, - m, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_hypergeometric_c<_DataType>(q_ref, result, l, s, m, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_hypergeometric_default_c)(void*, - const int, - const int, - const int, - const size_t) = dpnp_rng_hypergeometric_c<_DataType>; +void (*dpnp_rng_hypergeometric_default_c)(void*, const int, const int, const int, const size_t) = + dpnp_rng_hypergeometric_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_hypergeometric_ext_c)(DPCTLSyclQueueRef, @@ -759,12 +690,7 @@ void dpnp_rng_laplace_c(void* result, const double loc, const double scale, cons { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_laplace_c<_DataType>(q_ref, - result, - loc, - scale, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_laplace_c<_DataType>(q_ref, result, loc, scale, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -832,12 +758,7 @@ void dpnp_rng_logistic_c(void* result, const double loc, const double scale, con { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_logistic_c<_DataType>(q_ref, - result, - loc, - scale, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_logistic_c<_DataType>(q_ref, result, loc, scale, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -901,20 +822,13 @@ void dpnp_rng_lognormal_c(void* result, const _DataType mean, const _DataType st 
{ DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_lognormal_c<_DataType>(q_ref, - result, - mean, - stddev, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_lognormal_c<_DataType>(q_ref, result, mean, stddev, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_lognormal_default_c)(void*, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_lognormal_c<_DataType>; +void (*dpnp_rng_lognormal_default_c)(void*, const _DataType, const _DataType, const size_t) = + dpnp_rng_lognormal_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_lognormal_ext_c)(DPCTLSyclQueueRef, @@ -990,36 +904,27 @@ DPCTLSyclEventRef dpnp_rng_multinomial_c(DPCTLSyclQueueRef q_ref, } template -void dpnp_rng_multinomial_c( - void* result, const int ntrial, void* p_in, const size_t p_size, const size_t size) +void dpnp_rng_multinomial_c(void* result, const int ntrial, void* p_in, const size_t p_size, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_multinomial_c<_DataType>(q_ref, - result, - ntrial, - p_in, - p_size, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_rng_multinomial_c<_DataType>(q_ref, result, ntrial, p_in, p_size, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_multinomial_default_c)(void*, - const int, - void*, - const size_t, - const size_t) = dpnp_rng_multinomial_c<_DataType>; +void (*dpnp_rng_multinomial_default_c)(void*, const int, void*, const size_t, const size_t) = + dpnp_rng_multinomial_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_multinomial_ext_c)(DPCTLSyclQueueRef, - void*, - const int, - void*, - const size_t, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_multinomial_c<_DataType>; + void*, + const int, + void*, + const size_t, + const size_t, + const DPCTLEventVectorRef) = dpnp_rng_multinomial_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_multivariate_normal_c(DPCTLSyclQueueRef q_ref, @@ -1077,15 +982,8 @@ void dpnp_rng_multivariate_normal_c(void* result, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_multivariate_normal_c<_DataType>(q_ref, - result, - dimen, - mean_in, - mean_size, - cov_in, - cov_size, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_multivariate_normal_c<_DataType>( + q_ref, result, dimen, mean_in, mean_size, cov_in, cov_size, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1107,7 +1005,8 @@ DPCTLSyclEventRef (*dpnp_rng_multivariate_normal_ext_c)(DPCTLSyclQueueRef, void*, const size_t, const size_t, - const DPCTLEventVectorRef) = dpnp_rng_multivariate_normal_c<_DataType>; + const DPCTLEventVectorRef) = + dpnp_rng_multivariate_normal_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_negative_binomial_c(DPCTLSyclQueueRef q_ref, @@ -1142,29 +1041,18 @@ void dpnp_rng_negative_binomial_c(void* result, const double a, const double p, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_negative_binomial_c<_DataType>(q_ref, - result, - a, - p, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_negative_binomial_c<_DataType>(q_ref, result, a, p, size, 
dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_negative_binomial_default_c)(void*, - const double, - const double, - const size_t) = dpnp_rng_negative_binomial_c<_DataType>; +void (*dpnp_rng_negative_binomial_default_c)(void*, const double, const double, const size_t) = + dpnp_rng_negative_binomial_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_negative_binomial_ext_c)( - DPCTLSyclQueueRef, - void*, - const double, - const double, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_negative_binomial_c<_DataType>; + DPCTLSyclQueueRef, void*, const double, const double, const size_t, const DPCTLEventVectorRef) = + dpnp_rng_negative_binomial_c<_DataType>; template class dpnp_rng_noncentral_chisquare_c_kernel1; @@ -1323,29 +1211,19 @@ void dpnp_rng_noncentral_chisquare_c(void* result, const _DataType df, const _Da { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_noncentral_chisquare_c<_DataType>(q_ref, - result, - df, - nonc, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_rng_noncentral_chisquare_c<_DataType>(q_ref, result, df, nonc, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_noncentral_chisquare_default_c)(void*, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_noncentral_chisquare_c<_DataType>; +void (*dpnp_rng_noncentral_chisquare_default_c)(void*, const _DataType, const _DataType, const size_t) = + dpnp_rng_noncentral_chisquare_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_noncentral_chisquare_ext_c)( - DPCTLSyclQueueRef, - void*, - const _DataType, - const _DataType, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_noncentral_chisquare_c<_DataType>; + DPCTLSyclQueueRef, void*, const _DataType, const _DataType, const size_t, const DPCTLEventVectorRef) = + dpnp_rng_noncentral_chisquare_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_normal_c(DPCTLSyclQueueRef q_ref, @@ -1382,20 +1260,12 @@ void dpnp_rng_normal_c(void* result, const _DataType mean, const _DataType stdde { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_normal_c<_DataType>(q_ref, - result, - mean, - stddev, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_normal_c<_DataType>(q_ref, result, mean, stddev, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_normal_default_c)(void*, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_normal_c<_DataType>; +void (*dpnp_rng_normal_default_c)(void*, const _DataType, const _DataType, const size_t) = dpnp_rng_normal_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_normal_ext_c)(DPCTLSyclQueueRef, @@ -1446,11 +1316,7 @@ void dpnp_rng_pareto_c(void* result, const double alpha, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_pareto_c<_DataType>(q_ref, - result, - alpha, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_pareto_c<_DataType>(q_ref, result, alpha, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1458,11 +1324,8 @@ template void (*dpnp_rng_pareto_default_c)(void*, const double, const size_t) = dpnp_rng_pareto_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_pareto_ext_c)(DPCTLSyclQueueRef, - void*, - 
const double, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_pareto_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_pareto_ext_c)( + DPCTLSyclQueueRef, void*, const double, const size_t, const DPCTLEventVectorRef) = dpnp_rng_pareto_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_poisson_c(DPCTLSyclQueueRef q_ref, @@ -1498,11 +1361,7 @@ void dpnp_rng_poisson_c(void* result, const double lambda, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_poisson_c<_DataType>(q_ref, - result, - lambda, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_poisson_c<_DataType>(q_ref, result, lambda, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1510,11 +1369,8 @@ template void (*dpnp_rng_poisson_default_c)(void*, const double, const size_t) = dpnp_rng_poisson_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_poisson_ext_c)(DPCTLSyclQueueRef, - void*, - const double, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_poisson_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_poisson_ext_c)( + DPCTLSyclQueueRef, void*, const double, const size_t, const DPCTLEventVectorRef) = dpnp_rng_poisson_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_power_c(DPCTLSyclQueueRef q_ref, @@ -1557,11 +1413,7 @@ void dpnp_rng_power_c(void* result, const double alpha, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_power_c<_DataType>(q_ref, - result, - alpha, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_power_c<_DataType>(q_ref, result, alpha, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1569,11 +1421,8 @@ template void (*dpnp_rng_power_default_c)(void*, const double, const size_t) = dpnp_rng_power_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_power_ext_c)(DPCTLSyclQueueRef, - void*, - const double, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_power_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_power_ext_c)( + DPCTLSyclQueueRef, void*, const double, const size_t, const DPCTLEventVectorRef) = dpnp_rng_power_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_rayleigh_c(DPCTLSyclQueueRef q_ref, @@ -1616,11 +1465,7 @@ void dpnp_rng_rayleigh_c(void* result, const _DataType scale, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_rayleigh_c<_DataType>(q_ref, - result, - scale, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_rayleigh_c<_DataType>(q_ref, result, scale, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1716,22 +1561,14 @@ void dpnp_rng_shuffle_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_shuffle_c<_DataType>(q_ref, - result, - itemsize, - ndim, - high_dim_size, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_rng_shuffle_c<_DataType>(q_ref, result, itemsize, ndim, high_dim_size, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_shuffle_default_c)(void*, - const size_t, - const size_t, - const size_t, - const size_t) = dpnp_rng_shuffle_c<_DataType>; +void (*dpnp_rng_shuffle_default_c)(void*, const size_t, const size_t, const size_t, const size_t) = + 
dpnp_rng_shuffle_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_shuffle_ext_c)(DPCTLSyclQueueRef, @@ -1779,10 +1616,7 @@ void dpnp_rng_standard_cauchy_c(void* result, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_standard_cauchy_c<_DataType>(q_ref, - result, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_standard_cauchy_c<_DataType>(q_ref, result, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1790,10 +1624,8 @@ template void (*dpnp_rng_standard_cauchy_default_c)(void*, const size_t) = dpnp_rng_standard_cauchy_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_standard_cauchy_ext_c)(DPCTLSyclQueueRef, - void*, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_standard_cauchy_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_standard_cauchy_ext_c)(DPCTLSyclQueueRef, void*, const size_t, const DPCTLEventVectorRef) = + dpnp_rng_standard_cauchy_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_standard_exponential_c(DPCTLSyclQueueRef q_ref, @@ -1826,10 +1658,7 @@ void dpnp_rng_standard_exponential_c(void* result, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_standard_exponential_c<_DataType>(q_ref, - result, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_standard_exponential_c<_DataType>(q_ref, result, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1838,10 +1667,7 @@ void (*dpnp_rng_standard_exponential_default_c)(void*, const size_t) = dpnp_rng_ template DPCTLSyclEventRef (*dpnp_rng_standard_exponential_ext_c)( - DPCTLSyclQueueRef, - void*, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_standard_exponential_c<_DataType>; + DPCTLSyclQueueRef, void*, const size_t, const DPCTLEventVectorRef) = dpnp_rng_standard_exponential_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_standard_gamma_c(DPCTLSyclQueueRef q_ref, @@ -1874,11 +1700,7 @@ void dpnp_rng_standard_gamma_c(void* result, const _DataType shape, const size_t { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_standard_gamma_c<_DataType>(q_ref, - result, - shape, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_standard_gamma_c<_DataType>(q_ref, result, shape, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1923,10 +1745,7 @@ void dpnp_rng_standard_normal_c(void* result, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_standard_normal_c<_DataType>(q_ref, - result, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_standard_normal_c<_DataType>(q_ref, result, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -1934,10 +1753,8 @@ template void (*dpnp_rng_standard_normal_default_c)(void*, const size_t) = dpnp_rng_standard_normal_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_standard_normal_ext_c)(DPCTLSyclQueueRef, - void*, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_standard_normal_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_standard_normal_ext_c)(DPCTLSyclQueueRef, void*, const size_t, const DPCTLEventVectorRef) = + dpnp_rng_standard_normal_c<_DataType>; template DPCTLSyclEventRef 
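Throughout the random-kernel hunks above the change is purely a reflow: each host wrapper still builds a queue reference, forwards to the templated `dpnp_rng_*_c` implementation and waits on the returned event, and each `*_default_c` / `*_ext_c` name is a variable template of function-pointer type bound to one instantiation of that implementation. A minimal standard-C++ sketch of that binding pattern (all names hypothetical, no SYCL involved):

```cpp
#include <cstddef>
#include <iostream>

// Hypothetical "kernel": fills a buffer with a constant value.
template <typename T>
void fill_kernel(void* result, const T value, const std::size_t size)
{
    T* out = static_cast<T*>(result);
    for (std::size_t i = 0; i < size; ++i)
        out[i] = value;
}

// Variable template of function-pointer type bound to one instantiation,
// mirroring the `*_default_c = dpnp_rng_*_c<_DataType>` declarations above.
template <typename T>
void (*fill_default_c)(void*, const T, const std::size_t) = fill_kernel<T>;

int main()
{
    double buf[4];
    fill_default_c<double>(buf, 3.14, 4); // dispatch through the alias
    std::cout << buf[0] << '\n';          // prints 3.14
}
```

Binding instantiations to plain function pointers this way is what lets the dispatch tables later in the file store them as untyped `void*` entries.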
dpnp_rng_standard_t_c(DPCTLSyclQueueRef q_ref, @@ -1987,11 +1804,7 @@ void dpnp_rng_standard_t_c(void* result, const _DataType df, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_standard_t_c<_DataType>(q_ref, - result, - df, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_standard_t_c<_DataType>(q_ref, result, df, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -2096,22 +1909,14 @@ void dpnp_rng_triangular_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_triangular_c<_DataType>(q_ref, - result, - x_min, - x_mode, - x_max, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_rng_triangular_c<_DataType>(q_ref, result, x_min, x_mode, x_max, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_triangular_default_c)(void*, - const _DataType, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_triangular_c<_DataType>; +void (*dpnp_rng_triangular_default_c)(void*, const _DataType, const _DataType, const _DataType, const size_t) = + dpnp_rng_triangular_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_triangular_ext_c)(DPCTLSyclQueueRef, @@ -2162,12 +1967,7 @@ void dpnp_rng_uniform_c(void* result, const long low, const long high, const siz { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_uniform_c<_DataType>(q_ref, - result, - low, - high, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_uniform_c<_DataType>(q_ref, result, low, high, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -2301,29 +2101,19 @@ void dpnp_rng_vonmises_large_kappa_c(void* result, const _DataType mu, const _Da { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_vonmises_large_kappa_c<_DataType>(q_ref, - result, - mu, - kappa, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_rng_vonmises_large_kappa_c<_DataType>(q_ref, result, mu, kappa, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_vonmises_large_kappa_default_c)(void*, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_vonmises_large_kappa_c<_DataType>; +void (*dpnp_rng_vonmises_large_kappa_default_c)(void*, const _DataType, const _DataType, const size_t) = + dpnp_rng_vonmises_large_kappa_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_vonmises_large_kappa_ext_c)( - DPCTLSyclQueueRef, - void*, - const _DataType, - const _DataType, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_vonmises_large_kappa_c<_DataType>; + DPCTLSyclQueueRef, void*, const _DataType, const _DataType, const size_t, const DPCTLEventVectorRef) = + dpnp_rng_vonmises_large_kappa_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_vonmises_small_kappa_c(DPCTLSyclQueueRef q_ref, @@ -2423,29 +2213,19 @@ void dpnp_rng_vonmises_small_kappa_c(void* result, const _DataType mu, const _Da { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_vonmises_small_kappa_c<_DataType>(q_ref, - result, - mu, - kappa, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + 
dpnp_rng_vonmises_small_kappa_c<_DataType>(q_ref, result, mu, kappa, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_vonmises_small_kappa_default_c)(void*, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_vonmises_small_kappa_c<_DataType>; +void (*dpnp_rng_vonmises_small_kappa_default_c)(void*, const _DataType, const _DataType, const size_t) = + dpnp_rng_vonmises_small_kappa_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_vonmises_small_kappa_ext_c)( - DPCTLSyclQueueRef, - void*, - const _DataType, - const _DataType, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_vonmises_small_kappa_c<_DataType>; + DPCTLSyclQueueRef, void*, const _DataType, const _DataType, const size_t, const DPCTLEventVectorRef) = + dpnp_rng_vonmises_small_kappa_c<_DataType>; /* Vonmisses uses the rejection algorithm compared against the wrapped Cauchy distribution suggested by Best and Fisher and documented in @@ -2481,20 +2261,13 @@ void dpnp_rng_vonmises_c(void* result, const _DataType mu, const _DataType kappa { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_vonmises_c<_DataType>(q_ref, - result, - mu, - kappa, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_vonmises_c<_DataType>(q_ref, result, mu, kappa, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } template -void (*dpnp_rng_vonmises_default_c)(void*, - const _DataType, - const _DataType, - const size_t) = dpnp_rng_vonmises_c<_DataType>; +void (*dpnp_rng_vonmises_default_c)(void*, const _DataType, const _DataType, const size_t) = + dpnp_rng_vonmises_c<_DataType>; template DPCTLSyclEventRef (*dpnp_rng_vonmises_ext_c)(DPCTLSyclQueueRef, @@ -2591,16 +2364,10 @@ void dpnp_rng_wald_c(void* result, const _DataType mean, const _DataType scale, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_wald_c<_DataType>(q_ref, - result, - mean, - scale, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_wald_c<_DataType>(q_ref, result, mean, scale, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } - template void (*dpnp_rng_wald_default_c)(void*, const _DataType, const _DataType, const size_t) = dpnp_rng_wald_c<_DataType>; @@ -2654,11 +2421,7 @@ void dpnp_rng_weibull_c(void* result, const double alpha, const size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_weibull_c<_DataType>(q_ref, - result, - alpha, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_weibull_c<_DataType>(q_ref, result, alpha, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -2666,11 +2429,8 @@ template void (*dpnp_rng_weibull_default_c)(void*, const double, const size_t) = dpnp_rng_weibull_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_weibull_ext_c)(DPCTLSyclQueueRef, - void*, - const double, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_weibull_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_weibull_ext_c)( + DPCTLSyclQueueRef, void*, const double, const size_t, const DPCTLEventVectorRef) = dpnp_rng_weibull_c<_DataType>; template DPCTLSyclEventRef dpnp_rng_zipf_c(DPCTLSyclQueueRef q_ref, @@ -2748,11 +2508,7 @@ void dpnp_rng_zipf_c(void* result, const _DataType a, const size_t size) { DPCTLSyclQueueRef q_ref = 
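The comment retained above credits the von Mises sampler to the Best and Fisher rejection scheme with a wrapped-Cauchy envelope. As a rough illustration of that algorithm only, here is a host-side sketch assuming `kappa > 0`; it is not the dpnp kernel itself:

```cpp
#include <cmath>
#include <iostream>
#include <random>

// Best & Fisher (1979) rejection sampling for the von Mises distribution,
// using a wrapped-Cauchy proposal; illustrative sketch only.
double vonmises_sample(double mu, double kappa, std::mt19937& gen)
{
    std::uniform_real_distribution<double> u(0.0, 1.0);
    const double pi = std::acos(-1.0);
    const double tau = 1.0 + std::sqrt(1.0 + 4.0 * kappa * kappa);
    const double rho = (tau - std::sqrt(2.0 * tau)) / (2.0 * kappa);
    const double r = (1.0 + rho * rho) / (2.0 * rho);

    double f = 0.0;
    for (;;) {
        const double u1 = u(gen), u2 = u(gen);
        const double z = std::cos(pi * u1);
        f = (1.0 + r * z) / (r + z);   // candidate from the wrapped-Cauchy envelope
        const double c = kappa * (r - f);
        if (c * (2.0 - c) - u2 > 0.0 || std::log(c / u2) + 1.0 - c >= 0.0)
            break;                      // accept
    }
    const double sign = (u(gen) > 0.5) ? 1.0 : -1.0;
    return mu + sign * std::acos(f);
}

int main()
{
    std::mt19937 gen(42);
    std::cout << vonmises_sample(0.0, 4.0, gen) << '\n';
}
```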
reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_rng_zipf_c<_DataType>(q_ref, - result, - a, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_rng_zipf_c<_DataType>(q_ref, result, a, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -2760,11 +2516,8 @@ template void (*dpnp_rng_zipf_default_c)(void*, const _DataType, const size_t) = dpnp_rng_zipf_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_rng_zipf_ext_c)(DPCTLSyclQueueRef, - void*, - const _DataType, - const size_t, - const DPCTLEventVectorRef) = dpnp_rng_zipf_c<_DataType>; +DPCTLSyclEventRef (*dpnp_rng_zipf_ext_c)( + DPCTLSyclQueueRef, void*, const _DataType, const size_t, const DPCTLEventVectorRef) = dpnp_rng_zipf_c<_DataType>; void func_map_init_random(func_map_t& fmap) { @@ -2772,11 +2525,9 @@ void func_map_init_random(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_RNG_BETA_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_rng_beta_ext_c}; - fmap[DPNPFuncName::DPNP_FN_RNG_BINOMIAL][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_rng_binomial_default_c}; + fmap[DPNPFuncName::DPNP_FN_RNG_BINOMIAL][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_rng_binomial_default_c}; - fmap[DPNPFuncName::DPNP_FN_RNG_BINOMIAL_EXT][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_rng_binomial_ext_c}; + fmap[DPNPFuncName::DPNP_FN_RNG_BINOMIAL_EXT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_rng_binomial_ext_c}; fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_rng_chisquare_default_c}; @@ -2805,16 +2556,14 @@ void func_map_init_random(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_RNG_GAUSSIAN][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_rng_gaussian_default_c}; fmap[DPNPFuncName::DPNP_FN_RNG_GAUSSIAN][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_rng_gaussian_default_c}; - fmap[DPNPFuncName::DPNP_FN_RNG_GAUSSIAN_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_rng_gaussian_ext_c}; - fmap[DPNPFuncName::DPNP_FN_RNG_GAUSSIAN_EXT][eft_FLT][eft_FLT] = {eft_FLT, - (void*)dpnp_rng_gaussian_ext_c}; + fmap[DPNPFuncName::DPNP_FN_RNG_GAUSSIAN_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_rng_gaussian_ext_c}; + fmap[DPNPFuncName::DPNP_FN_RNG_GAUSSIAN_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_rng_gaussian_ext_c}; fmap[DPNPFuncName::DPNP_FN_RNG_GEOMETRIC][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_rng_geometric_default_c}; fmap[DPNPFuncName::DPNP_FN_RNG_GEOMETRIC_EXT][eft_INT][eft_INT] = {eft_INT, - (void*)dpnp_rng_geometric_ext_c}; + (void*)dpnp_rng_geometric_ext_c}; fmap[DPNPFuncName::DPNP_FN_RNG_GUMBEL][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_rng_gumbel_default_c}; @@ -2917,17 +2666,17 @@ void func_map_init_random(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_NORMAL_EXT][eft_DBL][eft_DBL] = { eft_DBL, (void*)dpnp_rng_standard_normal_ext_c}; - fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_T][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_rng_standard_t_default_c}; + fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_T][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_rng_standard_t_default_c}; - fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_T_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_rng_standard_t_ext_c}; + fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_T_EXT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_rng_standard_t_ext_c}; fmap[DPNPFuncName::DPNP_FN_RNG_TRIANGULAR][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_rng_triangular_default_c}; fmap[DPNPFuncName::DPNP_FN_RNG_TRIANGULAR_EXT][eft_DBL][eft_DBL] = {eft_DBL, - (void*)dpnp_rng_triangular_ext_c}; + 
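The `func_map_init_random` hunk above only re-wraps the registration lines; the structure being filled is a nested dispatch table keyed by function id and operand element types, whose entries pair a result-type tag with a type-erased kernel pointer. A hedged sketch of that mechanism with made-up names (the real `func_map_t`, `DPNPFuncName` and `eft_*` definitions live in dpnp headers not shown in this patch):

```cpp
#include <cstddef>
#include <iostream>
#include <map>

enum class FuncName { RNG_BINOMIAL };
enum ElemType { eft_INT, eft_DBL };

struct FuncData {
    ElemType return_type; // tag describing the output element type
    void* ptr;            // type-erased pointer to the selected kernel
};

// fmap[name][input1 type][input2 type] -> {result type, kernel}
using func_map_t = std::map<FuncName, std::map<ElemType, std::map<ElemType, FuncData>>>;

template <typename T>
void binomial_kernel(void* result, const double p, const std::size_t size)
{
    T* out = static_cast<T*>(result);
    for (std::size_t i = 0; i < size; ++i)
        out[i] = static_cast<T>(p * 10); // placeholder body, not a real sampler
}

int main()
{
    func_map_t fmap;
    // Casting a function pointer through void* is conditionally supported,
    // but it is exactly the (void*) cast the registrations above rely on.
    fmap[FuncName::RNG_BINOMIAL][eft_INT][eft_INT] = {eft_INT, (void*)binomial_kernel<int>};

    FuncData fd = fmap[FuncName::RNG_BINOMIAL][eft_INT][eft_INT];
    auto fn = reinterpret_cast<void (*)(void*, const double, const std::size_t)>(fd.ptr);
    int out[3];
    fn(out, 0.5, 3);
    std::cout << out[0] << '\n'; // prints 5
}
```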
(void*)dpnp_rng_triangular_ext_c}; fmap[DPNPFuncName::DPNP_FN_RNG_UNIFORM][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_rng_uniform_default_c}; fmap[DPNPFuncName::DPNP_FN_RNG_UNIFORM][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_rng_uniform_default_c}; diff --git a/dpnp/backend/kernels/dpnp_krnl_reduction.cpp b/dpnp/backend/kernels/dpnp_krnl_reduction.cpp index e03aabee5aec..d386f250d963 100644 --- a/dpnp/backend/kernels/dpnp_krnl_reduction.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_reduction.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -123,8 +123,7 @@ DPCTLSyclEventRef dpnp_sum_c(DPCTLSyclQueueRef q_ref, input_it.set_axes(axes, axes_ndim); const size_t output_size = input_it.get_output_size(); - auto policy = - oneapi::dpl::execution::make_device_policy>(q); + auto policy = oneapi::dpl::execution::make_device_policy>(q); for (size_t output_id = 0; output_id < output_size; ++output_id) { // type of "init" determine internal algorithm accumulator type @@ -132,8 +131,7 @@ DPCTLSyclEventRef dpnp_sum_c(DPCTLSyclQueueRef q_ref, policy, input_it.begin(output_id), input_it.end(output_id), init, std::plus<_DataType_output>()); policy.queue().wait(); // TODO move out of the loop - q.memcpy( - result + output_id, &accumulator, sizeof(_DataType_output)).wait(); // result[output_id] = accumulator; + q.memcpy(result + output_id, &accumulator, sizeof(_DataType_output)).wait(); // result[output_id] = accumulator; } return event_ref; @@ -151,16 +149,8 @@ void dpnp_sum_c(void* result_out, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_sum_c<_DataType_output, _DataType_input>(q_ref, - result_out, - input_in, - input_shape, - input_shape_ndim, - axes, - axes_ndim, - initial, - where, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_sum_c<_DataType_output, _DataType_input>( + q_ref, result_out, input_in, input_shape, input_shape_ndim, axes, axes_ndim, initial, where, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } @@ -240,8 +230,7 @@ DPCTLSyclEventRef dpnp_prod_c(DPCTLSyclQueueRef q_ref, input_it.set_axes(axes, axes_ndim); const size_t output_size = input_it.get_output_size(); - auto policy = - oneapi::dpl::execution::make_device_policy>(q); + auto policy = oneapi::dpl::execution::make_device_policy>(q); for (size_t output_id = 0; output_id < output_size; ++output_id) { // type of "init" determine internal algorithm accumulator type @@ -249,8 +238,7 @@ DPCTLSyclEventRef dpnp_prod_c(DPCTLSyclQueueRef q_ref, policy, input_it.begin(output_id), input_it.end(output_id), init, std::multiplies<_DataType_output>()); policy.queue().wait(); // TODO move out of the loop - q.memcpy( - result + output_id, &accumulator, sizeof(_DataType_output)).wait(); // result[output_id] = accumulator; + q.memcpy(result + output_id, &accumulator, sizeof(_DataType_output)).wait(); // result[output_id] = accumulator; } return event_ref; @@ -268,16 +256,8 @@ void dpnp_prod_c(void* result_out, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_prod_c<_DataType_output, _DataType_input>(q_ref, - result_out, - input_in, - input_shape, - input_shape_ndim, - axes, - 
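The `dpnp_sum_c` and `dpnp_prod_c` hunks keep the comment that the type of `init` determines the reduction's internal accumulator type: passing an `init` of the output type makes the device reduction accumulate in that type even when the input elements are narrower. The same rule holds for `std::accumulate` / `std::reduce`, which this host-only sketch (no oneDPL or SYCL dependency) illustrates:

```cpp
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    // Ten million copies of 0.1f; the exact sum is 1e6.
    std::vector<float> data(10000000, 0.1f);

    // The init argument fixes the accumulator type, exactly as the comment
    // in dpnp_sum_c notes for the device reduction.
    float with_float_init = std::accumulate(data.begin(), data.end(), 0.0f);
    double with_double_init = std::accumulate(data.begin(), data.end(), 0.0);

    std::cout << with_float_init << '\n';  // noticeably off from 1e6 (float rounding accumulates)
    std::cout << with_double_init << '\n'; // close to 1e6, limited only by 0.1f's own rounding
}
```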
axes_ndim, - initial, - where, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_prod_c<_DataType_output, _DataType_input>( + q_ref, result_out, input_in, input_shape, input_shape_ndim, axes, axes_ndim, initial, where, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } diff --git a/dpnp/backend/kernels/dpnp_krnl_searching.cpp b/dpnp/backend/kernels/dpnp_krnl_searching.cpp index 39156ea07c44..38bd1e021033 100644 --- a/dpnp/backend/kernels/dpnp_krnl_searching.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_searching.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -34,11 +34,8 @@ template class dpnp_argmax_c_kernel; template -DPCTLSyclEventRef dpnp_argmax_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_argmax_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -50,8 +47,7 @@ DPCTLSyclEventRef dpnp_argmax_c(DPCTLSyclQueueRef q_ref, _DataType* array_1 = input1_ptr.get_ptr(); _idx_DataType* result = reinterpret_cast<_idx_DataType*>(result1); - auto policy = - oneapi::dpl::execution::make_device_policy>(q); + auto policy = oneapi::dpl::execution::make_device_policy>(q); _DataType* res = std::max_element(policy, array_1, array_1 + size); policy.queue().wait(); @@ -67,11 +63,8 @@ void dpnp_argmax_c(void* array1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_argmax_c<_DataType, _idx_DataType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_argmax_c<_DataType, _idx_DataType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -79,21 +72,15 @@ template void (*dpnp_argmax_default_c)(void*, void*, size_t) = dpnp_argmax_c<_DataType, _idx_DataType>; template -DPCTLSyclEventRef (*dpnp_argmax_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_argmax_c<_DataType, _idx_DataType>; +DPCTLSyclEventRef (*dpnp_argmax_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_argmax_c<_DataType, _idx_DataType>; template class dpnp_argmin_c_kernel; template -DPCTLSyclEventRef dpnp_argmin_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_argmin_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -104,8 +91,7 @@ DPCTLSyclEventRef dpnp_argmin_c(DPCTLSyclQueueRef q_ref, _DataType* array_1 = input1_ptr.get_ptr(); _idx_DataType* result = reinterpret_cast<_idx_DataType*>(result1); - auto policy = - oneapi::dpl::execution::make_device_policy>(q); + auto policy = oneapi::dpl::execution::make_device_policy>(q); _DataType* res = std::min_element(policy, array_1, array_1 + size); policy.queue().wait(); @@ -121,11 +107,8 @@ void dpnp_argmin_c(void* array1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = 
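In the `dpnp_argmax_c` / `dpnp_argmin_c` hunks the kernel body is `std::max_element` / `std::min_element` run with a device policy; the argmax index then comes from turning the returned pointer back into an offset from the start of the array. A plain-C++ sketch of that last step (host algorithm only, no oneDPL policy):

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <iterator>

int main()
{
    double data[] = {3.0, -1.5, 7.25, 0.0};

    // max_element returns a pointer into the array; the argmax index is its
    // distance from the beginning, mirroring how the kernel converts `res`.
    double* res = std::max_element(std::begin(data), std::end(data));
    std::ptrdiff_t argmax = res - data;

    std::cout << argmax << '\n'; // prints 2
}
```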
reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_argmin_c<_DataType, _idx_DataType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_argmin_c<_DataType, _idx_DataType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -133,11 +116,8 @@ template void (*dpnp_argmin_default_c)(void*, void*, size_t) = dpnp_argmin_c<_DataType, _idx_DataType>; template -DPCTLSyclEventRef (*dpnp_argmin_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_argmin_c<_DataType, _idx_DataType>; +DPCTLSyclEventRef (*dpnp_argmin_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_argmin_c<_DataType, _idx_DataType>; void func_map_init_searching(func_map_t& fmap) { diff --git a/dpnp/backend/kernels/dpnp_krnl_sorting.cpp b/dpnp/backend/kernels/dpnp_krnl_sorting.cpp index 614bb94f0705..07070c04bc74 100644 --- a/dpnp/backend/kernels/dpnp_krnl_sorting.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_sorting.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -51,11 +51,8 @@ template class dpnp_argsort_c_kernel; template -DPCTLSyclEventRef dpnp_argsort_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_argsort_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -70,8 +67,7 @@ DPCTLSyclEventRef dpnp_argsort_c(DPCTLSyclQueueRef q_ref, std::iota(result, result + size, 0); - auto policy = - oneapi::dpl::execution::make_device_policy>(q); + auto policy = oneapi::dpl::execution::make_device_policy>(q); std::sort(policy, result, result + size, _argsort_less<_DataType, _idx_DataType>(array_1)); @@ -85,11 +81,8 @@ void dpnp_argsort_c(void* array1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_argsort_c<_DataType, _idx_DataType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_argsort_c<_DataType, _idx_DataType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } @@ -98,11 +91,8 @@ template void (*dpnp_argsort_default_c)(void*, void*, size_t) = dpnp_argsort_c<_DataType, _idx_DataType>; template -DPCTLSyclEventRef (*dpnp_argsort_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_argsort_c<_DataType, _idx_DataType>; +DPCTLSyclEventRef (*dpnp_argsort_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_argsort_c<_DataType, _idx_DataType>; // template void dpnp_argsort_c(void* array1_in, void* result1, size_t size); // template void dpnp_argsort_c(void* array1_in, void* result1, size_t size); @@ -189,8 +179,7 @@ DPCTLSyclEventRef dpnp_partition_c(DPCTLSyclQueueRef q_ref, } } - shape_elem_type* shape = reinterpret_cast(sycl::malloc_shared(ndim * sizeof(shape_elem_type), - q)); + shape_elem_type* shape = reinterpret_cast(sycl::malloc_shared(ndim * 
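`dpnp_argsort_c` above fills the result with `std::iota` and then sorts those indices with an `_argsort_less`-style comparator that compares the values the indices point at. The equivalent host-only sketch:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    std::vector<double> values{0.5, -2.0, 3.25, 1.0};

    // 0,1,2,... then sort the indices by the values they refer to.
    std::vector<std::size_t> idx(values.size());
    std::iota(idx.begin(), idx.end(), 0);
    std::sort(idx.begin(), idx.end(),
              [&](std::size_t i1, std::size_t i2) { return values[i1] < values[i2]; });

    for (std::size_t i : idx)
        std::cout << i << ' '; // prints 1 0 3 2
    std::cout << '\n';
}
```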
sizeof(shape_elem_type), q)); auto memcpy_event = q.memcpy(shape, shape_, ndim * sizeof(shape_elem_type)); memcpy_event.wait(); @@ -234,35 +223,25 @@ void dpnp_partition_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_partition_c<_DataType>(q_ref, - array1_in, - array2_in, - result1, - kth, - shape_, - ndim, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_partition_c<_DataType>(q_ref, array1_in, array2_in, result1, kth, shape_, ndim, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_partition_default_c)(void*, - void*, - void*, - const size_t, - const shape_elem_type*, - const size_t) = dpnp_partition_c<_DataType>; +void (*dpnp_partition_default_c)(void*, void*, void*, const size_t, const shape_elem_type*, const size_t) = + dpnp_partition_c<_DataType>; template DPCTLSyclEventRef (*dpnp_partition_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - void*, - const size_t, - const shape_elem_type*, - const size_t, - const DPCTLEventVectorRef) = dpnp_partition_c<_DataType>; + void*, + void*, + void*, + const size_t, + const shape_elem_type*, + const size_t, + const DPCTLEventVectorRef) = dpnp_partition_c<_DataType>; template class dpnp_searchsorted_c_kernel; @@ -387,45 +366,27 @@ void dpnp_searchsorted_c( { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_searchsorted_c<_DataType, _IndexingType>(q_ref, - result1, - array1_in, - v1_in, - side, - arr_size, - v_size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_searchsorted_c<_DataType, _IndexingType>( + q_ref, result1, array1_in, v1_in, side, arr_size, v_size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_searchsorted_default_c)(void*, - const void*, - const void*, - bool, - const size_t, - const size_t) = dpnp_searchsorted_c<_DataType, _IndexingType>; +void (*dpnp_searchsorted_default_c)(void*, const void*, const void*, bool, const size_t, const size_t) = + dpnp_searchsorted_c<_DataType, _IndexingType>; template -DPCTLSyclEventRef (*dpnp_searchsorted_ext_c)(DPCTLSyclQueueRef, - void*, - const void*, - const void*, - bool, - const size_t, - const size_t, - const DPCTLEventVectorRef) = dpnp_searchsorted_c<_DataType, _IndexingType>; +DPCTLSyclEventRef (*dpnp_searchsorted_ext_c)( + DPCTLSyclQueueRef, void*, const void*, const void*, bool, const size_t, const size_t, const DPCTLEventVectorRef) = + dpnp_searchsorted_c<_DataType, _IndexingType>; template class dpnp_sort_c_kernel; template -DPCTLSyclEventRef dpnp_sort_c(DPCTLSyclQueueRef q_ref, - void* array1_in, - void* result1, - size_t size, - const DPCTLEventVectorRef dep_event_vec_ref) +DPCTLSyclEventRef dpnp_sort_c( + DPCTLSyclQueueRef q_ref, void* array1_in, void* result1, size_t size, const DPCTLEventVectorRef dep_event_vec_ref) { // avoid warning unused variable (void)dep_event_vec_ref; @@ -456,11 +417,7 @@ void dpnp_sort_c(void* array1_in, void* result1, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_sort_c<_DataType>(q_ref, - array1_in, - result1, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_sort_c<_DataType>(q_ref, array1_in, result1, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } 
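The `dpnp_partition_c` and `dpnp_searchsorted_c` hunks reformat kernels whose behaviour corresponds to two standard algorithms: partial ordering around the k-th element, and left/right insertion-point search in a sorted array. A hedged host-side sketch follows; reading the boolean `side` flag as the usual left/right choice is my assumption, not something stated in this patch:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Partition: after nth_element, position kth holds the value it would
    // have in a full sort, smaller values before it and larger ones after.
    std::vector<int> a{9, 1, 8, 2, 7, 3};
    const std::size_t kth = 2;
    std::nth_element(a.begin(), a.begin() + kth, a.end());
    std::cout << a[kth] << '\n'; // prints 3

    // Searchsorted: left/right insertion points for a value in a sorted array.
    std::vector<int> sorted{1, 3, 3, 7};
    auto left = std::lower_bound(sorted.begin(), sorted.end(), 3) - sorted.begin();
    auto right = std::upper_bound(sorted.begin(), sorted.end(), 3) - sorted.begin();
    std::cout << left << ' ' << right << '\n'; // prints 1 3
}
```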
@@ -469,11 +426,8 @@ template void (*dpnp_sort_default_c)(void*, void*, size_t) = dpnp_sort_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_sort_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_sort_c<_DataType>; +DPCTLSyclEventRef (*dpnp_sort_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_sort_c<_DataType>; void func_map_init_sorting(func_map_t& fmap) { @@ -497,23 +451,23 @@ void func_map_init_sorting(func_map_t& fmap) fmap[DPNPFuncName::DPNP_FN_PARTITION_EXT][eft_FLT][eft_FLT] = {eft_FLT, (void*)dpnp_partition_ext_c}; fmap[DPNPFuncName::DPNP_FN_PARTITION_EXT][eft_DBL][eft_DBL] = {eft_DBL, (void*)dpnp_partition_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_INT][eft_INT] = { - eft_INT, (void*)dpnp_searchsorted_default_c}; - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_LNG][eft_LNG] = { - eft_LNG, (void*)dpnp_searchsorted_default_c}; - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_searchsorted_default_c}; - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_searchsorted_default_c}; - - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_INT][eft_INT] = { - eft_INT, (void*)dpnp_searchsorted_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void*)dpnp_searchsorted_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_searchsorted_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_searchsorted_ext_c}; + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_INT][eft_INT] = {eft_INT, + (void*)dpnp_searchsorted_default_c}; + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_LNG][eft_LNG] = {eft_LNG, + (void*)dpnp_searchsorted_default_c}; + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_searchsorted_default_c}; + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_searchsorted_default_c}; + + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_INT][eft_INT] = {eft_INT, + (void*)dpnp_searchsorted_ext_c}; + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_LNG][eft_LNG] = {eft_LNG, + (void*)dpnp_searchsorted_ext_c}; + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_searchsorted_ext_c}; + fmap[DPNPFuncName::DPNP_FN_SEARCHSORTED_EXT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_searchsorted_ext_c}; fmap[DPNPFuncName::DPNP_FN_SORT][eft_INT][eft_INT] = {eft_INT, (void*)dpnp_sort_default_c}; fmap[DPNPFuncName::DPNP_FN_SORT][eft_LNG][eft_LNG] = {eft_LNG, (void*)dpnp_sort_default_c}; diff --git a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp index abf77ff25eec..06fccb2cff90 100644 --- a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -87,50 +87,50 @@ void dpnp_correlate_c(void* result_out, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_correlate_c<_DataType_output, _DataType_input1, _DataType_input2>( - q_ref, - result_out, - input1_in, - input1_size, - input1_shape, - input1_shape_ndim, - input2_in, - input2_size, - input2_shape, - input2_shape_ndim, - where, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_correlate_c<_DataType_output, _DataType_input1, _DataType_input2>(q_ref, + result_out, + input1_in, + input1_size, + input1_shape, + input1_shape_ndim, + input2_in, + input2_size, + input2_shape, + input2_shape_ndim, + where, + dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_correlate_default_c)( - void*, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t*) = dpnp_correlate_c<_DataType_output, _DataType_input1, _DataType_input2>; +void (*dpnp_correlate_default_c)(void*, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const size_t*) = + dpnp_correlate_c<_DataType_output, _DataType_input1, _DataType_input2>; template -DPCTLSyclEventRef (*dpnp_correlate_ext_c)( - DPCTLSyclQueueRef, - void*, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const void*, - const size_t, - const shape_elem_type*, - const size_t, - const size_t*, - const DPCTLEventVectorRef) = dpnp_correlate_c<_DataType_output, _DataType_input1, _DataType_input2>; +DPCTLSyclEventRef (*dpnp_correlate_ext_c)(DPCTLSyclQueueRef, + void*, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const void*, + const size_t, + const shape_elem_type*, + const size_t, + const size_t*, + const DPCTLEventVectorRef) = + dpnp_correlate_c<_DataType_output, _DataType_input1, _DataType_input2>; template class dpnp_cov_c_kernel1; @@ -186,7 +186,7 @@ DPCTLSyclEventRef dpnp_cov_c(DPCTLSyclQueueRef q_ref, const _DataType alpha = _DataType(1) / (ncols - 1); const _DataType beta = _DataType(0); - event_syrk = mkl_blas::syrk(q, // queue &exec_queue, + event_syrk = mkl_blas::syrk(q, // queue &exec_queue, oneapi::mkl::uplo::upper, // uplo upper_lower, oneapi::mkl::transpose::nontrans, // transpose trans, nrows, // std::int64_t n, @@ -231,12 +231,7 @@ void dpnp_cov_c(void* array1_in, void* result1, size_t nrows, size_t ncols) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_cov_c<_DataType>(q_ref, - array1_in, - result1, - nrows, - ncols, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_cov_c<_DataType>(q_ref, array1_in, result1, nrows, ncols, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); } @@ -244,12 +239,8 @@ template void (*dpnp_cov_default_c)(void*, void*, size_t, size_t) = dpnp_cov_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_cov_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - size_t, - size_t, - const DPCTLEventVectorRef) = dpnp_cov_c<_DataType>; +DPCTLSyclEventRef (*dpnp_cov_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, size_t, const DPCTLEventVectorRef) = + dpnp_cov_c<_DataType>; template DPCTLSyclEventRef dpnp_count_nonzero_c(DPCTLSyclQueueRef q_ref, @@ -293,11 
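The `dpnp_cov_c` hunk above only realigns the comments around the `mkl_blas::syrk` call, which performs `C = alpha * A * A^T + beta * C` with `alpha = 1/(ncols - 1)` and `beta = 0`; assuming the rows were mean-centered earlier in the kernel (that part is not visible in this hunk), the product is the sample covariance matrix. A BLAS-free sketch of the same arithmetic on a tiny case:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Two variables (rows), three observations (cols), already mean-centered.
    const std::size_t nrows = 2, ncols = 3;
    std::vector<double> a{-1.0, 0.0, 1.0,    // row 0
                          -2.0, 0.0, 2.0};   // row 1

    // cov = (1 / (ncols - 1)) * A * A^T, what syrk with alpha = 1/(ncols-1),
    // beta = 0 produces (syrk fills only the upper triangle; this fills both).
    std::vector<double> cov(nrows * nrows, 0.0);
    const double alpha = 1.0 / (ncols - 1);
    for (std::size_t i = 0; i < nrows; ++i)
        for (std::size_t j = 0; j < nrows; ++j)
            for (std::size_t k = 0; k < ncols; ++k)
                cov[i * nrows + j] += alpha * a[i * ncols + k] * a[j * ncols + k];

    std::cout << cov[0] << ' ' << cov[1] << '\n'   // 1 2
              << cov[2] << ' ' << cov[3] << '\n';  // 2 4
}
```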
+284,8 @@ void dpnp_count_nonzero_c(void* array1_in, void* result1_out, size_t size) { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_count_nonzero_c<_DataType_input, _DataType_output>(q_ref, - array1_in, - result1_out, - size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_count_nonzero_c<_DataType_input, _DataType_output>(q_ref, array1_in, result1_out, size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } @@ -306,12 +294,8 @@ template void (*dpnp_count_nonzero_default_c)(void*, void*, size_t) = dpnp_count_nonzero_c<_DataType_input, _DataType_output>; template -DPCTLSyclEventRef (*dpnp_count_nonzero_ext_c)( - DPCTLSyclQueueRef, - void*, - void*, - size_t, - const DPCTLEventVectorRef) = dpnp_count_nonzero_c<_DataType_input, _DataType_output>; +DPCTLSyclEventRef (*dpnp_count_nonzero_ext_c)(DPCTLSyclQueueRef, void*, void*, size_t, const DPCTLEventVectorRef) = + dpnp_count_nonzero_c<_DataType_input, _DataType_output>; template class dpnp_max_c_kernel; @@ -531,27 +515,15 @@ void dpnp_max_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_max_c<_DataType>(q_ref, - array1_in, - result1, - result_size, - shape, - ndim, - axis, - naxis, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_max_c<_DataType>(q_ref, array1_in, result1, result_size, shape, ndim, axis, naxis, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_max_default_c)(void*, - void*, - const size_t, - const shape_elem_type*, - size_t, - const shape_elem_type*, - size_t) = dpnp_max_c<_DataType>; +void (*dpnp_max_default_c)(void*, void*, const size_t, const shape_elem_type*, size_t, const shape_elem_type*, size_t) = + dpnp_max_c<_DataType>; template DPCTLSyclEventRef (*dpnp_max_ext_c)(DPCTLSyclQueueRef, @@ -630,25 +602,15 @@ void dpnp_mean_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_mean_c<_DataType, _ResultType>(q_ref, - array1_in, - result1, - shape, - ndim, - axis, - naxis, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_mean_c<_DataType, _ResultType>(q_ref, array1_in, result1, shape, ndim, axis, naxis, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_mean_default_c)(void*, - void*, - const shape_elem_type*, - size_t, - const shape_elem_type*, - size_t) = dpnp_mean_c<_DataType, _ResultType>; +void (*dpnp_mean_default_c)(void*, void*, const shape_elem_type*, size_t, const shape_elem_type*, size_t) = + dpnp_mean_c<_DataType, _ResultType>; template DPCTLSyclEventRef (*dpnp_mean_ext_c)(DPCTLSyclQueueRef, @@ -716,25 +678,15 @@ void dpnp_median_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_median_c<_DataType, _ResultType>(q_ref, - array1_in, - result1, - shape, - ndim, - axis, - naxis, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_median_c<_DataType, _ResultType>(q_ref, array1_in, result1, shape, ndim, axis, naxis, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_median_default_c)(void*, - void*, - const shape_elem_type*, - size_t, 
- const shape_elem_type*, - size_t) = dpnp_median_c<_DataType, _ResultType>; +void (*dpnp_median_default_c)(void*, void*, const shape_elem_type*, size_t, const shape_elem_type*, size_t) = + dpnp_median_c<_DataType, _ResultType>; template DPCTLSyclEventRef (*dpnp_median_ext_c)(DPCTLSyclQueueRef, @@ -958,27 +910,15 @@ void dpnp_min_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_min_c<_DataType>(q_ref, - array1_in, - result1, - result_size, - shape, - ndim, - axis, - naxis, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_min_c<_DataType>(q_ref, array1_in, result1, result_size, shape, ndim, axis, naxis, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_min_default_c)(void*, - void*, - const size_t, - const shape_elem_type*, - size_t, - const shape_elem_type*, - size_t) = dpnp_min_c<_DataType>; +void (*dpnp_min_default_c)(void*, void*, const size_t, const shape_elem_type*, size_t, const shape_elem_type*, size_t) = + dpnp_min_c<_DataType>; template DPCTLSyclEventRef (*dpnp_min_ext_c)(DPCTLSyclQueueRef, @@ -1042,13 +982,8 @@ void dpnp_nanvar_c(void* array1_in, void* mask_arr1, void* result1, const size_t { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_nanvar_c<_DataType>(q_ref, - array1_in, - mask_arr1, - result1, - result_size, - arr_size, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = + dpnp_nanvar_c<_DataType>(q_ref, array1_in, mask_arr1, result1, result_size, arr_size, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } @@ -1057,13 +992,8 @@ template void (*dpnp_nanvar_default_c)(void*, void*, void*, const size_t, size_t) = dpnp_nanvar_c<_DataType>; template -DPCTLSyclEventRef (*dpnp_nanvar_ext_c)(DPCTLSyclQueueRef, - void*, - void*, - void*, - const size_t, - size_t, - const DPCTLEventVectorRef) = dpnp_nanvar_c<_DataType>; +DPCTLSyclEventRef (*dpnp_nanvar_ext_c)( + DPCTLSyclQueueRef, void*, void*, void*, const size_t, size_t, const DPCTLEventVectorRef) = dpnp_nanvar_c<_DataType>; template DPCTLSyclEventRef dpnp_std_c(DPCTLSyclQueueRef q_ref, @@ -1103,22 +1033,23 @@ DPCTLSyclEventRef dpnp_std_c(DPCTLSyclQueueRef q_ref, const size_t var_strides_size_in_bytes = var_ndim * sizeof(shape_elem_type); shape_elem_type* var_shape = reinterpret_cast(sycl::malloc_shared(var_shape_size_in_bytes, q)); *var_shape = 1; - shape_elem_type* var_strides = reinterpret_cast(sycl::malloc_shared(var_strides_size_in_bytes, - q)); + shape_elem_type* var_strides = + reinterpret_cast(sycl::malloc_shared(var_strides_size_in_bytes, q)); *var_strides = 1; - DPCTLSyclEventRef e_sqrt_ref = - dpnp_sqrt_c<_ResultType, _ResultType>(q_ref, result1, - result1_size, - result1_ndim, - result1_shape, - result1_strides, - var, - var_size, - var_ndim, - var_shape, - var_strides, - NULL, NULL); + DPCTLSyclEventRef e_sqrt_ref = dpnp_sqrt_c<_ResultType, _ResultType>(q_ref, + result1, + result1_size, + result1_ndim, + result1_shape, + result1_strides, + var, + var_size, + var_ndim, + var_shape, + var_strides, + NULL, + NULL); DPCTLEvent_WaitAndThrow(e_sqrt_ref); DPCTLEvent_Delete(e_sqrt_ref); @@ -1142,27 +1073,15 @@ void dpnp_std_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_std_c<_DataType, 
_ResultType>(q_ref, - array1_in, - result1, - shape, - ndim, - axis, - naxis, - ddof, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_std_c<_DataType, _ResultType>( + q_ref, array1_in, result1, shape, ndim, axis, naxis, ddof, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_std_default_c)(void*, - void*, - const shape_elem_type*, - size_t, - const shape_elem_type*, - size_t, - size_t) = dpnp_std_c<_DataType, _ResultType>; +void (*dpnp_std_default_c)(void*, void*, const shape_elem_type*, size_t, const shape_elem_type*, size_t, size_t) = + dpnp_std_c<_DataType, _ResultType>; template DPCTLSyclEventRef (*dpnp_std_ext_c)(DPCTLSyclQueueRef, @@ -1212,8 +1131,8 @@ DPCTLSyclEventRef dpnp_var_c(DPCTLSyclQueueRef q_ref, dpnp_mean_c<_DataType, _ResultType>(array1, mean, shape, ndim, axis, naxis); _ResultType mean_val = mean[0]; - _ResultType* squared_deviations = reinterpret_cast<_ResultType*>(sycl::malloc_shared(size * sizeof(_ResultType), - q)); + _ResultType* squared_deviations = + reinterpret_cast<_ResultType*>(sycl::malloc_shared(size * sizeof(_ResultType), q)); sycl::range<1> gws(size); auto kernel_parallel_for_func = [=](sycl::id<1> global_id) { @@ -1254,27 +1173,15 @@ void dpnp_var_c(void* array1_in, { DPCTLSyclQueueRef q_ref = reinterpret_cast(&DPNP_QUEUE); DPCTLEventVectorRef dep_event_vec_ref = nullptr; - DPCTLSyclEventRef event_ref = dpnp_var_c<_DataType, _ResultType>(q_ref, - array1_in, - result1, - shape, - ndim, - axis, - naxis, - ddof, - dep_event_vec_ref); + DPCTLSyclEventRef event_ref = dpnp_var_c<_DataType, _ResultType>( + q_ref, array1_in, result1, shape, ndim, axis, naxis, ddof, dep_event_vec_ref); DPCTLEvent_WaitAndThrow(event_ref); DPCTLEvent_Delete(event_ref); } template -void (*dpnp_var_default_c)(void*, - void*, - const shape_elem_type*, - size_t, - const shape_elem_type*, - size_t, - size_t) = dpnp_var_c<_DataType, _ResultType>; +void (*dpnp_var_default_c)(void*, void*, const shape_elem_type*, size_t, const shape_elem_type*, size_t, size_t) = + dpnp_var_c<_DataType, _ResultType>; template DPCTLSyclEventRef (*dpnp_var_ext_c)(DPCTLSyclQueueRef, @@ -1293,87 +1200,87 @@ void func_map_init_statistics(func_map_t& fmap) eft_INT, (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_correlate_default_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_INT][eft_DBL] = { eft_DBL, (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_correlate_default_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_LNG][eft_DBL] = { eft_DBL, (void*)dpnp_correlate_default_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_correlate_default_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_correlate_default_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_correlate_default_c}; - 
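Taken together, the `dpnp_var_c` and `dpnp_std_c` hunks above show the usual decomposition: variance is the mean of squared deviations from the mean (the `squared_deviations` buffer filled by the parallel-for), and standard deviation is obtained by applying `dpnp_sqrt_c` to that variance. A scalar sketch of the same relationship, including the `ddof` correction these kernels accept as a parameter:

```cpp
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    std::vector<double> x{2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0};
    const std::size_t ddof = 0; // 0 -> population variance, 1 -> sample variance

    double mean = 0.0;
    for (double v : x)
        mean += v;
    mean /= x.size();

    // var = sum((x - mean)^2) / (n - ddof); std = sqrt(var)
    double var = 0.0;
    for (double v : x)
        var += (v - mean) * (v - mean);
    var /= (x.size() - ddof);

    std::cout << var << ' ' << std::sqrt(var) << '\n'; // prints 4 2
}
```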
fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_DBL][eft_INT] = { eft_DBL, (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_DBL][eft_LNG] = { eft_DBL, (void*)dpnp_correlate_default_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_correlate_default_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_DBL][eft_FLT] = {eft_DBL, + (void*)dpnp_correlate_default_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_correlate_default_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_INT][eft_INT] = { eft_INT, (void*)dpnp_correlate_ext_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_INT][eft_LNG] = { eft_LNG, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_INT][eft_FLT] = { - eft_DBL, (void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_INT][eft_FLT] = {eft_DBL, + (void*)dpnp_correlate_ext_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_INT][eft_DBL] = { eft_DBL, (void*)dpnp_correlate_ext_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_LNG][eft_INT] = { eft_LNG, (void*)dpnp_correlate_ext_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_LNG][eft_FLT] = { - eft_DBL, (void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_LNG][eft_FLT] = {eft_DBL, + (void*)dpnp_correlate_ext_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_LNG][eft_DBL] = { eft_DBL, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_INT] = { - eft_DBL, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_LNG] = { - eft_DBL, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_DBL] = { - eft_DBL, (void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_INT] = {eft_DBL, + (void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_LNG] = {eft_DBL, + (void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_FLT] = {eft_FLT, + (void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_FLT][eft_DBL] = {eft_DBL, + (void*)dpnp_correlate_ext_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_DBL][eft_INT] = { eft_DBL, (void*)dpnp_correlate_ext_c}; fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_DBL][eft_LNG] = { eft_DBL, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_DBL][eft_FLT] = { - eft_DBL, (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_DBL][eft_FLT] = {eft_DBL, + 
(void*)dpnp_correlate_ext_c}; + fmap[DPNPFuncName::DPNP_FN_CORRELATE_EXT][eft_DBL][eft_DBL] = {eft_DBL, + (void*)dpnp_correlate_ext_c}; - fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO][eft_BLN][eft_BLN] = { - eft_LNG, (void*)dpnp_count_nonzero_default_c}; + fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO][eft_BLN][eft_BLN] = {eft_LNG, + (void*)dpnp_count_nonzero_default_c}; fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO][eft_INT][eft_INT] = { eft_LNG, (void*)dpnp_count_nonzero_default_c}; fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_count_nonzero_default_c}; - fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO][eft_FLT][eft_FLT] = { - eft_LNG, (void*)dpnp_count_nonzero_default_c}; + fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO][eft_FLT][eft_FLT] = {eft_LNG, + (void*)dpnp_count_nonzero_default_c}; fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO][eft_DBL][eft_DBL] = { eft_LNG, (void*)dpnp_count_nonzero_default_c}; - fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO_EXT][eft_BLN][eft_BLN] = { - eft_LNG, (void*)dpnp_count_nonzero_ext_c}; + fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO_EXT][eft_BLN][eft_BLN] = {eft_LNG, + (void*)dpnp_count_nonzero_ext_c}; fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO_EXT][eft_INT][eft_INT] = { eft_LNG, (void*)dpnp_count_nonzero_ext_c}; fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO_EXT][eft_LNG][eft_LNG] = { eft_LNG, (void*)dpnp_count_nonzero_ext_c}; - fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO_EXT][eft_FLT][eft_FLT] = { - eft_LNG, (void*)dpnp_count_nonzero_ext_c}; + fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO_EXT][eft_FLT][eft_FLT] = {eft_LNG, + (void*)dpnp_count_nonzero_ext_c}; fmap[DPNPFuncName::DPNP_FN_COUNT_NONZERO_EXT][eft_DBL][eft_DBL] = { eft_LNG, (void*)dpnp_count_nonzero_ext_c}; diff --git a/dpnp/backend/src/dpnp_iterator.hpp b/dpnp/backend/src/dpnp_iterator.hpp index d046545fdab8..ce936222182d 100644 --- a/dpnp/backend/src/dpnp_iterator.hpp +++ b/dpnp/backend/src/dpnp_iterator.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without @@ -319,15 +319,14 @@ class DPNPC_id final broadcast_axes_size = valid_axes.size(); const size_type broadcast_axes_size_in_bytes = broadcast_axes_size * sizeof(size_type); - broadcast_axes = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, - broadcast_axes_size_in_bytes)); + broadcast_axes = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, broadcast_axes_size_in_bytes)); std::copy(valid_axes.begin(), valid_axes.end(), broadcast_axes); output_size = std::accumulate( output_shape, output_shape + output_shape_size, size_type(1), std::multiplies()); - output_shape_strides = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, - output_shape_size_in_bytes)); + output_shape_strides = + reinterpret_cast(dpnp_memory_alloc_c(queue_ref, output_shape_size_in_bytes)); get_shape_offsets_inkernel(output_shape, output_shape_size, output_shape_strides); iteration_size = 1; @@ -416,8 +415,8 @@ class DPNPC_id final output_size = std::accumulate( output_shape, output_shape + output_shape_size, size_type(1), std::multiplies()); - output_shape_strides = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, - output_shape_size_in_bytes)); + output_shape_strides = + reinterpret_cast(dpnp_memory_alloc_c(queue_ref, output_shape_size_in_bytes)); get_shape_offsets_inkernel(output_shape, output_shape_size, output_shape_strides); iteration_size = 1; @@ -429,13 +428,13 @@ class DPNPC_id final iteration_size *= axis_dim; } - iteration_shape_strides = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, - iteration_shape_size_in_bytes)); + iteration_shape_strides = + reinterpret_cast(dpnp_memory_alloc_c(queue_ref, iteration_shape_size_in_bytes)); get_shape_offsets_inkernel( iteration_shape.data(), iteration_shape.size(), iteration_shape_strides); - axes_shape_strides = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, - iteration_shape_size_in_bytes)); + axes_shape_strides = + reinterpret_cast(dpnp_memory_alloc_c(queue_ref, iteration_shape_size_in_bytes)); for (size_t i = 0; i < static_cast(iteration_shape_size); ++i) { axes_shape_strides[i] = input_shape_strides[axes[i]]; @@ -503,8 +502,8 @@ class DPNPC_id final } input_shape_size = __shape.size(); - input_shape = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, - input_shape_size * sizeof(size_type))); + input_shape = + reinterpret_cast(dpnp_memory_alloc_c(queue_ref, input_shape_size * sizeof(size_type))); std::copy(__shape.begin(), __shape.end(), input_shape); input_shape_strides = @@ -539,8 +538,8 @@ class DPNPC_id final } input_shape_size = __shape.size(); - input_shape = reinterpret_cast(dpnp_memory_alloc_c(queue_ref, - input_shape_size * sizeof(size_type))); + input_shape = + reinterpret_cast(dpnp_memory_alloc_c(queue_ref, input_shape_size * sizeof(size_type))); std::copy(__shape.begin(), __shape.end(), input_shape); input_shape_strides = @@ -646,7 +645,7 @@ class DPNPC_id final free_output_memory(); } - DPCTLSyclQueueRef queue_ref = nullptr; /**< reference to SYCL queue */ + DPCTLSyclQueueRef queue_ref = nullptr; /**< reference to SYCL queue */ pointer data = nullptr; /**< input array begin pointer */ size_type input_size = size_type{}; /**< input array size */ diff --git a/dpnp/backend/src/dpnp_utils.hpp b/dpnp/backend/src/dpnp_utils.hpp index f84dd3a74721..d9f332c41981 100644 --- a/dpnp/backend/src/dpnp_utils.hpp +++ b/dpnp/backend/src/dpnp_utils.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 
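The `DPNPC_id` hunks above repeatedly call `get_shape_offsets_inkernel` to fill stride buffers from a shape. For a C-contiguous layout that computation is the running product of the trailing dimensions; a hedged sketch of the idea (the real helper's exact signature and conventions may differ):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Row-major strides from a shape: stride[i] is the product of all dims to
// the right of i; the innermost stride is 1.
std::vector<std::size_t> shape_offsets(const std::vector<std::size_t>& shape)
{
    std::vector<std::size_t> strides(shape.size(), 1);
    for (std::size_t i = shape.size(); i-- > 1;)
        strides[i - 1] = strides[i] * shape[i];
    return strides;
}

int main()
{
    for (std::size_t s : shape_offsets({2, 3, 4}))
        std::cout << s << ' '; // prints 12 4 1
    std::cout << '\n';
}
```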
2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -240,8 +240,7 @@ static inline bool */ namespace { - [[maybe_unused]] - std::vector cast_event_vector(const DPCTLEventVectorRef event_vec_ref) + [[maybe_unused]] std::vector cast_event_vector(const DPCTLEventVectorRef event_vec_ref) { const size_t event_vec_size = DPCTLEventVector_Size(event_vec_ref); @@ -255,7 +254,7 @@ namespace } return event_vec; } -} +} // namespace /** * @ingroup BACKEND_UTILS diff --git a/dpnp/backend/src/dpnpc_memory_adapter.hpp b/dpnp/backend/src/dpnpc_memory_adapter.hpp index 3b07795ed5f6..4da763780fe6 100644 --- a/dpnp/backend/src/dpnpc_memory_adapter.hpp +++ b/dpnp/backend/src/dpnpc_memory_adapter.hpp @@ -1,5 +1,5 @@ //***************************************************************************** -// Copyright (c) 2016-2020, Intel Corporation +// Copyright (c) 2016-2022, Intel Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -43,8 +43,8 @@ template class DPNPC_ptr_adapter final { - DPCTLSyclQueueRef queue_ref; /**< reference to SYCL queue */ - sycl::queue queue; /**< SYCL queue */ + DPCTLSyclQueueRef queue_ref; /**< reference to SYCL queue */ + sycl::queue queue; /**< SYCL queue */ void* aux_ptr = nullptr; /**< pointer to allocated memory by this adapter */ void* orig_ptr = nullptr; /**< original pointer to memory given by parameters */ size_t size_in_bytes = 0; /**< size of bytes of the memory */ @@ -163,16 +163,17 @@ class DPNPC_ptr_adapter final dpnp_memory_memcpy_c(queue_ref, orig_ptr, aux_ptr, size_in_bytes); } - void depends_on(const std::vector &new_deps) { - assert(allocated); + void depends_on(const std::vector& new_deps) + { + assert(allocated); deps.insert(std::end(deps), std::begin(new_deps), std::end(new_deps)); } - void depends_on(const sycl::event &new_dep) { - assert(allocated); + void depends_on(const sycl::event& new_dep) + { + assert(allocated); deps.push_back(new_dep); } - }; #endif // DPNP_MEMORY_ADAPTER_H diff --git a/dpnp/config.py b/dpnp/config.py index a9a6b6ec7c52..7d811837e0c0 100644 --- a/dpnp/config.py +++ b/dpnp/config.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -27,28 +27,29 @@ import os - -__DPNP_ORIGIN__ = int(os.getenv('DPNP_ORIGIN', 0)) -''' +__DPNP_ORIGIN__ = int(os.getenv("DPNP_ORIGIN", 0)) +""" Explicitly use original host Python NumPy -''' +""" -__DPNP_QUEUE_GPU__ = int(os.getenv('DPNP_QUEUE_GPU', 0)) -''' +__DPNP_QUEUE_GPU__ = int(os.getenv("DPNP_QUEUE_GPU", 0)) +""" Explicitly use GPU for SYCL queue -''' +""" -__DPNP_OUTPUT_NUMPY__ = int(os.getenv('DPNP_OUTPUT_NUMPY', 0)) -''' +__DPNP_OUTPUT_NUMPY__ = int(os.getenv("DPNP_OUTPUT_NUMPY", 0)) +""" Explicitly use NumPy.ndarray as return type for creation functions -''' +""" -__DPNP_OUTPUT_DPCTL__ = int(os.getenv('DPNP_OUTPUT_DPCTL', 1)) -''' +__DPNP_OUTPUT_DPCTL__ = int(os.getenv("DPNP_OUTPUT_DPCTL", 1)) +""" Explicitly use DPCtl package container as return type for creation functions -''' +""" -__DPNP_OUTPUT_DPCTL_DEFAULT_SHARED__ = int(os.getenv('DPNP_OUTPUT_DPCTL_DEFAULT_SHARED', 0)) -''' +__DPNP_OUTPUT_DPCTL_DEFAULT_SHARED__ = int( + os.getenv("DPNP_OUTPUT_DPCTL_DEFAULT_SHARED", 0) +) +""" Explicitly use SYCL shared memory parameter in DPCtl array constructor for creation functions -''' +""" diff --git a/dpnp/dparray.pyx b/dpnp/dparray.pyx index 1195f185ee73..da2f0f8e9940 100644 --- a/dpnp/dparray.pyx +++ b/dpnp/dparray.pyx @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -36,18 +36,23 @@ using USB interface for an Intel GPU device. from libcpp cimport bool as cpp_bool -from dpnp.dpnp_iface_types import * -from dpnp.dpnp_iface import * - # to avoid interference with Python internal functions -from dpnp.dpnp_iface import sum as iface_sum -from dpnp.dpnp_iface import prod as iface_prod +from dpnp.dpnp_iface import * from dpnp.dpnp_iface import get_dpnp_descriptor as iface_get_dpnp_descriptor +from dpnp.dpnp_iface import prod as iface_prod +from dpnp.dpnp_iface import sum as iface_sum +from dpnp.dpnp_iface_types import * from dpnp.dpnp_algo cimport * -from dpnp.dpnp_iface_statistics import min, max # TODO do the same as for iface_sum -from dpnp.dpnp_iface_logic import all, any # TODO do the same as for iface_sum + import numpy + +from dpnp.dpnp_iface_logic import all, any # TODO do the same as for iface_sum +from dpnp.dpnp_iface_statistics import ( # TODO do the same as for iface_sum + max, + min, +) + cimport numpy cimport dpnp.dpnp_utils as utils diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index 2d3f1a7870a9..c09dc0479240 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -25,14 +25,11 @@ # THE POSSIBILITY OF SUCH DAMAGE. 
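The configuration flags reformatted above are plain integer environment switches read once when dpnp/config.py is imported. A minimal sketch of opting in to one of them, reusing the same os.getenv pattern as the module (illustrative value only):

    import os

    # Request the GPU SYCL queue before dpnp is imported; config.py reads the
    # variable with int(os.getenv("DPNP_QUEUE_GPU", 0)) exactly as shown above.
    os.environ["DPNP_QUEUE_GPU"] = "1"
    assert int(os.getenv("DPNP_QUEUE_GPU", 0)) == 1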
# ***************************************************************************** cimport dpctl as c_dpctl - -cimport dpctl as c_dpctl - from libcpp cimport bool as cpp_bool +from dpnp.dpnp_algo cimport shape_elem_type, shape_type_c from dpnp.dpnp_utils.dpnp_algo_utils cimport dpnp_descriptor -from dpnp.dpnp_algo cimport shape_elem_type, shape_type_c cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this namespace for Enum import cdef enum DPNPFuncName "DPNPFuncName": diff --git a/dpnp/dpnp_algo/dpnp_algo.pyx b/dpnp/dpnp_algo/dpnp_algo.pyx index 18ab2041d00e..5d6d414ff5a5 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pyx +++ b/dpnp/dpnp_algo/dpnp_algo.pyx @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -34,21 +34,22 @@ and the rest of the library from libc.time cimport time, time_t from libcpp.vector cimport vector + +import dpctl + import dpnp import dpnp.config as config import dpnp.dpnp_container as dpnp_container import dpnp.dpnp_utils as utils_py from dpnp.dpnp_array import dpnp_array -import dpctl - cimport cpython +cimport numpy + cimport dpnp.dpnp_utils as utils -cimport numpy import numpy - __all__ = [ "dpnp_arange", "dpnp_astype", diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index 05623fc67a0a..f70082172923 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -25,9 +25,10 @@ # ***************************************************************************** import dpctl.tensor as dpt +import numpy from dpctl.tensor._device import normalize_queue_device + import dpnp -import numpy class dpnp_array: @@ -39,16 +40,18 @@ class dpnp_array: """ - def __init__(self, - shape, - dtype="f8", - buffer=None, - offset=0, - strides=None, - order="C", - device=None, - usm_type="device", - sycl_queue=None): + def __init__( + self, + shape, + dtype="f8", + buffer=None, + offset=0, + strides=None, + order="C", + device=None, + usm_type="device", + sycl_queue=None, + ): if buffer is not None: if not isinstance(buffer, dpt.usm_ndarray): raise TypeError( @@ -60,18 +63,20 @@ def __init__(self, "Expected buffer.shape={}, got {}" "".format(shape, buffer.shape) ) - self._array_obj = dpt.asarray(buffer, - copy=False, - order=order) + self._array_obj = dpt.asarray(buffer, copy=False, order=order) else: - sycl_queue_normalized = normalize_queue_device(sycl_queue=sycl_queue, device=device) - self._array_obj = dpt.usm_ndarray(shape, - dtype=dtype, - strides=strides, - buffer=usm_type, - offset=offset, - order=order, - buffer_ctor_kwargs={"queue": sycl_queue_normalized}) + sycl_queue_normalized = normalize_queue_device( + sycl_queue=sycl_queue, device=device + ) + self._array_obj = dpt.usm_ndarray( + shape, + dtype=dtype, + strides=strides, + buffer=usm_type, + offset=offset, + order=order, + buffer_ctor_kwargs={"queue": sycl_queue_normalized}, + ) @property def __sycl_usm_array_interface__(self): @@ -94,11 +99,11 @@ def T(self): return dpnp.transpose(self) def to_device(self, target_device): - """ - Transfer array to target device - """ + """Transfer array to target device.""" - return dpnp_array(shape=self.shape, buffer=self.get_array().to_device(target_device)) + return dpnp_array( + shape=self.shape, buffer=self.get_array().to_device(target_device) + ) @property def sycl_queue(self): @@ -122,30 +127,30 @@ def __abs__(self): def __add__(self, other): return dpnp.add(self, other) - # '__and__', - # '__array__', - # '__array_finalize__', - # '__array_function__', - # '__array_interface__', - # '__array_prepare__', - # '__array_priority__', - # '__array_struct__', - # '__array_ufunc__', - # '__array_wrap__', + # '__and__', + # '__array__', + # '__array_finalize__', + # '__array_function__', + # '__array_interface__', + # '__array_prepare__', + # '__array_priority__', + # '__array_struct__', + # '__array_ufunc__', + # '__array_wrap__', def __bool__(self): return self._array_obj.__bool__() - # '__class__', - # '__complex__', - # '__contains__', - # '__copy__', - # '__deepcopy__', - # '__delattr__', - # '__delitem__', - # '__dir__', - # '__divmod__', - # '__doc__', + # '__class__', + # '__complex__', + # '__contains__', + # '__copy__', + # '__deepcopy__', + # '__delattr__', + # '__delitem__', + # '__dir__', + # '__divmod__', + # '__doc__', def __eq__(self, other): return dpnp.equal(self, other) @@ -153,20 +158,21 @@ def __eq__(self, other): def __float__(self): return self._array_obj.__float__() - # '__floordiv__', - # '__format__', + # '__floordiv__', + # '__format__', def __ge__(self, other): return dpnp.greater_equal(self, other) - # '__getattribute__', + # '__getattribute__', def __getitem__(self, key): item = self._array_obj.__getitem__(key) if not isinstance(item, dpt.usm_ndarray): raise RuntimeError( "Expected dpctl.tensor.usm_ndarray, got {}" - "".format(type(item))) + "".format(type(item)) + ) res = 
self.__new__(dpnp_array) res._array_obj = item @@ -176,41 +182,39 @@ def __getitem__(self, key): def __gt__(self, other): return dpnp.greater(self, other) - # '__hash__', - # '__iadd__', - # '__iand__', - # '__ifloordiv__', - # '__ilshift__', - # '__imatmul__', - # '__imod__', - # '__imul__', - # '__index__', - # '__init__', - # '__init_subclass__', + # '__hash__', + # '__iadd__', + # '__iand__', + # '__ifloordiv__', + # '__ilshift__', + # '__imatmul__', + # '__imod__', + # '__imul__', + # '__index__', + # '__init__', + # '__init_subclass__', def __int__(self): return self._array_obj.__int__() - # '__invert__', - # '__ior__', - # '__ipow__', - # '__irshift__', - # '__isub__', - # '__iter__', - # '__itruediv__', - # '__ixor__', + # '__invert__', + # '__ior__', + # '__ipow__', + # '__irshift__', + # '__isub__', + # '__iter__', + # '__itruediv__', + # '__ixor__', def __le__(self, other): return dpnp.less_equal(self, other) def __len__(self): - """ - Performs the operation __len__. - """ + """Performs the operation __len__.""" return self._array_obj.__len__() - # '__lshift__', + # '__lshift__', def __lt__(self, other): return dpnp.less(self, other) @@ -230,9 +234,9 @@ def __ne__(self, other): def __neg__(self): return dpnp.negative(self) - # '__new__', - # '__or__', - # '__pos__', + # '__new__', + # '__or__', + # '__pos__', def __pow__(self, other): return dpnp.power(self, other) @@ -240,13 +244,13 @@ def __pow__(self, other): def __radd__(self, other): return dpnp.add(other, self) - # '__rand__', - # '__rdivmod__', - # '__reduce__', - # '__reduce_ex__', - # '__repr__', - # '__rfloordiv__', - # '__rlshift__', + # '__rand__', + # '__rdivmod__', + # '__reduce__', + # '__reduce_ex__', + # '__repr__', + # '__rfloordiv__', + # '__rlshift__', def __rmatmul__(self, other): return dpnp.matmul(other, self) @@ -257,26 +261,27 @@ def __rmod__(self, other): def __rmul__(self, other): return dpnp.multiply(other, self) - # '__ror__', - # '__rpow__', - # '__rrshift__', - # '__rshift__', - # '__rsub__', + # '__ror__', + # '__rpow__', + # '__rrshift__', + # '__rshift__', + # '__rsub__', def __rtruediv__(self, other): return dpnp.true_divide(other, self) - # '__rxor__', - # '__setattr__', + # '__rxor__', + # '__setattr__', def __setitem__(self, key, val): self._array_obj.__setitem__(key, val) - # '__setstate__', - # '__sizeof__', + # '__setstate__', + # '__sizeof__', def __str__(self): - """ Output values from the array to standard output + """ + Output values from the array to standard output. Example: [[ 136. 136. 136.] @@ -290,12 +295,12 @@ def __str__(self): def __sub__(self, other): return dpnp.subtract(self, other) - # '__subclasshook__', + # '__subclasshook__', def __truediv__(self, other): return dpnp.true_divide(self, other) - # '__xor__', + # '__xor__', def all(self, axis=None, out=None, keepdims=False): """ @@ -378,12 +383,11 @@ def argmin(self, axis=None, out=None): """ return dpnp.argmin(self, axis, out) -# 'argpartition', + # 'argpartition', def argsort(self, axis=-1, kind=None, order=None): """ - Return an ndarray of indices that sort the array along the - specified axis. + Return an ndarray of indices that sort the array along the specified axis. Parameters ---------- @@ -422,8 +426,9 @@ def argsort(self, axis=-1, kind=None, order=None): """ return dpnp.argsort(self, axis, kind, order) - def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): - """Copy the array with data type casting. 
+ def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True): + """ + Copy the array with data type casting. Args: dtype: Target type. @@ -446,19 +451,16 @@ def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): return dpnp.astype(self, dtype, order, casting, subok, copy) - # 'base', - # 'byteswap', - - def choose(input, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. + # 'base', + # 'byteswap', - """ + def choose(input, choices, out=None, mode="raise"): + """Construct an array from an index array and a set of arrays to choose from.""" return dpnp.choose(input, choices, out, mode) - # 'clip', - # 'compress', + # 'clip', + # 'compress', def conj(self): """ @@ -486,9 +488,9 @@ def conjugate(self): else: return dpnp.conjugate(self) - # 'copy', - # 'ctypes', - # 'cumprod', + # 'copy', + # 'ctypes', + # 'cumprod', def cumsum(self, axis=None, dtype=None, out=None): """ @@ -502,7 +504,7 @@ def cumsum(self, axis=None, dtype=None, out=None): return dpnp.cumsum(self, axis=axis, dtype=dtype, out=out) - # 'data', + # 'data', def diagonal(input, offset=0, axis1=0, axis2=1): """ @@ -516,17 +518,16 @@ def diagonal(input, offset=0, axis1=0, axis2=1): return dpnp.diagonal(input, offset, axis1, axis2) - # 'dot', + # 'dot', @property def dtype(self): - """ - """ + """Return data type of an object.""" return self._array_obj.dtype - # 'dump', - # 'dumps', + # 'dump', + # 'dumps', def fill(self, value): """ @@ -553,18 +554,15 @@ def fill(self, value): for i in range(self.size): self.flat[i] = value - # 'flags', + # 'flags', @property def flat(self): - """ - Return a flat iterator, or set a flattened version of self to value. - - """ + """Return a flat iterator, or set a flattened version of self to value.""" return dpnp.flatiter(self) - def flatten(self, order='C'): + def flatten(self, order="C"): """ Return a copy of the array collapsed into one dimension. @@ -587,21 +585,25 @@ def flatten(self, order='C'): """ new_arr = self.__new__(dpnp_array) - new_arr._array_obj = dpt.empty(self.shape, - dtype=self.dtype, - order=order, - device=self._array_obj.sycl_device, - usm_type=self._array_obj.usm_type, - sycl_queue=self._array_obj.sycl_queue) + new_arr._array_obj = dpt.empty( + self.shape, + dtype=self.dtype, + order=order, + device=self._array_obj.sycl_device, + usm_type=self._array_obj.usm_type, + sycl_queue=self._array_obj.sycl_queue, + ) if self.size > 0: - dpt._copy_utils._copy_from_usm_ndarray_to_usm_ndarray(new_arr._array_obj, self._array_obj) - new_arr._array_obj = dpt.reshape(new_arr._array_obj, (self.size, )) + dpt._copy_utils._copy_from_usm_ndarray_to_usm_ndarray( + new_arr._array_obj, self._array_obj + ) + new_arr._array_obj = dpt.reshape(new_arr._array_obj, (self.size,)) return new_arr - # 'getfield', - # 'imag', + # 'getfield', + # 'imag', def item(self, id=None): """ @@ -630,56 +632,71 @@ def item(self, id=None): if id is None: if self.size != 1: - raise ValueError("DPNP dparray::item(): can only convert an array of size 1 to a Python scalar") + raise ValueError( + "DPNP dparray::item(): can only convert an array of size 1 to a Python scalar" + ) else: id = 0 return self.flat[id] - # 'itemset', + # 'itemset', @property def itemsize(self): - """ - """ - + """ """ return self._array_obj.itemsize - def max(self, axis=None, out=None, keepdims=numpy._NoValue, initial=numpy._NoValue, where=numpy._NoValue): - """ - Return the maximum along an axis. 
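The astype method above simply forwards to dpnp.astype. A brief usage sketch, assuming dpnp is installed and a default SYCL device is available:

    import dpnp as np

    x = np.array([1, 2, 3])
    y = x.astype("f8")  # delegates to dpnp.astype(self, dtype, ...)
    print(y.dtype)      # float64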
- """ + def max( + self, + axis=None, + out=None, + keepdims=numpy._NoValue, + initial=numpy._NoValue, + where=numpy._NoValue, + ): + """Return the maximum along an axis.""" return dpnp.max(self, axis, out, keepdims, initial, where) def mean(self, axis=None): - """ - Returns the average of the array elements. - """ + """Returns the average of the array elements.""" return dpnp.mean(self, axis) - def min(self, axis=None, out=None, keepdims=numpy._NoValue, initial=numpy._NoValue, where=numpy._NoValue): - """ - Return the minimum along a given axis. - """ + def min( + self, + axis=None, + out=None, + keepdims=numpy._NoValue, + initial=numpy._NoValue, + where=numpy._NoValue, + ): + """Return the minimum along a given axis.""" return dpnp.min(self, axis, out, keepdims, initial, where) - # 'nbytes', + # 'nbytes', @property def ndim(self): - """ - """ + """ """ return self._array_obj.ndim - # 'newbyteorder', - # 'nonzero', - # 'partition', + # 'newbyteorder', + # 'nonzero', + # 'partition', - def prod(self, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=True): + def prod( + self, + axis=None, + dtype=None, + out=None, + keepdims=False, + initial=None, + where=True, + ): """ Returns the prod along a given axis. @@ -691,13 +708,13 @@ def prod(self, axis=None, dtype=None, out=None, keepdims=False, initial=None, wh return dpnp.prod(self, axis, dtype, out, keepdims, initial, where) - # 'ptp', - # 'put', - # 'ravel', - # 'real', - # 'repeat', + # 'ptp', + # 'put', + # 'ravel', + # 'real', + # 'repeat', - def reshape(self, d0, *dn, order=b'C'): + def reshape(self, d0, *dn, order=b"C"): """ Returns an array containing the same data with a new shape. @@ -727,7 +744,7 @@ def reshape(self, d0, *dn, order=b'C'): return dpnp.reshape(self, shape_tup) - # 'resize', + # 'resize', def round(self, decimals=0, out=None): """ @@ -740,9 +757,9 @@ def round(self, decimals=0, out=None): return dpnp.around(self, decimals, out) - # 'searchsorted', - # 'setfield', - # 'setflags', + # 'searchsorted', + # 'setfield', + # 'setflags', @property def shape(self): @@ -759,7 +776,10 @@ def shape(self): @shape.setter def shape(self, newshape): - """Set new lengths of axes. A tuple of numbers represents size of each dimention. + """ + Set new lengths of axes. + + A tuple of numbers represents size of each dimention. It involves reshaping without copy. If the array cannot be reshaped without copy, it raises an exception. @@ -771,19 +791,17 @@ def shape(self, newshape): @property def shape(self): - """ - """ + """ """ return self._array_obj.shape @property def size(self): - """ - """ + """ """ return self._array_obj.size - # 'sort', + # 'sort', def squeeze(self, axis=None): """ @@ -797,7 +815,7 @@ def squeeze(self, axis=None): return dpnp.squeeze(self, axis) def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """ Returns the variance of the array elements, along given axis. + """Returns the variance of the array elements, along given axis. .. seealso:: :obj:`dpnp.var` for full documentation, @@ -808,12 +826,19 @@ def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): @property def strides(self): - """ - """ + """ """ return self._array_obj.strides - def sum(self, axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True): + def sum( + self, + axis=None, + dtype=None, + out=None, + keepdims=False, + initial=0, + where=True, + ): """ Returns the sum along a given axis. 
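The reductions above (max, mean, min, prod, sum, std, var) are thin wrappers that forward to the module-level dpnp functions with the same arguments. A short usage sketch with hypothetical values, assuming a default SYCL device:

    import dpnp as np

    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    assert float(a.sum()) == 10.0  # same as dpnp.sum(a)
    assert float(a.max()) == 4.0   # same as dpnp.max(a)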
@@ -825,9 +850,9 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=False, initial=0, where= return dpnp.sum(self, axis, dtype, out, keepdims, initial, where) - # 'swapaxes', + # 'swapaxes', - def take(self, indices, axis=None, out=None, mode='raise'): + def take(self, indices, axis=None, out=None, mode="raise"): """ Take elements from an array. @@ -837,11 +862,11 @@ def take(self, indices, axis=None, out=None, mode='raise'): return dpnp.take(self, indices, axis, out, mode) - # 'tobytes', - # 'tofile', - # 'tolist', - # 'tostring', - # 'trace', + # 'tobytes', + # 'tofile', + # 'tolist', + # 'tostring', + # 'trace', def transpose(self, *axes): """ @@ -873,4 +898,5 @@ def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return dpnp.var(self, axis, dtype, out, ddof, keepdims) - # 'view' + +# 'view' diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py index bfc7e469d700..d47da3fad763 100644 --- a/dpnp/dpnp_container.py +++ b/dpnp/dpnp_container.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -34,15 +34,14 @@ """ -import dpnp.config as config -# from dpnp.dparray import dparray -from dpnp.dpnp_array import dpnp_array - -import numpy - import dpctl.tensor as dpt +import numpy from dpctl.tensor._device import normalize_queue_device +import dpnp.config as config + +# from dpnp.dparray import dparray +from dpnp.dpnp_array import dpnp_array if config.__DPNP_OUTPUT_DPCTL__: try: @@ -64,43 +63,55 @@ ] -def asarray(x1, - dtype=None, - copy=False, - order="C", - device=None, - usm_type=None, - sycl_queue=None): +def asarray( + x1, + dtype=None, + copy=False, + order="C", + device=None, + usm_type=None, + sycl_queue=None, +): """Converts `x1` to `dpnp_array`.""" if isinstance(x1, dpnp_array): x1_obj = x1.get_array() else: x1_obj = x1 - sycl_queue_normalized = normalize_queue_device(sycl_queue=sycl_queue, device=device) - array_obj = dpt.asarray(x1_obj, - dtype=dtype, - copy=copy, - order=order, - usm_type=usm_type, - sycl_queue=sycl_queue_normalized) + sycl_queue_normalized = normalize_queue_device( + sycl_queue=sycl_queue, device=device + ) + array_obj = dpt.asarray( + x1_obj, + dtype=dtype, + copy=copy, + order=order, + usm_type=usm_type, + sycl_queue=sycl_queue_normalized, + ) return dpnp_array(array_obj.shape, buffer=array_obj, order=order) -def empty(shape, - dtype="f8", - order="C", - device=None, - usm_type="device", - sycl_queue=None): +def empty( + shape, + dtype="f8", + order="C", + device=None, + usm_type="device", + sycl_queue=None, +): """Creates `dpnp_array` from uninitialized USM allocation.""" - sycl_queue_normalized = normalize_queue_device(sycl_queue=sycl_queue, device=device) - - array_obj = dpt.empty(shape, - dtype=dtype, - order=order, - usm_type=usm_type, - sycl_queue=sycl_queue_normalized) + sycl_queue_normalized = normalize_queue_device( + sycl_queue=sycl_queue, device=device + ) + + array_obj = dpt.empty( + shape, + dtype=dtype, + order=order, + usm_type=usm_type, + sycl_queue=sycl_queue_normalized, + ) return dpnp_array(array_obj.shape, buffer=array_obj, order=order) diff --git a/dpnp/dpnp_flatiter.py b/dpnp/dpnp_flatiter.py index e45f5289324f..d2f3c0aef5a2 100644 --- a/dpnp/dpnp_flatiter.py +++ b/dpnp/dpnp_flatiter.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # 
***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -24,10 +24,7 @@ # THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** -""" -Implementation of flatiter - -""" +"""Implementation of flatiter.""" import dpnp @@ -36,9 +33,7 @@ class flatiter: def __init__(self, X): if type(X) is not dpnp.ndarray: raise TypeError( - "Argument must be of type dpnp.ndarray, got {}".format( - type(X) - ) + "Argument must be of type dpnp.ndarray, got {}".format(type(X)) ) self.arr_ = X self.size_ = X.size diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index d0fccd6fcaed..4e213d580d49 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -40,14 +40,14 @@ """ -import os -import numpy -import numpy.lib.stride_tricks as np_st -import dpnp.config as config import collections +import os import dpctl +import numpy +import numpy.lib.stride_tricks as np_st +import dpnp.config as config from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * from dpnp.fft import * @@ -62,35 +62,34 @@ "dpnp_queue_initialize", "dpnp_queue_is_cpu", "get_dpnp_descriptor", - "get_include" + "get_include", ] from dpnp.dpnp_iface_arraycreation import * -from dpnp.dpnp_iface_bitwise import * -from dpnp.dpnp_iface_counting import * -from dpnp.dpnp_iface_indexing import * -from dpnp.dpnp_iface_libmath import * -from dpnp.dpnp_iface_linearalgebra import * -from dpnp.dpnp_iface_logic import * -from dpnp.dpnp_iface_manipulation import * -from dpnp.dpnp_iface_mathematical import * -from dpnp.dpnp_iface_searching import * -from dpnp.dpnp_iface_sorting import * -from dpnp.dpnp_iface_statistics import * -from dpnp.dpnp_iface_trigonometric import * - from dpnp.dpnp_iface_arraycreation import __all__ as __all__arraycreation +from dpnp.dpnp_iface_bitwise import * from dpnp.dpnp_iface_bitwise import __all__ as __all__bitwise +from dpnp.dpnp_iface_counting import * from dpnp.dpnp_iface_counting import __all__ as __all__counting +from dpnp.dpnp_iface_indexing import * from dpnp.dpnp_iface_indexing import __all__ as __all__indexing +from dpnp.dpnp_iface_libmath import * from dpnp.dpnp_iface_libmath import __all__ as __all__libmath +from dpnp.dpnp_iface_linearalgebra import * from dpnp.dpnp_iface_linearalgebra import __all__ as __all__linearalgebra +from dpnp.dpnp_iface_logic import * from dpnp.dpnp_iface_logic import __all__ as __all__logic +from dpnp.dpnp_iface_manipulation import * from dpnp.dpnp_iface_manipulation import __all__ as __all__manipulation +from dpnp.dpnp_iface_mathematical import * from dpnp.dpnp_iface_mathematical import __all__ as __all__mathematical +from dpnp.dpnp_iface_searching import * from dpnp.dpnp_iface_searching import __all__ as __all__searching +from dpnp.dpnp_iface_sorting import * from dpnp.dpnp_iface_sorting import __all__ as __all__sorting +from dpnp.dpnp_iface_statistics import * from dpnp.dpnp_iface_statistics import __all__ as __all__statistics +from dpnp.dpnp_iface_trigonometric import * from 
dpnp.dpnp_iface_trigonometric import __all__ as __all__trigonometric __all__ += __all__arraycreation @@ -126,7 +125,7 @@ def array_equal(a1, a2, equal_nan=False): return numpy.array_equal(a1, a2) -def asnumpy(input, order='C'): +def asnumpy(input, order="C"): """ Returns the NumPy array with input data. @@ -139,27 +138,34 @@ def asnumpy(input, order='C'): if isinstance(input, dpctl.tensor.usm_ndarray): return dpctl.tensor.to_numpy(input) - if config.__DPNP_OUTPUT_DPCTL__ and hasattr(input, "__sycl_usm_array_interface__"): + if config.__DPNP_OUTPUT_DPCTL__ and hasattr( + input, "__sycl_usm_array_interface__" + ): return dpctl.tensor.to_numpy(input.get_array()) return numpy.asarray(input, order=order) -def astype(x1, dtype, order='K', casting='unsafe', subok=True, copy=True): +def astype(x1, dtype, order="K", casting="unsafe", subok=True, copy=True): """Copy the array with data type casting.""" - if config.__DPNP_OUTPUT_DPCTL__ and hasattr(x1, "__sycl_usm_array_interface__"): + if config.__DPNP_OUTPUT_DPCTL__ and hasattr( + x1, "__sycl_usm_array_interface__" + ): import dpctl.tensor as dpt + # TODO: remove check dpctl.tensor has attribute "astype" if hasattr(dpt, "astype"): # return dpt.astype(x1, dtype, order=order, casting=casting, copy=copy) - return dpt.astype(x1.get_array(), dtype, order=order, casting=casting, copy=copy) + return dpt.astype( + x1.get_array(), dtype, order=order, casting=casting, copy=copy + ) x1_desc = get_dpnp_descriptor(x1) if not x1_desc: pass - elif order != 'K': + elif order != "K": pass - elif casting != 'unsafe': + elif casting != "unsafe": pass elif not subok: pass @@ -172,13 +178,19 @@ def astype(x1, dtype, order='K', casting='unsafe', subok=True, copy=True): else: return dpnp_astype(x1_desc, dtype).get_pyobj() - return call_origin(numpy.ndarray.astype, x1, dtype, order=order, casting=casting, subok=subok, copy=copy) + return call_origin( + numpy.ndarray.astype, + x1, + dtype, + order=order, + casting=casting, + subok=subok, + copy=copy, + ) def convert_single_elem_array_to_scalar(obj, keepdims=False): - """ - Convert array with single element to scalar - """ + """Convert array with single element to scalar.""" if (obj.ndim > 0) and (obj.size == 1) and (keepdims is False): return obj.dtype.type(obj[0]) @@ -186,7 +198,9 @@ def convert_single_elem_array_to_scalar(obj, keepdims=False): return obj -def get_dpnp_descriptor(ext_obj, copy_when_strides=True, copy_when_nondefault_queue=True): +def get_dpnp_descriptor( + ext_obj, copy_when_strides=True, copy_when_nondefault_queue=True +): """ Return True: never @@ -211,10 +225,15 @@ def get_dpnp_descriptor(ext_obj, copy_when_strides=True, copy_when_nondefault_qu # then this behavior can be disabled with setting "copy_when_strides" if copy_when_strides and getattr(ext_obj, "strides", None) is not None: # TODO: replace this workaround when usm_ndarray will provide such functionality - shape_offsets = tuple(numpy.prod(ext_obj.shape[i + 1:], dtype=numpy.int64) for i in range(ext_obj.ndim)) + shape_offsets = tuple( + numpy.prod(ext_obj.shape[i + 1 :], dtype=numpy.int64) + for i in range(ext_obj.ndim) + ) if hasattr(ext_obj, "__sycl_usm_array_interface__"): - ext_obj_offset = ext_obj.__sycl_usm_array_interface__.get("offset", 0) + ext_obj_offset = ext_obj.__sycl_usm_array_interface__.get( + "offset", 0 + ) else: ext_obj_offset = 0 @@ -229,7 +248,9 @@ def get_dpnp_descriptor(ext_obj, copy_when_strides=True, copy_when_nondefault_qu queue = getattr(arr_obj, "sycl_queue", None) if queue is not None and copy_when_nondefault_queue: 
default_queue = dpctl.SyclQueue() - queue_is_default = dpctl.utils.get_execution_queue([queue, default_queue]) is not None + queue_is_default = ( + dpctl.utils.get_execution_queue([queue, default_queue]) is not None + ) if not queue_is_default: ext_obj = array(arr_obj, sycl_queue=default_queue) @@ -241,9 +262,7 @@ def get_dpnp_descriptor(ext_obj, copy_when_strides=True, copy_when_nondefault_qu def get_include(): - """ - Return the directory that contains the DPNP C++ backend \\*.h header files. - """ + r"""Return the directory that contains the DPNP C++ backend \\*.h header files.""" dpnp_path = os.path.join(os.path.dirname(__file__), "backend", "include") diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 44c7b88c3c78..c397cf03e408 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -41,15 +41,13 @@ import numpy -import dpnp +import dpnp import dpnp.config as config +import dpnp.dpnp_container as dpnp_container from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * -import dpnp.dpnp_container as dpnp_container - - __all__ = [ "arange", "array", @@ -86,7 +84,7 @@ "triu", "vander", "zeros", - "zeros_like" + "zeros_like", ] @@ -113,7 +111,6 @@ def arange(start, stop=None, step=1, dtype=None): Examples -------- - >>> import dpnp as np >>> [i for i in np.arange(3)] [0, 1, 2] @@ -137,7 +134,9 @@ def arange(start, stop=None, step=1, dtype=None): pass elif (start is not None) and (stop is not None) and (start > stop): pass - elif (dtype is not None) and (dtype not in [dpnp.int32, dpnp.int64, dpnp.float32, dpnp.float64]): + elif (dtype is not None) and ( + dtype not in [dpnp.int32, dpnp.int64, dpnp.float32, dpnp.float64] + ): pass else: if dtype is None: @@ -155,16 +154,18 @@ def arange(start, stop=None, step=1, dtype=None): return call_origin(numpy.arange, start, stop=stop, step=step, dtype=dtype) -def array(x1, - dtype=None, - copy=True, - order="C", - subok=False, - ndmin=0, - like=None, - device=None, - usm_type=None, - sycl_queue=None): +def array( + x1, + dtype=None, + copy=True, + order="C", + subok=False, + ndmin=0, + like=None, + device=None, + usm_type=None, + sycl_queue=None, +): """ Creates an array. @@ -214,25 +215,29 @@ def array(x1, elif like is not None: pass else: - return dpnp_container.asarray(x1, - dtype=dtype, - copy=copy, - order=order, - device=device, - usm_type=usm_type, - sycl_queue=sycl_queue) - - return call_origin(numpy.array, - x1, - dtype=dtype, - copy=copy, - order=order, - subok=subok, - ndmin=ndmin, - like=like) - - -def asanyarray(a, dtype=None, order='C'): + return dpnp_container.asarray( + x1, + dtype=dtype, + copy=copy, + order=order, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) + + return call_origin( + numpy.array, + x1, + dtype=dtype, + copy=copy, + order=order, + subok=subok, + ndmin=ndmin, + like=like, + ) + + +def asanyarray(a, dtype=None, order="C"): """ Convert the input to an ndarray, but pass ndarray subclasses through. 
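A short round trip through the helpers above: arange builds a device array and asnumpy copies it back to the host. This assumes a default SYCL device is available:

    import numpy
    import dpnp as np

    x = np.arange(3)         # device array holding [0, 1, 2]
    host = np.asnumpy(x)     # copied back into a numpy.ndarray
    assert isinstance(host, numpy.ndarray)
    assert host.tolist() == [0, 1, 2]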
@@ -269,7 +274,7 @@ def asanyarray(a, dtype=None, order='C'): if isinstance(a, dpnp.ndarray): return a - if order != 'C': + if order != "C": pass else: return array(a, dtype=dtype, order=order) @@ -277,13 +282,15 @@ def asanyarray(a, dtype=None, order='C'): return call_origin(numpy.asanyarray, a, dtype, order) -def asarray(x1, - dtype=None, - order="C", - like=None, - device=None, - usm_type=None, - sycl_queue=None): +def asarray( + x1, + dtype=None, + order="C", + like=None, + device=None, + usm_type=None, + sycl_queue=None, +): """ Converts an input object into array. @@ -318,13 +325,15 @@ def asarray(x1, if like is not None: pass else: - return dpnp_container.asarray(x1, - dtype=dtype, - copy=True, # Converting Python sequence to usm_ndarray requires a copy - order=order, - device=device, - usm_type=usm_type, - sycl_queue=sycl_queue) + return dpnp_container.asarray( + x1, + dtype=dtype, + copy=True, # Converting Python sequence to usm_ndarray requires a copy + order=order, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) return call_origin(numpy.asarray, x1, dtype=dtype, order=order, like=like) @@ -365,7 +374,7 @@ def ascontiguousarray(a, dtype=None): # numpy.copy(a, order='K', subok=False) -def copy(x1, order='K', subok=False): +def copy(x1, order="K", subok=False): """ Return an array copy of the given object. @@ -390,9 +399,11 @@ def copy(x1, order='K', subok=False): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - if order != 'K': + if order != "K": pass elif subok: pass @@ -468,20 +479,24 @@ def diagflat(x1, k=0): x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) if x1_desc: input_ravel = dpnp.ravel(x1) - input_ravel_desc = dpnp.get_dpnp_descriptor(input_ravel, copy_when_nondefault_queue=False) + input_ravel_desc = dpnp.get_dpnp_descriptor( + input_ravel, copy_when_nondefault_queue=False + ) return dpnp_diag(input_ravel_desc, k).get_pyobj() return call_origin(numpy.diagflat, x1, k) -def empty(shape, - dtype="f8", - order="C", - like=None, - device=None, - usm_type="device", - sycl_queue=None): +def empty( + shape, + dtype="f8", + order="C", + like=None, + device=None, + usm_type="device", + sycl_queue=None, +): """ Return a new array of given shape and type, without initializing entries. @@ -510,17 +525,19 @@ def empty(shape, if like is not None: pass else: - return dpnp_container.empty(shape, - dtype=dtype, - order=order, - device=device, - usm_type=usm_type, - sycl_queue=sycl_queue) + return dpnp_container.empty( + shape, + dtype=dtype, + order=order, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) return call_origin(numpy.empty, shape, dtype=dtype, order=order, like=like) -def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): +def empty_like(prototype, dtype=None, order="C", subok=False, shape=None): """ Return a new array with the same shape and type as a given array. 
@@ -548,8 +565,8 @@ def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): """ - if (not use_origin_backend()): - if order not in ('C', 'c', None): + if not use_origin_backend(): + if order not in ("C", "c", None): pass elif subok is not False: pass @@ -557,15 +574,18 @@ def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): _shape = shape if shape is not None else prototype.shape _dtype = dtype if dtype is not None else prototype.dtype.type - result = create_output_descriptor_py(_object_to_tuple(_shape), _dtype, None).get_pyobj() + result = create_output_descriptor_py( + _object_to_tuple(_shape), _dtype, None + ).get_pyobj() return result return call_origin(numpy.empty_like, prototype, dtype, order, subok, shape) -def eye(N, M=None, k=0, dtype=None, order='C', **kwargs): +def eye(N, M=None, k=0, dtype=None, order="C", **kwargs): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. + For full documentation refer to :obj:`numpy.eye`. Limitations @@ -573,21 +593,25 @@ def eye(N, M=None, k=0, dtype=None, order='C', **kwargs): Input array is supported as :obj:`dpnp.ndarray`. Parameters ``order`` is supported only with default value. """ - if (not use_origin_backend()): + if not use_origin_backend(): if not isinstance(N, (int, dpnp.int, dpnp.int32, dpnp.int64)): pass - elif M is not None and not isinstance(M, (int, dpnp.int, dpnp.int32, dpnp.int64)): + elif M is not None and not isinstance( + M, (int, dpnp.int, dpnp.int32, dpnp.int64) + ): pass elif not isinstance(k, (int, dpnp.int, dpnp.int32, dpnp.int64)): pass - elif order != 'C': + elif order != "C": pass elif len(kwargs) != 0: pass else: return dpnp_eye(N, M=M, k=k, dtype=dtype).get_pyobj() - return call_origin(numpy.eye, N, M=M, k=k, dtype=dtype, order=order, **kwargs) + return call_origin( + numpy.eye, N, M=M, k=k, dtype=dtype, order=order, **kwargs + ) def frombuffer(buffer, **kwargs): @@ -672,7 +696,7 @@ def fromstring(string, **kwargs): return call_origin(numpy.fromstring, string, **kwargs) -def full(shape, fill_value, dtype=None, order='C'): +def full(shape, fill_value, dtype=None, order="C"): """ Return a new array of given shape and type, filled with `fill_value`. @@ -698,7 +722,7 @@ def full(shape, fill_value, dtype=None, order='C'): """ if not use_origin_backend(): - if order not in ('C', 'c', None): + if order not in ("C", "c", None): pass else: if dtype is None: @@ -710,7 +734,7 @@ def full(shape, fill_value, dtype=None, order='C'): # numpy.full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None) -def full_like(x1, fill_value, dtype=None, order='C', subok=False, shape=None): +def full_like(x1, fill_value, dtype=None, order="C", subok=False, shape=None): """ Return a full array with the same shape and type as a given array. 
@@ -739,7 +763,7 @@ def full_like(x1, fill_value, dtype=None, order='C', subok=False, shape=None): """ if not use_origin_backend(): - if order not in ('C', 'c', None): + if order not in ("C", "c", None): pass elif subok is not False: pass @@ -787,7 +811,9 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): if axis != 0: pass else: - return dpnp_geomspace(start, stop, num, endpoint, dtype, axis).get_pyobj() + return dpnp_geomspace( + start, stop, num, endpoint, dtype, axis + ).get_pyobj() return call_origin(numpy.geomspace, start, stop, num, endpoint, dtype, axis) @@ -826,7 +852,9 @@ def identity(n, dtype=None, *, like=None): return call_origin(numpy.identity, n, dtype=dtype, like=like) -def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): +def linspace( + start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0 +): """ Return evenly spaced numbers over a specified interval. @@ -871,11 +899,13 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis else: return res[0] - return call_origin(numpy.linspace, start, stop, num, endpoint, retstep, dtype, axis) + return call_origin( + numpy.linspace, start, stop, num, endpoint, retstep, dtype, axis + ) def loadtxt(fname, **kwargs): - """ + r""" Load data from a text file. Each row in the text file must have the same number of values. @@ -940,12 +970,16 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): if axis != 0: checker_throw_value_error("linspace", "axis", axis, 0) - return dpnp_logspace(start, stop, num, endpoint, base, dtype, axis).get_pyobj() + return dpnp_logspace( + start, stop, num, endpoint, base, dtype, axis + ).get_pyobj() - return call_origin(numpy.logspace, start, stop, num, endpoint, base, dtype, axis) + return call_origin( + numpy.logspace, start, stop, num, endpoint, base, dtype, axis + ) -def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): +def meshgrid(*xi, copy=True, sparse=False, indexing="xy"): """ Return coordinate matrices from coordinate vectors. @@ -995,7 +1029,9 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if not use_origin_backend(): # original limitation if indexing not in ["ij", "xy"]: - checker_throw_value_error("meshgrid", "indexing", indexing, "'ij' or 'xy'") + checker_throw_value_error( + "meshgrid", "indexing", indexing, "'ij' or 'xy'" + ) if copy is not True: checker_throw_value_error("meshgrid", "copy", copy, True) @@ -1067,7 +1103,7 @@ def __getitem__(self, key): ogrid = OGridClass() -def ones(shape, dtype=None, order='C'): +def ones(shape, dtype=None, order="C"): """ Return a new array of given shape and type, filled with ones. @@ -1097,8 +1133,8 @@ def ones(shape, dtype=None, order='C'): """ - if (not use_origin_backend()): - if order not in ('C', 'c', None): + if not use_origin_backend(): + if order not in ("C", "c", None): pass else: _dtype = dtype if dtype is not None else dpnp.float64 @@ -1109,7 +1145,7 @@ def ones(shape, dtype=None, order='C'): # numpy.ones_like(a, dtype=None, order='K', subok=True, shape=None) -def ones_like(x1, dtype=None, order='C', subok=False, shape=None): +def ones_like(x1, dtype=None, order="C", subok=False, shape=None): """ Return an array of ones with the same shape and type as a given array. 
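Usage sketch for the creation helpers above; only C order is handled natively and the dtype falls back to float64, so this assumes a default SYCL device with float64 support:

    import dpnp as np

    z = np.zeros((2, 3))   # filled with 0.0, default dtype float64
    o = np.ones_like(z)    # same shape and dtype, filled with 1.0
    assert o.shape == (2, 3)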
@@ -1140,7 +1176,7 @@ def ones_like(x1, dtype=None, order='C', subok=False, shape=None): x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) if x1_desc: - if order not in ('C', 'c', None): + if order not in ("C", "c", None): pass elif subok is not False: pass @@ -1184,15 +1220,15 @@ def ptp(arr, axis=None, out=None, keepdims=numpy._NoValue): def trace(x1, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ - Return the sum along diagonals of the array. + Return the sum along diagonals of the array. - For full documentation refer to :obj:`numpy.trace`. + For full documentation refer to :obj:`numpy.trace`. - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Parameters ``axis1``, ``axis2``, ``out`` and ``dtype`` are supported only with default values. - """ + Limitations + ----------- + Input array is supported as :obj:`dpnp.ndarray`. + Parameters ``axis1``, ``axis2``, ``out`` and ``dtype`` are supported only with default values. + """ x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) if x1_desc: @@ -1207,7 +1243,9 @@ def trace(x1, offset=0, axis1=0, axis2=1, dtype=None, out=None): elif out is not None: pass else: - return dpnp_trace(x1_desc, offset, axis1, axis2, dtype, out).get_pyobj() + return dpnp_trace( + x1_desc, offset, axis1, axis2, dtype, out + ).get_pyobj() return call_origin(numpy.trace, x1, offset, axis1, axis2, dtype, out) @@ -1353,7 +1391,7 @@ def vander(x1, N=None, increasing=False): return call_origin(numpy.vander, x1, N=N, increasing=increasing) -def zeros(shape, dtype=None, order='C'): +def zeros(shape, dtype=None, order="C"): """ Return a new array of given shape and type, filled with zeros. @@ -1383,8 +1421,8 @@ def zeros(shape, dtype=None, order='C'): """ - if (not use_origin_backend()): - if order not in ('C', 'c', None): + if not use_origin_backend(): + if order not in ("C", "c", None): pass else: _dtype = dtype if dtype is not None else dpnp.float64 @@ -1396,7 +1434,7 @@ def zeros(shape, dtype=None, order='C'): # numpy.zeros_like(a, dtype=None, order='K', subok=True, shape=None) -def zeros_like(x1, dtype=None, order='C', subok=False, shape=None): +def zeros_like(x1, dtype=None, order="C", subok=False, shape=None): """ Return an array of zeros with the same shape and type as a given array. @@ -1427,7 +1465,7 @@ def zeros_like(x1, dtype=None, order='C', subok=False, shape=None): x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) if x1_desc: - if order not in ('C', 'c', None): + if order not in ("C", "c", None): pass elif subok is not False: pass diff --git a/dpnp/dpnp_iface_bitwise.py b/dpnp/dpnp_iface_bitwise.py index 51a28b0464ea..e8ed0c8aa043 100644 --- a/dpnp/dpnp_iface_bitwise.py +++ b/dpnp/dpnp_iface_bitwise.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -42,31 +42,36 @@ import numpy - +import dpnp from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * -import dpnp __all__ = [ - 'bitwise_and', - 'bitwise_or', - 'bitwise_xor', - 'invert', - 'bitwise_not', - 'left_shift', - 'right_shift', + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "invert", + "bitwise_not", + "left_shift", + "right_shift", ] -def _check_nd_call(origin_func, dpnp_func, x1, x2, dtype=None, out=None, where=True, **kwargs): +def _check_nd_call( + origin_func, dpnp_func, x1, x2, dtype=None, out=None, where=True, **kwargs +): """Choose function to call based on input and call chosen fucntion.""" x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_nondefault_queue=False) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -90,10 +95,18 @@ def _check_nd_call(origin_func, dpnp_func, x1, x2, dtype=None, out=None, where=T elif not where: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_func(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_func( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() - return call_origin(origin_func, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + origin_func, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def bitwise_and(x1, x2, dtype=None, out=None, where=True, **kwargs): @@ -127,7 +140,16 @@ def bitwise_and(x1, x2, dtype=None, out=None, where=True, **kwargs): [2, 4, 16] """ - return _check_nd_call(numpy.bitwise_and, dpnp_bitwise_and, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return _check_nd_call( + numpy.bitwise_and, + dpnp_bitwise_and, + x1, + x2, + dtype=dtype, + out=out, + where=where, + **kwargs + ) def bitwise_or(x1, x2, dtype=None, out=None, where=True, **kwargs): @@ -161,7 +183,16 @@ def bitwise_or(x1, x2, dtype=None, out=None, where=True, **kwargs): [6, 5, 255] """ - return _check_nd_call(numpy.bitwise_or, dpnp_bitwise_or, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return _check_nd_call( + numpy.bitwise_or, + dpnp_bitwise_or, + x1, + x2, + dtype=dtype, + out=out, + where=where, + **kwargs + ) def bitwise_xor(x1, x2, dtype=None, out=None, where=True, **kwargs): @@ -195,7 +226,16 @@ def bitwise_xor(x1, x2, dtype=None, out=None, where=True, **kwargs): [26, 5] """ - return _check_nd_call(numpy.bitwise_xor, dpnp_bitwise_xor, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return _check_nd_call( + numpy.bitwise_xor, + dpnp_bitwise_xor, + x1, + x2, + dtype=dtype, + out=out, + where=where, + **kwargs + ) def invert(x, **kwargs): @@ -267,7 +307,16 @@ def left_shift(x1, x2, dtype=None, out=None, where=True, **kwargs): [10, 20, 40] """ - return _check_nd_call(numpy.left_shift, dpnp_left_shift, x1, x2, dtype=dtype, 
out=out, where=where, **kwargs) + return _check_nd_call( + numpy.left_shift, + dpnp_left_shift, + x1, + x2, + dtype=dtype, + out=out, + where=where, + **kwargs + ) def right_shift(x1, x2, dtype=None, out=None, where=True, **kwargs): @@ -299,4 +348,13 @@ def right_shift(x1, x2, dtype=None, out=None, where=True, **kwargs): [5, 2, 1] """ - return _check_nd_call(numpy.right_shift, dpnp_right_shift, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return _check_nd_call( + numpy.right_shift, + dpnp_right_shift, + x1, + x2, + dtype=dtype, + out=out, + where=where, + **kwargs + ) diff --git a/dpnp/dpnp_iface_counting.py b/dpnp/dpnp_iface_counting.py index 9f14e3f36bfb..1f541319ced9 100644 --- a/dpnp/dpnp_iface_counting.py +++ b/dpnp/dpnp_iface_counting.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -40,17 +40,14 @@ """ -import dpnp import numpy +import dpnp import dpnp.config as config -from dpnp.dpnp_utils import * - from dpnp.dpnp_algo.dpnp_algo import * # TODO need to investigate why dpnp.dpnp_algo can not be used +from dpnp.dpnp_utils import * -__all__ = [ - 'count_nonzero' -] +__all__ = ["count_nonzero"] def count_nonzero(x1, axis=None, *, keepdims=False): diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index 6ff554d89d3a..d2e7131f6aac 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -42,12 +42,11 @@ import collections -from dpnp.dpnp_algo import * -from dpnp.dpnp_utils import * - -import dpnp import numpy +import dpnp +from dpnp.dpnp_algo import * +from dpnp.dpnp_utils import * __all__ = [ "choose", @@ -67,11 +66,11 @@ "tril_indices", "tril_indices_from", "triu_indices", - "triu_indices_from" + "triu_indices_from", ] -def choose(x1, choices, out=None, mode='raise'): +def choose(x1, choices, out=None, mode="raise"): """ Construct an array from an index array and a set of arrays to choose from. 
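A small sketch of choose, which mirrors numpy.choose semantics: the index array selects, per position, from the choice arrays (hypothetical values):

    import dpnp as np

    index = np.array([0, 1, 0])
    choices = [np.array([10, 20, 30]), np.array([40, 50, 60])]
    result = np.choose(index, choices)
    print(np.asnumpy(result))  # [10 50 30]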
@@ -85,14 +84,16 @@ def choose(x1, choices, out=None, mode='raise'): choices_list = [] for choice in choices: - choices_list.append(dpnp.get_dpnp_descriptor(choice, copy_when_nondefault_queue=False)) + choices_list.append( + dpnp.get_dpnp_descriptor(choice, copy_when_nondefault_queue=False) + ) if x1_desc: if any(not desc for desc in choices_list): pass elif out is not None: pass - elif mode != 'raise': + elif mode != "raise": pass elif any(not choices[0].dtype == choice.dtype for choice in choices): pass @@ -101,7 +102,10 @@ def choose(x1, choices, out=None, mode='raise'): else: size = x1_desc.size choices_size = choices_list[0].size - if any(choice.size != choices_size or choice.size != size for choice in choices): + if any( + choice.size != choices_size or choice.size != size + for choice in choices + ): pass elif any(x >= choices_size for x in dpnp.asnumpy(x1)): pass @@ -195,7 +199,9 @@ def diag_indices_from(x1): # original limitation # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. - elif not numpy.alltrue(numpy.diff(x1_desc.shape) == 0): # TODO: replace alltrue and diff funcs with dpnp own ones + elif not numpy.alltrue( + numpy.diff(x1_desc.shape) == 0 + ): # TODO: replace alltrue and diff funcs with dpnp own ones pass else: return dpnp_diag_indices(x1_desc.shape[0], x1_desc.ndim) @@ -248,7 +254,9 @@ def fill_diagonal(x1, val, wrap=False): :obj:`dpnp.diag_indices_from` : Return the indices to access the main diagonal of an n-dimensional array. """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: if not dpnp.isscalar(val): pass @@ -339,6 +347,7 @@ def nonzero(x1): def place(x1, mask, vals): """ Change elements of an array based on conditional and input values. + For full documentation refer to :obj:`numpy.place`. Limitations @@ -356,9 +365,10 @@ def place(x1, mask, vals): return call_origin(numpy.place, x1, mask, vals, dpnp_inplace=True) -def put(x1, ind, v, mode='raise'): +def put(x1, ind, v, mode="raise"): """ Replaces specified elements of an array with given values. + For full documentation refer to :obj:`numpy.put`. Limitations @@ -367,13 +377,17 @@ def put(x1, ind, v, mode='raise'): Not supported parameter mode. """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - if mode != 'raise': + if mode != "raise": pass elif type(ind) != type(v): pass - elif numpy.max(ind) >= x1_desc.size or numpy.min(ind) + x1_desc.size < 0: + elif ( + numpy.max(ind) >= x1_desc.size or numpy.min(ind) + x1_desc.size < 0 + ): pass else: return dpnp_put(x1_desc, ind, v) @@ -384,6 +398,7 @@ def put(x1, ind, v, mode='raise'): def put_along_axis(x1, indices, values, axis): """ Put values into the destination array by matching 1d index and data slices. + For full documentation refer to :obj:`numpy.put_along_axis`. 
See Also @@ -392,8 +407,12 @@ def put_along_axis(x1, indices, values, axis): """ x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - indices_desc = dpnp.get_dpnp_descriptor(indices, copy_when_nondefault_queue=False) - values_desc = dpnp.get_dpnp_descriptor(values, copy_when_nondefault_queue=False) + indices_desc = dpnp.get_dpnp_descriptor( + indices, copy_when_nondefault_queue=False + ) + values_desc = dpnp.get_dpnp_descriptor( + values, copy_when_nondefault_queue=False + ) if x1_desc and indices_desc and values_desc: if x1_desc.ndim != indices_desc.ndim: pass @@ -406,12 +425,15 @@ def put_along_axis(x1, indices, values, axis): else: return dpnp_put_along_axis(x1_desc, indices_desc, values_desc, axis) - return call_origin(numpy.put_along_axis, x1, indices, values, axis, dpnp_inplace=True) + return call_origin( + numpy.put_along_axis, x1, indices, values, axis, dpnp_inplace=True + ) def putmask(x1, mask, values): """ Changes elements of an array based on conditional and input values. + For full documentation refer to :obj:`numpy.putmask`. Limitations @@ -419,9 +441,13 @@ def putmask(x1, mask, values): Input arrays ``arr``, ``mask`` and ``values`` are supported as :obj:`dpnp.ndarray`. """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) mask_desc = dpnp.get_dpnp_descriptor(mask, copy_when_nondefault_queue=False) - values_desc = dpnp.get_dpnp_descriptor(values, copy_when_nondefault_queue=False) + values_desc = dpnp.get_dpnp_descriptor( + values, copy_when_nondefault_queue=False + ) if x1_desc and mask_desc and values_desc: return dpnp_putmask(x1_desc, mask_desc, values_desc) @@ -431,6 +457,7 @@ def putmask(x1, mask, values): def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. + For full documentation refer to :obj:`numpy.select`. Limitations @@ -460,9 +487,10 @@ def select(condlist, choicelist, default=0): return call_origin(numpy.select, condlist, choicelist, default) -def take(x1, indices, axis=None, out=None, mode='raise'): +def take(x1, indices, axis=None, out=None, mode="raise"): """ Take elements from an array. + For full documentation refer to :obj:`numpy.take`. Limitations @@ -478,13 +506,15 @@ def take(x1, indices, axis=None, out=None, mode='raise'): """ x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - indices_desc = dpnp.get_dpnp_descriptor(indices, copy_when_nondefault_queue=False) + indices_desc = dpnp.get_dpnp_descriptor( + indices, copy_when_nondefault_queue=False + ) if x1_desc and indices_desc: if axis is not None: pass elif out is not None: pass - elif mode != 'raise': + elif mode != "raise": pass else: return dpnp_take(x1_desc, indices_desc).get_pyobj() @@ -495,6 +525,7 @@ def take(x1, indices, axis=None, out=None, mode='raise'): def take_along_axis(x1, indices, axis): """ Take values from the input array by matching 1d index and data slices. + For full documentation refer to :obj:`numpy.take_along_axis`. 
See Also @@ -504,7 +535,9 @@ def take_along_axis(x1, indices, axis): """ x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - indices_desc = dpnp.get_dpnp_descriptor(indices, copy_when_nondefault_queue=False) + indices_desc = dpnp.get_dpnp_descriptor( + indices, copy_when_nondefault_queue=False + ) if x1_desc and indices_desc: if x1_desc.ndim != indices_desc.ndim: pass @@ -556,8 +589,11 @@ def tril_indices(n, k=0, m=None): """ if not use_origin_backend(): - if isinstance(n, int) and isinstance(k, int) \ - and (isinstance(m, int) or m is None): + if ( + isinstance(n, int) + and isinstance(k, int) + and (isinstance(m, int) or m is None) + ): return dpnp_tril_indices(n, k, m) return call_origin(numpy.tril_indices, n, k, m) @@ -566,6 +602,7 @@ def tril_indices(n, k=0, m=None): def tril_indices_from(x1, k=0): """ Return the indices for the lower-triangle of arr. + See `tril_indices` for full details. Parameters @@ -613,8 +650,11 @@ def triu_indices(n, k=0, m=None): """ if not use_origin_backend(): - if isinstance(n, int) and isinstance(k, int) \ - and (isinstance(m, int) or m is None): + if ( + isinstance(n, int) + and isinstance(k, int) + and (isinstance(m, int) or m is None) + ): return dpnp_triu_indices(n, k, m) return call_origin(numpy.triu_indices, n, k, m) @@ -623,6 +663,7 @@ def triu_indices(n, k=0, m=None): def triu_indices_from(x1, k=0): """ Return the indices for the lower-triangle of arr. + See `tril_indices` for full details. Parameters diff --git a/dpnp/dpnp_iface_libmath.py b/dpnp/dpnp_iface_libmath.py index 934f420bd68b..e92476f75600 100644 --- a/dpnp/dpnp_iface_libmath.py +++ b/dpnp/dpnp_iface_libmath.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -41,13 +41,11 @@ import math +import dpnp from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * -import dpnp -__all__ = [ - "erf" -] +__all__ = ["erf"] def erf(in_array1): @@ -66,7 +64,6 @@ def erf(in_array1): Examples -------- - >>> import dpnp as np >>> x = np.linspace(2.0, 3.0, num=5) >>> [i for i in x] @@ -81,7 +78,9 @@ def erf(in_array1): if x1_desc: return dpnp_erf(x1_desc).get_pyobj() - result = create_output_descriptor_py(in_array1.shape, in_array1.dtype, None).get_pyobj() + result = create_output_descriptor_py( + in_array1.shape, in_array1.dtype, None + ).get_pyobj() for i in range(result.size): result[i] = math.erf(in_array1[i]) diff --git a/dpnp/dpnp_iface_linearalgebra.py b/dpnp/dpnp_iface_linearalgebra.py index 7cd76d7c22b5..59c181c4550f 100644 --- a/dpnp/dpnp_iface_linearalgebra.py +++ b/dpnp/dpnp_iface_linearalgebra.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -40,13 +40,12 @@ """ -from dpnp.dpnp_algo import * -from dpnp.dpnp_utils import * -import dpnp -import dpnp.config as config - import numpy +import dpnp +import dpnp.config as config +from dpnp.dpnp_algo import * +from dpnp.dpnp_utils import * __all__ = [ "dot", @@ -57,7 +56,7 @@ "matmul", "outer", "tensordot", - "vdot" + "vdot", ] @@ -91,16 +90,32 @@ def dot(x1, x2, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: # TODO: remove fallback with scalars when muliply backend func will support strides - if(x1_desc.ndim == 0 and x2_desc.strides is not None - or x2_desc.ndim == 0 and x1_desc.strides is not None): + if ( + x1_desc.ndim == 0 + and x2_desc.strides is not None + or x2_desc.ndim == 0 + and x1_desc.strides is not None + ): pass - elif (x1_desc.ndim >= 1 and x2_desc.ndim > 1 and x1_desc.shape[-1] != x2_desc.shape[-2]): + elif ( + x1_desc.ndim >= 1 + and x2_desc.ndim > 1 + and x1_desc.shape[-1] != x2_desc.shape[-2] + ): pass - elif (x1_desc.ndim > 0 and x2_desc.ndim == 1 and x1_desc.shape[-1] != x2_desc.shape[0]): + elif ( + x1_desc.ndim > 0 + and x2_desc.ndim == 1 + and x1_desc.shape[-1] != x2_desc.shape[0] + ): pass else: return dpnp_dot(x1_desc, x2_desc).get_pyobj() @@ -132,8 +147,7 @@ def einsum(*args, **kwargs): def einsum_path(*args, **kwargs): """ - Evaluates the lowest cost contraction order for an einsum expression - by considering the creation of intermediate arrays. + Evaluates the lowest cost contraction order for an einsum expression by considering the creation of intermediate arrays. For full documentation refer to :obj:`numpy.einsum_path`. @@ -268,7 +282,9 @@ def matmul(x1, x2, out=None, **kwargs): array2_size = x2_desc.size cost_size = 4096 # 2D array shape(64, 64) - if ((x1_desc.dtype == numpy.float64) or (x1_desc.dtype == numpy.float32)): + if (x1_desc.dtype == numpy.float64) or ( + x1_desc.dtype == numpy.float32 + ): """ Floating point types are handled via original math library better than SYCL math library """ @@ -277,7 +293,13 @@ def matmul(x1, x2, out=None, **kwargs): if (array1_size > cost_size) and (array2_size > cost_size): return dpnp_matmul(x1_desc, x2_desc, out) else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor( + out, copy_when_nondefault_queue=False + ) + if out is not None + else None + ) return dpnp_matmul(x1_desc, x2_desc, out_desc).get_pyobj() return call_origin(numpy.matmul, x1, x2, out=out, **kwargs) diff --git a/dpnp/dpnp_iface_logic.py b/dpnp/dpnp_iface_logic.py index 0f1e1b5fc0e5..5f10843f3782 100644 --- a/dpnp/dpnp_iface_logic.py +++ b/dpnp/dpnp_iface_logic.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -41,13 +41,12 @@ import numpy -import dpnp +import dpnp import dpnp.config as config from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * - __all__ = [ "all", "allclose", @@ -65,7 +64,7 @@ "logical_not", "logical_or", "logical_xor", - "not_equal" + "not_equal", ] @@ -125,7 +124,7 @@ def all(x1, axis=None, out=None, keepdims=False): return call_origin(numpy.all, x1, axis, out, keepdims) -def allclose(x1, x2, rtol=1.e-5, atol=1.e-8, **kwargs): +def allclose(x1, x2, rtol=1.0e-5, atol=1.0e-8, **kwargs): """ Returns True if two arrays are element-wise equal within a tolerance. @@ -388,7 +387,9 @@ def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False): # result_obj = dpnp_isclose(x1_desc, x2_desc, rtol, atol, equal_nan).get_pyobj() # return result_obj - return call_origin(numpy.isclose, x1, x2, rtol=rtol, atol=atol, equal_nan=equal_nan) + return call_origin( + numpy.isclose, x1, x2, rtol=rtol, atol=atol, equal_nan=equal_nan + ) def isfinite(x1, out=None, **kwargs): diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 45938f0d52ad..3815f35d0582 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -42,13 +42,12 @@ import collections.abc -from dpnp.dpnp_algo import * -from dpnp.dpnp_utils import * -from dpnp.dpnp_iface_arraycreation import array - -import dpnp import numpy +import dpnp +from dpnp.dpnp_algo import * +from dpnp.dpnp_iface_arraycreation import array +from dpnp.dpnp_utils import * __all__ = [ "asfarray", @@ -69,7 +68,7 @@ "swapaxes", "transpose", "unique", - "vstack" + "vstack", ] @@ -103,6 +102,7 @@ def asfarray(x1, dtype=numpy.float64): def atleast_1d(*arys): """ Convert inputs to arrays with at least one dimension. + Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. @@ -131,7 +131,9 @@ def atleast_2d(*arys): all_is_array = True arys_desc = [] for ary in arys: - ary_desc = dpnp.get_dpnp_descriptor(ary, copy_when_nondefault_queue=False) + ary_desc = dpnp.get_dpnp_descriptor( + ary, copy_when_nondefault_queue=False + ) if ary_desc: arys_desc.append(ary_desc) else: @@ -166,7 +168,9 @@ def atleast_3d(*arys): all_is_array = True arys_desc = [] for ary in arys: - ary_desc = dpnp.get_dpnp_descriptor(ary, copy_when_nondefault_queue=False) + ary_desc = dpnp.get_dpnp_descriptor( + ary, copy_when_nondefault_queue=False + ) if ary_desc: arys_desc.append(ary_desc) else: @@ -212,10 +216,17 @@ def concatenate(arrs, axis=0, out=None, dtype=None, casting="same_kind"): [1 2 3 4 5 6] """ - return call_origin(numpy.concatenate, arrs, axis=axis, out=out, dtype=dtype, casting=casting) + return call_origin( + numpy.concatenate, + arrs, + axis=axis, + out=out, + dtype=dtype, + casting=casting, + ) -def copyto(dst, src, casting='same_kind', where=True): +def copyto(dst, src, casting="same_kind", where=True): """ Copies values from one array to another, broadcasting as necessary. 
@@ -232,18 +243,38 @@ def copyto(dst, src, casting='same_kind', where=True): """ - dst_desc = dpnp.get_dpnp_descriptor(dst, copy_when_strides=False, copy_when_nondefault_queue=False) + dst_desc = dpnp.get_dpnp_descriptor( + dst, copy_when_strides=False, copy_when_nondefault_queue=False + ) src_desc = dpnp.get_dpnp_descriptor(src, copy_when_nondefault_queue=False) if dst_desc and src_desc: - if casting != 'same_kind': + if casting != "same_kind": pass - elif (dst_desc.dtype == dpnp.bool and # due to 'same_kind' casting - src_desc.dtype in [dpnp.int32, dpnp.int64, dpnp.float32, dpnp.float64, dpnp.complex128]): + elif ( + dst_desc.dtype == dpnp.bool + and src_desc.dtype # due to 'same_kind' casting + in [ + dpnp.int32, + dpnp.int64, + dpnp.float32, + dpnp.float64, + dpnp.complex128, + ] + ): pass - elif (dst_desc.dtype in [dpnp.int32, dpnp.int64] and # due to 'same_kind' casting - src_desc.dtype in [dpnp.float32, dpnp.float64, dpnp.complex128]): + elif dst_desc.dtype in [ + dpnp.int32, + dpnp.int64, + ] and src_desc.dtype in [ # due to 'same_kind' casting + dpnp.float32, + dpnp.float64, + dpnp.complex128, + ]: pass - elif dst_desc.dtype in [dpnp.float32, dpnp.float64] and src_desc.dtype == dpnp.complex128: # due to 'same_kind' casting + elif ( + dst_desc.dtype in [dpnp.float32, dpnp.float64] + and src_desc.dtype == dpnp.complex128 + ): # due to 'same_kind' casting pass elif where is not True: pass @@ -254,7 +285,9 @@ def copyto(dst, src, casting='same_kind', where=True): else: return dpnp_copyto(dst_desc, src_desc, where=where) - return call_origin(numpy.copyto, dst, src, casting, where, dpnp_inplace=True) + return call_origin( + numpy.copyto, dst, src, casting, where, dpnp_inplace=True + ) def expand_dims(x1, axis): @@ -385,7 +418,9 @@ def moveaxis(x1, source, destination): input_permute.append(i) # insert moving axes into proper positions - for destination_id, source_id in sorted(zip(destination_norm, source_norm)): + for destination_id, source_id in sorted( + zip(destination_norm, source_norm) + ): # if destination_id in input_permute: # pytest tests/third_party/cupy/manipulation_tests/test_transpose.py::TestTranspose::test_moveaxis_invalid5_3 # checker_throw_value_error("swapaxes", "source_id exists", source_id, input_permute) @@ -396,7 +431,7 @@ def moveaxis(x1, source, destination): return call_origin(numpy.moveaxis, x1, source, destination) -def ravel(x1, order='C'): +def ravel(x1, order="C"): """ Return a contiguous flattened array. @@ -466,7 +501,7 @@ def repeat(x1, repeats, axis=None): return call_origin(numpy.repeat, x1, repeats, axis) -def reshape(x1, newshape, order='C'): +def reshape(x1, newshape, order="C"): """ Gives a new shape to an array without changing its data. 
@@ -480,7 +515,7 @@ def reshape(x1, newshape, order='C'): x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) if x1_desc: - if order != 'C': + if order != "C": pass else: return dpnp_reshape(x1_desc, newshape, order).get_pyobj() @@ -624,7 +659,10 @@ def swapaxes(x1, axis1, axis2): # 'do nothing' pattern for transpose() input_permute = [i for i in range(x1.ndim)] # swap axes - input_permute[axis1], input_permute[axis2] = input_permute[axis2], input_permute[axis1] + input_permute[axis1], input_permute[axis2] = ( + input_permute[axis2], + input_permute[axis1], + ) return transpose(x1_desc.get_pyobj(), axes=input_permute) diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index d4515b43cd3a..653b7d1e7dfe 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -40,12 +40,11 @@ """ -from dpnp.dpnp_algo import * -from dpnp.dpnp_utils import * - -import dpnp import numpy +import dpnp +from dpnp.dpnp_algo import * +from dpnp.dpnp_utils import * __all__ = [ "abs", @@ -89,7 +88,7 @@ "sum", "trapz", "true_divide", - "trunc" + "trunc", ] @@ -181,8 +180,12 @@ def add(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -202,10 +205,18 @@ def add(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_add(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_add( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() - return call_origin(numpy.add, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.add, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def around(x1, decimals=0, out=None): @@ -277,9 +288,15 @@ def ceil(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and not kwargs: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_ceil(x1_desc, out_desc).get_pyobj() return call_origin(numpy.ceil, x1, out=out, **kwargs) @@ -307,7 +324,9 @@ def conjugate(x1, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, 
copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and not kwargs: return dpnp_conjugate(x1_desc).get_pyobj() @@ -317,7 +336,7 @@ def conjugate(x1, **kwargs): conj = conjugate -def convolve(a, v, mode='full'): +def convolve(a, v, mode="full"): """ Returns the discrete, linear convolution of two one-dimensional sequences. @@ -359,8 +378,12 @@ def copysign(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -380,9 +403,13 @@ def copysign(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - return dpnp_copysign(x1_desc, x2_desc, dtype=dtype, out=out, where=where).get_pyobj() + return dpnp_copysign( + x1_desc, x2_desc, dtype=dtype, out=out, where=where + ).get_pyobj() - return call_origin(numpy.copysign, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.copysign, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): @@ -531,7 +558,9 @@ def diff(x1, n=1, axis=-1, prepend=numpy._NoValue, append=numpy._NoValue): else: return dpnp_diff(x1_desc, n).get_pyobj() - return call_origin(numpy.diff, x1, n=n, axis=axis, prepend=prepend, append=append) + return call_origin( + numpy.diff, x1, n=n, axis=axis, prepend=prepend, append=append + ) def divide(x1, x2, dtype=None, out=None, where=True, **kwargs): @@ -559,8 +588,12 @@ def divide(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -580,9 +613,13 @@ def divide(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - return dpnp_divide(x1_desc, x2_desc, dtype=dtype, out=out, where=where).get_pyobj() + return dpnp_divide( + x1_desc, x2_desc, dtype=dtype, out=out, where=where + ).get_pyobj() - return call_origin(numpy.divide, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.divide, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def ediff1d(x1, to_end=None, to_begin=None): @@ -650,7 +687,9 @@ def fabs(x1, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_fabs(x1_desc).get_pyobj() @@ -690,9 +729,15 @@ def floor(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, 
copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and not kwargs: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_floor(x1_desc, out_desc).get_pyobj() return call_origin(numpy.floor, x1, out=out, **kwargs) @@ -760,10 +805,18 @@ def floor_divide(x1, x2, dtype=None, out=None, where=True, **kwargs): elif x1_is_scalar and x2_desc.ndim > 1: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_floor_divide(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_floor_divide( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() - return call_origin(numpy.floor_divide, x1, x2, out=out, where=where, dtype=dtype, **kwargs) + return call_origin( + numpy.floor_divide, x1, x2, out=out, where=where, dtype=dtype, **kwargs + ) def fmax(*args, **kwargs): @@ -840,8 +893,12 @@ def fmod(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -861,10 +918,18 @@ def fmod(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_fmod(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_fmod( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() - return call_origin(numpy.fmod, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.fmod, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def gradient(x1, *varargs, **kwargs): @@ -940,8 +1005,12 @@ def maximum(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -961,9 +1030,13 @@ def maximum(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - return dpnp_maximum(x1_desc, x2_desc, dtype=dtype, out=out, where=where).get_pyobj() + return dpnp_maximum( + x1_desc, x2_desc, dtype=dtype, out=out, where=where + ).get_pyobj() - 
return call_origin(numpy.maximum, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.maximum, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def minimum(x1, x2, dtype=None, out=None, where=True, **kwargs): @@ -997,8 +1070,12 @@ def minimum(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -1018,9 +1095,13 @@ def minimum(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - return dpnp_minimum(x1_desc, x2_desc, dtype=dtype, out=out, where=where).get_pyobj() + return dpnp_minimum( + x1_desc, x2_desc, dtype=dtype, out=out, where=where + ).get_pyobj() - return call_origin(numpy.minimum, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.minimum, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def mod(*args, **kwargs): @@ -1101,8 +1182,12 @@ def multiply(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x2_desc and not x2_is_scalar: @@ -1120,9 +1205,13 @@ def multiply(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - return dpnp_multiply(x1_desc, x2_desc, dtype=dtype, out=out, where=where).get_pyobj() + return dpnp_multiply( + x1_desc, x2_desc, dtype=dtype, out=out, where=where + ).get_pyobj() - return call_origin(numpy.multiply, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.multiply, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def nancumprod(x1, **kwargs): @@ -1284,7 +1373,9 @@ def negative(x1, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and not kwargs: return dpnp_negative(x1_desc).get_pyobj() @@ -1325,8 +1416,12 @@ def power(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -1344,13 +1439,29 @@ def 
power(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_power(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() - - return call_origin(numpy.power, x1, x2, dtype=dtype, out=out, where=where, **kwargs) - - -def prod(x1, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=True): + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_power( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() + + return call_origin( + numpy.power, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) + + +def prod( + x1, + axis=None, + dtype=None, + out=None, + keepdims=False, + initial=None, + where=True, +): """ Calculate product of array elements over a given axis. @@ -1376,13 +1487,30 @@ def prod(x1, axis=None, dtype=None, out=None, keepdims=False, initial=None, wher if where is not True: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - result_obj = dpnp_prod(x1_desc, axis, dtype, out_desc, keepdims, initial, where).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj, keepdims) + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + result_obj = dpnp_prod( + x1_desc, axis, dtype, out_desc, keepdims, initial, where + ).get_pyobj() + result = dpnp.convert_single_elem_array_to_scalar( + result_obj, keepdims + ) return result - return call_origin(numpy.prod, x1, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where) + return call_origin( + numpy.prod, + x1, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + initial=initial, + where=where, + ) def remainder(x1, x2, out=None, where=True, dtype=None, **kwargs): @@ -1447,10 +1575,18 @@ def remainder(x1, x2, out=None, where=True, dtype=None, **kwargs): elif x1_is_scalar and x2_desc.ndim > 1: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_remainder(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_remainder( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() - return call_origin(numpy.remainder, x1, x2, out=out, where=where, dtype=dtype, **kwargs) + return call_origin( + numpy.remainder, x1, x2, out=out, where=where, dtype=dtype, **kwargs + ) def round_(a, decimals=0, out=None): @@ -1490,7 +1626,9 @@ def sign(x1, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and not kwargs: return dpnp_sign(x1_desc).get_pyobj() @@ -1522,8 +1660,12 @@ def subtract(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, 
copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -1547,13 +1689,29 @@ def subtract(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_subtract(x1_desc, x2_desc, dtype=dtype, out=out_desc, where=where).get_pyobj() - - return call_origin(numpy.subtract, x1, x2, dtype=dtype, out=out, where=where, **kwargs) - - -def sum(x1, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=True): + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_subtract( + x1_desc, x2_desc, dtype=dtype, out=out_desc, where=where + ).get_pyobj() + + return call_origin( + numpy.subtract, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) + + +def sum( + x1, + axis=None, + dtype=None, + out=None, + keepdims=False, + initial=None, + where=True, +): """ Sum of array elements over a given axis. @@ -1579,13 +1737,30 @@ def sum(x1, axis=None, dtype=None, out=None, keepdims=False, initial=None, where if where is not True: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - result_obj = dpnp_sum(x1_desc, axis, dtype, out_desc, keepdims, initial, where).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj, keepdims) + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + result_obj = dpnp_sum( + x1_desc, axis, dtype, out_desc, keepdims, initial, where + ).get_pyobj() + result = dpnp.convert_single_elem_array_to_scalar( + result_obj, keepdims + ) return result - return call_origin(numpy.sum, x1, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where) + return call_origin( + numpy.sum, + x1, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + initial=initial, + where=where, + ) def trapz(y1, x1=None, dx=1.0, axis=-1): @@ -1622,15 +1797,19 @@ def trapz(y1, x1=None, dx=1.0, axis=-1): else: y_obj = y_desc.get_array() if x1 is None: - x_obj = dpnp.empty(y_desc.shape, - dtype=y_desc.dtype, - device=y_obj.sycl_device, - usm_type=y_obj.usm_type, - sycl_queue=y_obj.sycl_queue) + x_obj = dpnp.empty( + y_desc.shape, + dtype=y_desc.dtype, + device=y_obj.sycl_device, + usm_type=y_obj.usm_type, + sycl_queue=y_obj.sycl_queue, + ) else: x_obj = x1 - x_desc = dpnp.get_dpnp_descriptor(x_obj, copy_when_nondefault_queue=False) + x_desc = dpnp.get_dpnp_descriptor( + x_obj, copy_when_nondefault_queue=False + ) # TODO: change to "not x_desc" if x_desc: pass @@ -1692,9 +1871,15 @@ def trunc(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and not kwargs: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_trunc(x1_desc, out_desc).get_pyobj() return call_origin(numpy.trunc, x1, out=out, **kwargs) diff --git a/dpnp/dpnp_iface_searching.py b/dpnp/dpnp_iface_searching.py index cef5d686035b..2d3603015f3e 100644 --- a/dpnp/dpnp_iface_searching.py +++ 
b/dpnp/dpnp_iface_searching.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -40,19 +40,13 @@ """ -from dpnp.dpnp_algo import * -from dpnp.dpnp_utils import * - -import dpnp import numpy +import dpnp +from dpnp.dpnp_algo import * +from dpnp.dpnp_utils import * -__all__ = [ - 'argmax', - 'argmin', - 'searchsorted', - 'where' -] +__all__ = ["argmax", "argmin", "searchsorted", "where"] def argmax(x1, axis=None, out=None): @@ -165,7 +159,7 @@ def argmin(x1, axis=None, out=None): return call_origin(numpy.argmin, x1, axis, out) -def searchsorted(a, v, side='left', sorter=None): +def searchsorted(a, v, side="left", sorter=None): """ Find indices where elements should be inserted to maintain order. diff --git a/dpnp/dpnp_iface_sorting.py b/dpnp/dpnp_iface_sorting.py index cdce86cbacc4..8f700299f2fb 100644 --- a/dpnp/dpnp_iface_sorting.py +++ b/dpnp/dpnp_iface_sorting.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -42,18 +42,11 @@ import numpy +import dpnp from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * -import dpnp - - -__all__ = [ - 'argsort', - 'partition', - 'searchsorted', - 'sort' -] +__all__ = ["argsort", "partition", "searchsorted", "sort"] def argsort(in_array1, axis=-1, kind=None, order=None): @@ -89,7 +82,9 @@ def argsort(in_array1, axis=-1, kind=None, order=None): """ - x1_desc = dpnp.get_dpnp_descriptor(in_array1, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + in_array1, copy_when_nondefault_queue=False + ) if x1_desc: if axis != -1: pass @@ -103,9 +98,10 @@ def argsort(in_array1, axis=-1, kind=None, order=None): return call_origin(numpy.argsort, in_array1, axis, kind, order) -def partition(x1, kth, axis=-1, kind='introselect', order=None): +def partition(x1, kth, axis=-1, kind="introselect", order=None): """ Return a partitioned copy of an array. + For full documentation refer to :obj:`numpy.partition`. Limitations @@ -125,7 +121,7 @@ def partition(x1, kth, axis=-1, kind='introselect', order=None): pass elif axis != -1: pass - elif kind != 'introselect': + elif kind != "introselect": pass elif order is not None: pass @@ -135,9 +131,10 @@ def partition(x1, kth, axis=-1, kind='introselect', order=None): return call_origin(numpy.partition, x1, kth, axis, kind, order) -def searchsorted(x1, x2, side='left', sorter=None): +def searchsorted(x1, x2, side="left", sorter=None): """ Find indices where elements should be inserted to maintain order. + For full documentation refer to :obj:`numpy.searchsorted`. 
Limitations @@ -155,7 +152,7 @@ def searchsorted(x1, x2, side='left', sorter=None): pass elif x1_desc.dtype != x2_desc.dtype: pass - elif side not in ['left', 'right']: + elif side not in ["left", "right"]: pass elif sorter is not None: pass diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 27eaf4a115f5..7ec247dd7905 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -42,26 +42,25 @@ import numpy +import dpnp from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * -import dpnp - __all__ = [ - 'amax', - 'amin', - 'average', - 'bincount', - 'correlate', - 'cov', - 'histogram', - 'max', - 'mean', - 'median', - 'min', - 'nanvar', - 'std', - 'var', + "amax", + "amin", + "average", + "bincount", + "correlate", + "cov", + "histogram", + "max", + "mean", + "median", + "min", + "nanvar", + "std", + "var", ] @@ -193,7 +192,7 @@ def bincount(x1, weights=None, minlength=0): return call_origin(numpy.bincount, x1, weights=weights, minlength=minlength) -def correlate(x1, x2, mode='valid'): +def correlate(x1, x2, mode="valid"): """ Cross-correlation of two 1-dimensional sequences. @@ -228,7 +227,7 @@ def correlate(x1, x2, mode='valid'): pass elif x1_desc.shape != x2_desc.shape: pass - elif mode != 'valid': + elif mode != "valid": pass else: return dpnp_correlate(x1_desc, x2_desc).get_pyobj() @@ -236,7 +235,9 @@ def correlate(x1, x2, mode='valid'): return call_origin(numpy.correlate, x1, x2, mode=mode) -def cov(x1, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None): +def cov( + x1, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None +): """ Estimate a covariance matrix, given data and weights. @@ -292,7 +293,10 @@ def cov(x1, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights= pass else: if x1_desc.dtype != dpnp.float64: - x1_desc = dpnp.get_dpnp_descriptor(dpnp.astype(x1, dpnp.float64), copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + dpnp.astype(x1, dpnp.float64), + copy_when_nondefault_queue=False, + ) return dpnp_cov(x1_desc).get_pyobj() @@ -302,7 +306,9 @@ def cov(x1, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights= def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): """ Compute the histogram of a dataset. + For full documentation refer to :obj:`numpy.histogram`. 
+ Examples -------- >>> import dpnp @@ -323,7 +329,15 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): 1.0 """ - return call_origin(numpy.histogram, a=a, bins=bins, range=range, normed=normed, weights=weights, density=density) + return call_origin( + numpy.histogram, + a=a, + bins=bins, + range=range, + normed=normed, + weights=weights, + density=density, + ) def max(x1, axis=None, out=None, keepdims=False, initial=None, where=True): @@ -569,7 +583,15 @@ def nanvar(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return result - return call_origin(numpy.nanvar, x1, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) + return call_origin( + numpy.nanvar, + x1, + axis=axis, + dtype=dtype, + out=out, + ddof=ddof, + keepdims=keepdims, + ) def std(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): diff --git a/dpnp/dpnp_iface_trigonometric.py b/dpnp/dpnp_iface_trigonometric.py index c50ec260adae..d2680dea6b98 100644 --- a/dpnp/dpnp_iface_trigonometric.py +++ b/dpnp/dpnp_iface_trigonometric.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -42,42 +42,41 @@ import numpy +import dpnp from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * -import dpnp - __all__ = [ - 'arccos', - 'arccosh', - 'arcsin', - 'arcsinh', - 'arctan', - 'arctan2', - 'arctanh', - 'cbrt', - 'cos', - 'cosh', - 'deg2rad', - 'degrees', - 'exp', - 'exp2', - 'expm1', - 'hypot', - 'log', - 'log10', - 'log1p', - 'log2', - 'rad2deg', - 'radians', - 'reciprocal', - 'sin', - 'sinh', - 'sqrt', - 'square', - 'tan', - 'tanh', - 'unwrap' + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "cbrt", + "cos", + "cosh", + "deg2rad", + "degrees", + "exp", + "exp2", + "expm1", + "hypot", + "log", + "log10", + "log1p", + "log2", + "rad2deg", + "radians", + "reciprocal", + "sin", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + "unwrap", ] @@ -108,7 +107,9 @@ def arccos(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_arccos(x1_desc).get_pyobj() @@ -145,7 +146,9 @@ def arccosh(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_arccosh(x1_desc).get_pyobj() @@ -184,9 +187,15 @@ def arcsin(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_arcsin(x1_desc, out_desc).get_pyobj() return call_origin(numpy.arcsin, x1, out=out, **kwargs) @@ -214,7 +223,9 @@ def arcsinh(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, 
copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_arcsinh(x1_desc).get_pyobj() @@ -249,9 +260,15 @@ def arctan(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_arctan(x1_desc, out_desc).get_pyobj() return call_origin(numpy.arctan, x1, out=out, **kwargs) @@ -278,7 +295,9 @@ def arctanh(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_arctanh(x1_desc).get_pyobj() @@ -306,7 +325,9 @@ def cbrt(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_cbrt(x1_desc).get_pyobj() @@ -346,8 +367,12 @@ def arctan2(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -365,10 +390,18 @@ def arctan2(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_arctan2(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_arctan2( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() - return call_origin(numpy.arctan2, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.arctan2, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def cos(x1, out=None, **kwargs): @@ -393,9 +426,15 @@ def cos(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_cos(x1_desc, out_desc).get_pyobj() return call_origin(numpy.cos, x1, out=out, **kwargs) @@ -422,7 +461,9 @@ def cosh(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, 
copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_cosh(x1_desc).get_pyobj() @@ -473,7 +514,9 @@ def degrees(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_degrees(x1_desc).get_pyobj() @@ -506,9 +549,15 @@ def exp(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_exp(x1_desc, out_desc).get_pyobj() return call_origin(numpy.exp, x1, out=out, **kwargs) @@ -540,7 +589,9 @@ def exp2(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_exp2(x1_desc).get_pyobj() @@ -570,7 +621,9 @@ def expm1(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_expm1(x1_desc).get_pyobj() @@ -604,8 +657,12 @@ def hypot(x1, x2, dtype=None, out=None, where=True, **kwargs): x1_is_scalar = dpnp.isscalar(x1) x2_is_scalar = dpnp.isscalar(x2) - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) - x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) + x2_desc = dpnp.get_dpnp_descriptor( + x2, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and x2_desc and not kwargs: if not x1_desc and not x1_is_scalar: @@ -625,10 +682,18 @@ def hypot(x1, x2, dtype=None, out=None, where=True, **kwargs): elif not where: pass else: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None - return dpnp_hypot(x1_desc, x2_desc, dtype, out_desc, where).get_pyobj() + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) + return dpnp_hypot( + x1_desc, x2_desc, dtype, out_desc, where + ).get_pyobj() - return call_origin(numpy.hypot, x1, x2, dtype=dtype, out=out, where=where, **kwargs) + return call_origin( + numpy.hypot, x1, x2, dtype=dtype, out=out, where=where, **kwargs + ) def log(x1, out=None, **kwargs): @@ -661,9 +726,15 @@ def log(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_log(x1_desc, out_desc).get_pyobj() return call_origin(numpy.log, x1, out=out, **kwargs) @@ -690,7 
+761,9 @@ def log10(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_log10(x1_desc).get_pyobj() @@ -722,7 +795,9 @@ def log1p(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_log1p(x1_desc).get_pyobj() @@ -758,7 +833,9 @@ def log2(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_log2(x1_desc).get_pyobj() @@ -788,7 +865,9 @@ def reciprocal(x1, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc and not kwargs: return dpnp_recip(x1_desc).get_pyobj() @@ -838,7 +917,9 @@ def radians(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_radians(x1_desc).get_pyobj() @@ -876,9 +957,15 @@ def sin(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_sin(x1_desc, out_desc).get_pyobj() return call_origin(numpy.sin, x1, out=out, **kwargs) @@ -906,7 +993,9 @@ def sinh(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_sinh(x1_desc).get_pyobj() @@ -935,7 +1024,9 @@ def sqrt(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_sqrt(x1_desc).get_pyobj() @@ -970,7 +1061,9 @@ def square(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_square(x1_desc).get_pyobj() @@ -1000,9 +1093,15 @@ def tan(x1, out=None, **kwargs): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: - out_desc = dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) if out is not None else None + out_desc = ( + dpnp.get_dpnp_descriptor(out, copy_when_nondefault_queue=False) + if out is not None + else None + ) return dpnp_tan(x1_desc, out_desc).get_pyobj() return 
call_origin(numpy.tan, x1, out=out, **kwargs) @@ -1030,7 +1129,9 @@ def tanh(x1): """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False, copy_when_nondefault_queue=False) + x1_desc = dpnp.get_dpnp_descriptor( + x1, copy_when_strides=False, copy_when_nondefault_queue=False + ) if x1_desc: return dpnp_tanh(x1_desc).get_pyobj() diff --git a/dpnp/dpnp_iface_types.py b/dpnp/dpnp_iface_types.py index dfcf599bf3be..8e64d53a45e6 100644 --- a/dpnp/dpnp_iface_types.py +++ b/dpnp/dpnp_iface_types.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -56,7 +56,7 @@ "longcomplex", "nan", "newaxis", - "void" + "void", ] bool = numpy.bool @@ -95,11 +95,14 @@ def isscalar(obj): def is_type_supported(obj_type): - """ - Return True if type is supported by DPNP python level. - """ - - if obj_type == float64 or obj_type == float32 or obj_type == int64 or obj_type == int32: + """Return True if type is supported by DPNP python level.""" + + if ( + obj_type == float64 + or obj_type == float32 + or obj_type == int64 + or obj_type == int32 + ): return True return False diff --git a/dpnp/dpnp_utils/dpnp_algo_utils.pxd b/dpnp/dpnp_utils/dpnp_algo_utils.pxd index 0924dae26408..2571e6c62937 100644 --- a/dpnp/dpnp_utils/dpnp_algo_utils.pxd +++ b/dpnp/dpnp_utils/dpnp_algo_utils.pxd @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,8 +27,8 @@ from libcpp cimport bool as cpp_bool -from dpnp.dpnp_algo.dpnp_algo cimport DPNPFuncType, DPNPFuncName from dpnp.dpnp_algo cimport shape_type_c +from dpnp.dpnp_algo.dpnp_algo cimport DPNPFuncName, DPNPFuncType cpdef checker_throw_runtime_error(function_name, message) diff --git a/dpnp/dpnp_utils/dpnp_algo_utils.pyx b/dpnp/dpnp_utils/dpnp_algo_utils.pyx index aac77164b911..eed0d98360f5 100644 --- a/dpnp/dpnp_utils/dpnp_algo_utils.pyx +++ b/dpnp/dpnp_utils/dpnp_algo_utils.pyx @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -33,17 +33,22 @@ This module contains differnt helpers and utilities import dpctl import numpy + +import dpnp import dpnp.config as config import dpnp.dpnp_container as dpnp_container -import dpnp -from dpnp.dpnp_algo.dpnp_algo cimport dpnp_DPNPFuncType_to_dtype, dpnp_dtype_to_DPNPFuncType, get_dpnp_function_ptr -from libcpp cimport bool as cpp_bool -from libcpp.complex cimport complex as cpp_complex cimport cpython cimport cython cimport numpy +from libcpp cimport bool as cpp_bool +from libcpp.complex cimport complex as cpp_complex +from dpnp.dpnp_algo.dpnp_algo cimport ( + dpnp_DPNPFuncType_to_dtype, + dpnp_dtype_to_DPNPFuncType, + get_dpnp_function_ptr, +) """ Python import functions diff --git a/dpnp/fft/dpnp_algo_fft.pyx b/dpnp/fft/dpnp_algo_fft.pyx index d63c7bf9fc68..5ae1e06c03df 100644 --- a/dpnp/fft/dpnp_algo_fft.pyx +++ b/dpnp/fft/dpnp_algo_fft.pyx @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -33,9 +33,8 @@ and the rest of the library """ -from dpnp.dpnp_algo cimport * cimport dpnp.dpnp_utils as utils - +from dpnp.dpnp_algo cimport * __all__ = [ "dpnp_fft", diff --git a/dpnp/fft/dpnp_iface_fft.py b/dpnp/fft/dpnp_iface_fft.py index 952a9c72a8b6..a16996330f57 100644 --- a/dpnp/fft/dpnp_iface_fft.py +++ b/dpnp/fft/dpnp_iface_fft.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -40,13 +40,13 @@ """ -import dpnp +from enum import Enum + import numpy +import dpnp from dpnp.dpnp_utils import * from dpnp.fft.dpnp_algo_fft import * -from enum import Enum - __all__ = [ "fft", @@ -66,7 +66,7 @@ "rfft", "rfft2", "rfftfreq", - "rfftn" + "rfftn", ] @@ -75,6 +75,7 @@ class Norm(Enum): forward = 1 ortho = 2 + def get_validated_norm(norm): if norm is None or norm == "backward": return Norm.backward @@ -105,7 +106,7 @@ def fft(x1, n=None, axis=-1, norm=None): norm_ = get_validated_norm(norm) if axis is None: - axis_param = -1 # the most right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axis @@ -115,16 +116,23 @@ def fft(x1, n=None, axis=-1, norm=None): input_boundarie = n if x1_desc.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif input_boundarie < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif n is not None: pass elif axis != -1: pass else: output_boundarie = input_boundarie - return dpnp_fft(x1_desc, input_boundarie, output_boundarie, axis_param, False, norm_.value).get_pyobj() + return dpnp_fft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + False, + norm_.value, + ).get_pyobj() return call_origin(numpy.fft.fft, x1, n, axis, norm) @@ -208,7 +216,12 @@ def fftn(x1, s=None, axes=None, norm=None): try: param_n = boundaries[param_axis] except IndexError: - checker_throw_axis_error("fft.fftn", "is out of bounds", param_axis, f"< {len(boundaries)}") + checker_throw_axis_error( + "fft.fftn", + "is out of bounds", + param_axis, + f"< {len(boundaries)}", + ) x1_iter = fft(x1_iter, n=param_n, axis=param_axis, norm=norm) @@ -234,17 +247,24 @@ def fftshift(x1, axes=None): x1_desc = dpnp.get_dpnp_descriptor(x1) if x1_desc and 0: - norm_= Norm.backward + norm_ = Norm.backward if axis is None: - axis_param = -1 # the most right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axes if x1_desc.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception else: - return dpnp_fft(x1_desc, input_boundarie, output_boundarie, axis_param, False, norm_.value).get_pyobj() + return dpnp_fft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + False, + norm_.value, + ).get_pyobj() return call_origin(numpy.fft.fftshift, x1, axes) @@ -268,7 +288,7 @@ def hfft(x1, n=None, axis=-1, norm=None): norm_ = get_validated_norm(norm) if axis is None: - axis_param = -1 # the most right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axis @@ -278,15 +298,22 @@ def hfft(x1, n=None, axis=-1, norm=None): input_boundarie = n if x1.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif input_boundarie < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif norm is not None: pass else: output_boundarie = input_boundarie - return dpnp_fft(x1_desc, input_boundarie, output_boundarie, axis_param, False, norm_.value).get_pyobj() + return dpnp_fft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + False, + norm_.value, + ).get_pyobj() return call_origin(numpy.fft.hfft, x1, n, axis, norm) @@ -310,7 +337,7 @@ def ifft(x1, n=None, axis=-1, norm=None): norm_ = get_validated_norm(norm) if axis is None: - axis_param = -1 # the most 
right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axis @@ -320,14 +347,21 @@ def ifft(x1, n=None, axis=-1, norm=None): input_boundarie = n if x1_desc.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif input_boundarie < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif n is not None: pass else: output_boundarie = input_boundarie - return dpnp_fft(x1_desc, input_boundarie, output_boundarie, axis_param, True, norm_.value).get_pyobj() + return dpnp_fft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + True, + norm_.value, + ).get_pyobj() return call_origin(numpy.fft.ifft, x1, n, axis, norm) @@ -378,14 +412,21 @@ def ifftshift(x1, axes=None): norm_ = Norm.backward if axis is None: - axis_param = -1 # the most right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axes if x1_desc.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception else: - return dpnp_fft(x1_desc, input_boundarie, output_boundarie, axis_param, False, norm_.value).get_pyobj() + return dpnp_fft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + False, + norm_.value, + ).get_pyobj() return call_origin(numpy.fft.ifftshift, x1, axes) @@ -429,10 +470,20 @@ def ifftn(x1, s=None, axes=None, norm=None): try: param_n = boundaries[param_axis] except IndexError: - checker_throw_axis_error("fft.ifftn", "is out of bounds", param_axis, f"< {len(boundaries)}") + checker_throw_axis_error( + "fft.ifftn", + "is out of bounds", + param_axis, + f"< {len(boundaries)}", + ) x1_iter_desc = dpnp.get_dpnp_descriptor(x1_iter) - x1_iter = ifft(x1_iter_desc.get_pyobj(), n=param_n, axis=param_axis, norm=norm) + x1_iter = ifft( + x1_iter_desc.get_pyobj(), + n=param_n, + axis=param_axis, + norm=norm, + ) return x1_iter @@ -458,7 +509,7 @@ def ihfft(x1, n=None, axis=-1, norm=None): norm_ = get_validated_norm(norm) if axis is None: - axis_param = -1 # the most right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axis @@ -468,9 +519,9 @@ def ihfft(x1, n=None, axis=-1, norm=None): input_boundarie = n if x1_desc.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif input_boundarie < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif norm is not None: pass elif n is not None: @@ -478,7 +529,14 @@ def ihfft(x1, n=None, axis=-1, norm=None): else: output_boundarie = input_boundarie - return dpnp_fft(x1_desc, input_boundarie, output_boundarie, axis_param, False, norm_.value).get_pyobj() + return dpnp_fft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + False, + norm_.value, + ).get_pyobj() return call_origin(numpy.fft.ihfft, x1, n, axis, norm) @@ -502,7 +560,7 @@ def irfft(x1, n=None, axis=-1, norm=None): norm_ = get_validated_norm(norm) if axis is None: - axis_param = -1 # the most right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axis @@ -512,9 +570,9 @@ def irfft(x1, n=None, axis=-1, norm=None): input_boundarie = n if x1_desc.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif input_boundarie < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif norm is not None: pass elif n is not 
None: @@ -522,7 +580,14 @@ def irfft(x1, n=None, axis=-1, norm=None): else: output_boundarie = 2 * (input_boundarie - 1) - result = dpnp_rfft(x1_desc, input_boundarie, output_boundarie, axis_param, True, norm_.value).get_pyobj() + result = dpnp_rfft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + True, + norm_.value, + ).get_pyobj() # TODO tmp = utils.create_output_array(result_shape, result_c_type, out) # tmp = dparray(result.shape, dtype=dpnp.float64) # for it in range(tmp.size): @@ -597,10 +662,20 @@ def irfftn(x1, s=None, axes=None, norm=None): try: param_n = boundaries[param_axis] except IndexError: - checker_throw_axis_error("fft.irfftn", "is out of bounds", param_axis, f"< {len(boundaries)}") + checker_throw_axis_error( + "fft.irfftn", + "is out of bounds", + param_axis, + f"< {len(boundaries)}", + ) x1_iter_desc = dpnp.get_dpnp_descriptor(x1_iter) - x1_iter = irfft(x1_iter_desc.get_pyobj(), n=param_n, axis=param_axis, norm=norm) + x1_iter = irfft( + x1_iter_desc.get_pyobj(), + n=param_n, + axis=param_axis, + norm=norm, + ) return x1_iter @@ -626,7 +701,7 @@ def rfft(x1, n=None, axis=-1, norm=None): norm_ = get_validated_norm(norm) if axis is None: - axis_param = -1 # the most right dimension (default value) + axis_param = -1 # the most right dimension (default value) else: axis_param = axis @@ -636,9 +711,9 @@ def rfft(x1, n=None, axis=-1, norm=None): input_boundarie = n if x1_desc.size < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif input_boundarie < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception elif axis != -1: pass elif norm is not None: @@ -648,8 +723,17 @@ def rfft(x1, n=None, axis=-1, norm=None): elif x1_desc.dtype in (numpy.complex128, numpy.complex64): pass else: - output_boundarie = input_boundarie // 2 + 1 # rfft specific requirenment - return dpnp_rfft(x1_desc, input_boundarie, output_boundarie, axis_param, False, norm_.value).get_pyobj() + output_boundarie = ( + input_boundarie // 2 + 1 + ) # rfft specific requirenment + return dpnp_rfft( + x1_desc, + input_boundarie, + output_boundarie, + axis_param, + False, + norm_.value, + ).get_pyobj() return call_origin(numpy.fft.rfft, x1, n, axis, norm) @@ -726,7 +810,7 @@ def rfftn(x1, s=None, axes=None, norm=None): if norm is not None: pass elif len(axes) < 1: - pass # let fallback to handle exception + pass # let fallback to handle exception else: x1_iter = x1 iteration_list = list(range(len(axes_param))) @@ -736,10 +820,20 @@ def rfftn(x1, s=None, axes=None, norm=None): try: param_n = boundaries[param_axis] except IndexError: - checker_throw_axis_error("fft.rfftn", "is out of bounds", param_axis, f"< {len(boundaries)}") + checker_throw_axis_error( + "fft.rfftn", + "is out of bounds", + param_axis, + f"< {len(boundaries)}", + ) x1_iter_desc = dpnp.get_dpnp_descriptor(x1_iter) - x1_iter = rfft(x1_iter_desc.get_pyobj(), n=param_n, axis=param_axis, norm=norm) + x1_iter = rfft( + x1_iter_desc.get_pyobj(), + n=param_n, + axis=param_axis, + norm=norm, + ) return x1_iter diff --git a/dpnp/linalg/dpnp_algo_linalg.pyx b/dpnp/linalg/dpnp_algo_linalg.pyx index 04efad5c600f..7a3bef020c90 100644 --- a/dpnp/linalg/dpnp_algo_linalg.pyx +++ b/dpnp/linalg/dpnp_algo_linalg.pyx @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -33,13 +33,15 @@ and the rest of the library """ import numpy + from dpnp.dpnp_algo cimport * -import dpnp.dpnp_utils as utils_py + import dpnp -cimport dpnp.dpnp_utils as utils +import dpnp.dpnp_utils as utils_py cimport numpy +cimport dpnp.dpnp_utils as utils __all__ = [ "dpnp_cholesky", diff --git a/dpnp/linalg/dpnp_iface_linalg.py b/dpnp/linalg/dpnp_iface_linalg.py index 2db35c5d2ace..f7aa66035f0e 100644 --- a/dpnp/linalg/dpnp_iface_linalg.py +++ b/dpnp/linalg/dpnp_iface_linalg.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -40,14 +40,13 @@ """ -import dpnp import numpy -from dpnp.dpnp_utils import * +import dpnp from dpnp.dpnp_algo import * +from dpnp.dpnp_utils import * from dpnp.linalg.dpnp_algo_linalg import * - __all__ = [ "cholesky", "cond", @@ -67,6 +66,7 @@ def cholesky(input): """ Cholesky decomposition. + Return the Cholesky decomposition, `L * L.H`, of the square matrix `input`, where `L` is lower-triangular and .H is the conjugate transpose operator (which is the ordinary transpose if `input` is real-valued). `input` must be @@ -95,7 +95,9 @@ def cholesky(input): else: if input.dtype == dpnp.int32 or input.dtype == dpnp.int64: # TODO memory copy. needs to move into DPNPC - input_ = dpnp.get_dpnp_descriptor(dpnp.astype(input, dpnp.float64)) + input_ = dpnp.get_dpnp_descriptor( + dpnp.astype(input, dpnp.float64) + ) else: input_ = x1_desc return dpnp_cholesky(input_).get_pyobj() @@ -106,6 +108,7 @@ def cholesky(input): def cond(input, p=None): """ Compute the condition number of a matrix. + For full documentation refer to :obj:`numpy.linalg.cond`. Limitations @@ -118,8 +121,8 @@ def cond(input, p=None): :obj:`dpnp.norm` : Matrix or vector norm. """ - if (not use_origin_backend(input)): - if p in [None, 1, -1, 2, -2, numpy.inf, -numpy.inf, 'fro']: + if not use_origin_backend(input): + if p in [None, 1, -1, 2, -2, numpy.inf, -numpy.inf, "fro"]: result_obj = dpnp_cond(input, p) result = dpnp.convert_single_elem_array_to_scalar(result_obj) @@ -166,7 +169,7 @@ def eig(x1): x1_desc = dpnp.get_dpnp_descriptor(x1) if x1_desc: - if (x1_desc.size > 0): + if x1_desc.size > 0: return dpnp_eig(x1_desc) return call_origin(numpy.linalg.eig, x1) @@ -175,6 +178,7 @@ def eig(x1): def eigvals(input): """ Compute the eigenvalues of a general matrix. + Main difference between `eigvals` and `eig`: the eigenvectors aren't returned. @@ -215,7 +219,11 @@ def inv(input): x1_desc = dpnp.get_dpnp_descriptor(input) if x1_desc: - if x1_desc.ndim == 2 and x1_desc.shape[0] == x1_desc.shape[1] and x1_desc.shape[0] >= 2: + if ( + x1_desc.ndim == 2 + and x1_desc.shape[0] == x1_desc.shape[1] + and x1_desc.shape[0] >= 2 + ): return dpnp_inv(x1_desc).get_pyobj() return call_origin(numpy.linalg.inv, input) @@ -242,7 +250,7 @@ def matrix_power(input, count): if not use_origin_backend() and count > 0: result = input - for id in range(count - 1): + for _ in range(count - 1): result = dpnp.matmul(result, input) return result @@ -252,7 +260,8 @@ def matrix_power(input, count): def matrix_rank(input, tol=None, hermitian=False): """ - Return matrix rank of array + Return matrix rank of array. 
+ Rank of the array is the number of singular values of the array that are greater than `tol`. @@ -331,6 +340,7 @@ def multi_dot(arrays, out=None): def norm(x1, ord=None, axis=None, keepdims=False): """ Matrix or vector norm. + This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms (described below), depending on the value of the ``ord`` parameter. @@ -364,11 +374,15 @@ def norm(x1, ord=None, axis=None, keepdims=False): x1_desc = dpnp.get_dpnp_descriptor(x1) if x1_desc: - if not isinstance(axis, int) and not isinstance(axis, tuple) and axis is not None: + if ( + not isinstance(axis, int) + and not isinstance(axis, tuple) + and axis is not None + ): pass elif keepdims is not False: pass - elif ord not in [None, 0, 3, 'fro', 'f']: + elif ord not in [None, 0, 3, "fro", "f"]: pass else: result_obj = dpnp_norm(x1, ord=ord, axis=axis) @@ -379,7 +393,7 @@ def norm(x1, ord=None, axis=None, keepdims=False): return call_origin(numpy.linalg.norm, x1, ord, axis, keepdims) -def qr(x1, mode='reduced'): +def qr(x1, mode="reduced"): """ Compute the qr factorization of a matrix. @@ -397,7 +411,7 @@ def qr(x1, mode='reduced'): x1_desc = dpnp.get_dpnp_descriptor(x1) if x1_desc: - if mode != 'reduced': + if mode != "reduced": pass else: result_tup = dpnp_qr(x1_desc, mode) @@ -468,15 +482,17 @@ def svd(x1, full_matrices=True, compute_uv=True, hermitian=False): if x1_desc: if not x1_desc.ndim == 2: pass - elif not full_matrices == True: + elif not full_matrices: pass - elif not compute_uv == True: + elif not compute_uv: pass - elif not hermitian == False: + elif hermitian: pass else: result_tup = dpnp_svd(x1_desc, full_matrices, compute_uv, hermitian) return result_tup - return call_origin(numpy.linalg.svd, x1, full_matrices, compute_uv, hermitian) + return call_origin( + numpy.linalg.svd, x1, full_matrices, compute_uv, hermitian + ) diff --git a/dpnp/random/dpnp_algo_random.pyx b/dpnp/random/dpnp_algo_random.pyx index 3b01b3d0bf07..fea69c5fb39b 100644 --- a/dpnp/random/dpnp_algo_random.pyx +++ b/dpnp/random/dpnp_algo_random.pyx @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
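Editorial aside, not part of the patch: the svd() hunk above replaces explicit comparisons against bool literals (not full_matrices == True, not hermitian == False) with plain truthiness checks, the form pycodestyle's E712 warning recommends. A minimal, self-contained illustration of the two styles; the function and return values here are invented for the example only:

    def svd_like(full_matrices=True, compute_uv=True, hermitian=False):
        # discouraged: equality comparison against a bool literal (flagged as E712)
        if not full_matrices == True:
            return "fallback"
        # preferred: rely on the value's truthiness, as in the patched code
        if not compute_uv:
            return "fallback"
        if hermitian:
            return "fallback"
        return "fast path"

    print(svd_like())                     # fast path
    print(svd_like(full_matrices=False))  # fallback
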
# # Redistribution and use in source and binary forms, with or without @@ -34,13 +34,13 @@ and the rest of the library import numpy -import dpnp.config as config -from dpnp.dpnp_algo cimport * -cimport dpnp.dpnp_utils as utils +import dpnp.config as config cimport numpy +cimport dpnp.dpnp_utils as utils +from dpnp.dpnp_algo cimport * __all__ = [ "dpnp_rng_beta", @@ -144,7 +144,7 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_laplace_c_1out_t)(c_dpctl.DPCT const double, const size_t, const c_dpctl.DPCTLEventVectorRef) except + -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_logistic_c_1out_t)(c_dpctl.DPCTLSyclQueueRef, void * , +ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_logistic_c_1out_t)(c_dpctl.DPCTLSyclQueueRef, void * , const double, const double, const size_t, @@ -339,7 +339,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_binomial(int ntrial, double p, size): with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) c_dpctl.DPCTLEvent_Delete(event_ref) - + return result @@ -656,7 +656,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_logistic(double loc, double scale, size): cdef fptr_dpnp_rng_logistic_c_1out_t func = < fptr_dpnp_rng_logistic_c_1out_t > kernel_data.ptr # call FPTR function cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, result.get_data(), loc, scale, result.size, NULL) - + with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) c_dpctl.DPCTLEvent_Delete(event_ref) @@ -840,7 +840,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_negative_binomial(double a, double p, size) func = kernel_data.ptr # call FPTR function event_ref = func(q_ref, result.get_data(), a, p, result.size, NULL) - + with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) c_dpctl.DPCTLEvent_Delete(event_ref) @@ -1144,7 +1144,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_rayleigh(double scale, size): q = result_sycl_queue q_ref = q.get_queue_ref() - + func = kernel_data.ptr # call FPTR function event_ref = func(q_ref, result.get_data(), scale, result.size, NULL) diff --git a/dpnp/random/dpnp_iface_random.py b/dpnp/random/dpnp_iface_random.py index 533ba6949955..80fe4421683c 100644 --- a/dpnp/random/dpnp_iface_random.py +++ b/dpnp/random/dpnp_iface_random.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -35,65 +35,64 @@ """ -import dpnp +import operator + import numpy +import dpnp from dpnp.dpnp_algo import * from dpnp.dpnp_utils import * from dpnp.random.dpnp_algo_random import * -import operator - - __all__ = [ - 'beta', - 'binomial', - 'bytes', - 'chisquare', - 'choice', - 'dirichlet', - 'exponential', - 'f', - 'gamma', - 'geometric', - 'gumbel', - 'hypergeometric', - 'laplace', - 'logistic', - 'lognormal', - 'logseries', - 'multinomial', - 'multivariate_normal', - 'negative_binomial', - 'normal', - 'noncentral_chisquare', - 'noncentral_f', - 'pareto', - 'permutation', - 'poisson', - 'power', - 'rand', - 'randint', - 'randn', - 'random', - 'random_integers', - 'random_sample', - 'ranf', - 'rayleigh', - 'sample', - 'shuffle', - 'seed', - 'standard_cauchy', - 'standard_exponential', - 'standard_gamma', - 'standard_normal', - 'standard_t', - 'triangular', - 'uniform', - 'vonmises', - 'wald', - 'weibull', - 'zipf' + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "normal", + "noncentral_chisquare", + "noncentral_f", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "shuffle", + "seed", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", ] @@ -495,9 +494,9 @@ def hypergeometric(ngood, nbad, nsample, size=None): pass else: m = int(ngood) - l = int(ngood) + int(nbad) + p = int(ngood) + int(nbad) s = int(nsample) - return dpnp_rng_hypergeometric(l, s, m, size).get_pyobj() + return dpnp_rng_hypergeometric(p, s, m, size).get_pyobj() return call_origin(numpy.random.hypergeometric, ngood, nbad, nsample, size) @@ -569,7 +568,7 @@ def logistic(loc=0.0, scale=1.0, size=None): pass else: result = dpnp_rng_logistic(loc, scale, size).get_pyobj() - if size == None or size == 1: + if size is None or size == 1: return result[0] else: return result @@ -680,7 +679,7 @@ def multinomial(n, pvals, size=None): return call_origin(numpy.random.multinomial, n, pvals, size) -def multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8): +def multivariate_normal(mean, cov, size=None, check_valid="warn", tol=1e-8): """Multivariate normal distributions. Draw random samples from a multivariate normal distribution. @@ -721,9 +720,13 @@ def multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8): else: final_shape = list(shape[:]) final_shape.append(mean_.shape[0]) - return dpnp_rng_multivariate_normal(mean_, cov_, final_shape).get_pyobj() + return dpnp_rng_multivariate_normal( + mean_, cov_, final_shape + ).get_pyobj() - return call_origin(numpy.random.multivariate_normal, mean, cov, size, check_valid, tol) + return call_origin( + numpy.random.multivariate_normal, mean, cov, size, check_valid, tol + ) def negative_binomial(n, p, size=None): @@ -994,8 +997,7 @@ def power(a, size=None): def rand(d0, *dn): """ - Create an array of the given shape and populate it - with random samples from a uniform distribution over [0, 1). + Create an array of the given shape and populate it with random samples from a uniform distribution over [0, 1). 
For full documentation refer to :obj:`numpy.random.rand`. @@ -1118,8 +1120,8 @@ def randn(d0, *dn): def random(size): """ Return random floats in the half-open interval [0.0, 1.0). - Alias for random_sample. + Alias for random_sample. For full documentation refer to :obj:`numpy.random.random`. Limitations @@ -1205,8 +1207,8 @@ def random_sample(size): def ranf(size): """ Return random floats in the half-open interval [0.0, 1.0). - This is an alias of random_sample. + This is an alias of random_sample. For full documentation refer to :obj:`numpy.random.ranf`. Limitations @@ -1266,8 +1268,8 @@ def rayleigh(scale=1.0, size=None): def sample(size): """ Return random floats in the half-open interval [0.0, 1.0). - This is an alias of random_sample. + This is an alias of random_sample. For full documentation refer to :obj:`numpy.random.sample`. Limitations @@ -1567,13 +1569,15 @@ def uniform(low=0.0, high=1.0, size=None): else: if low > high: low, high = high, low - return dpnp_rng_uniform(low, high, size, dtype=numpy.float64).get_pyobj() + return dpnp_rng_uniform( + low, high, size, dtype=numpy.float64 + ).get_pyobj() return call_origin(numpy.random.uniform, low, high, size) def vonmises(mu, kappa, size=None): - """von Mises distribution. + """ Draw samples from a von Mises distribution. diff --git a/dpnp/to_numba/dpnp_iface_to_numba.py b/dpnp/to_numba/dpnp_iface_to_numba.py index 77b9a957ffab..385f72a91f7d 100644 --- a/dpnp/to_numba/dpnp_iface_to_numba.py +++ b/dpnp/to_numba/dpnp_iface_to_numba.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -40,23 +40,21 @@ import numba from numba.extending import get_cython_function_address as nba_addr - -name_to_numba_signatures = { - "cos": [(numba.types.float64)] -} +name_to_numba_signatures = {"cos": [(numba.types.float64)]} name_and_types_to_pointer = { - ("cos", numba.types.float64): ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(nba_addr("dpnp.dpnp_algo", "dpnp_cos")) + ("cos", numba.types.float64): ctypes.CFUNCTYPE( + ctypes.c_double, ctypes.c_double + )(nba_addr("dpnp.dpnp_algo", "dpnp_cos")) } def choose_kernel(name, all_signatures): - def choice_function(*args): for signature in all_signatures: if args == signature: f = name_and_types_to_pointer[(name, *signature)] - return lambda *args: f(*args) + return lambda *args, f=f: f(*args) return choice_function @@ -65,4 +63,6 @@ def add_overloads(): for name, all_signatures in name_to_numba_signatures.items(): sc_function = getattr(sc, name) print(f"sc_function={sc_function}") - numba.extending.overload(sc_function)(choose_kernel(name, all_signatures)) + numba.extending.overload(sc_function)( + choose_kernel(name, all_signatures) + ) diff --git a/dpnp/version.py b/dpnp/version.py index 9e0bcf9c1836..640ea30a6247 100644 --- a/dpnp/version.py +++ b/dpnp/version.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
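Editorial aside, not part of the patch: the choose_kernel() hunk above now binds f through a default argument (lambda *args, f=f: ...). A default argument freezes the value the closure sees at definition time; a closure that reads an enclosing loop variable directly instead sees whatever value that variable holds when the closure is finally called, which is the pattern flake8-bugbear's B023 check targets. A minimal sketch of the difference, with invented names:

    # closures that read the loop variable directly all see its last value
    late = [lambda x: x * factor for factor in (1, 2, 3)]
    print([fn(10) for fn in late])    # [30, 30, 30]

    # a default argument captures the value current at each iteration
    bound = [lambda x, factor=factor: x * factor for factor in (1, 2, 3)]
    print([fn(10) for fn in bound])   # [10, 20, 30]
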
# # Redistribution and use in source and binary forms, with or without @@ -29,6 +29,6 @@ DPNP version module """ -__version__: str = '0.10.1' +__version__: str = "0.10.1" version: str = __version__ diff --git a/examples/example1.py b/examples/example1.py index d9c9369a6b20..1705a928f7e9 100755 --- a/examples/example1.py +++ b/examples/example1.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -39,24 +39,29 @@ try: import dpnp except ImportError: - import sys import os + import sys root_dir = os.path.join(os.path.dirname(__file__), os.pardir) sys.path.append(root_dir) import dpnp -import numpy import time +import numpy + def run_dgemm(executor, name, size, test_type, repetition): - x1 = executor.reshape(executor.arange(size * size, dtype=test_type), (size, size)) - x2 = executor.reshape(executor.arange(size * size, dtype=test_type), (size, size)) + x1 = executor.reshape( + executor.arange(size * size, dtype=test_type), (size, size) + ) + x2 = executor.reshape( + executor.arange(size * size, dtype=test_type), (size, size) + ) times = [] - for iteration in range(repetition): + for _ in range(repetition): start_time = time.perf_counter() result = executor.matmul(x1, x2) # print("result[5]=%f" % (result.item(5))) @@ -70,15 +75,21 @@ def run_dgemm(executor, name, size, test_type, repetition): return (min_time, med_time, max_time), result.item(5) -if __name__ == '__main__': +if __name__ == "__main__": test_repetition = 5 for test_type in [numpy.float64, numpy.float32, numpy.int64, numpy.int32]: type_name = numpy.dtype(test_type).name - print(f"...Test data type is {test_type}, each test repetitions {test_repetition}") + print( + f"...Test data type is {test_type}, each test repetitions {test_repetition}" + ) for size in [16, 32, 64, 128]: # , 256, 512, 1024, 2048, 4096]: - times_python, result_python = run_dgemm(numpy, "", size, test_type, test_repetition) - times_sycl, result_mkl = run_dgemm(dpnp, " ", size, test_type, test_repetition) + times_python, result_python = run_dgemm( + numpy, "", size, test_type, test_repetition + ) + times_sycl, result_mkl = run_dgemm( + dpnp, " ", size, test_type, test_repetition + ) verification = False if result_mkl == result_python: diff --git a/examples/example10.py b/examples/example10.py index cdf8aa1e0e0d..2373992c9941 100644 --- a/examples/example10.py +++ b/examples/example10.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -40,14 +40,16 @@ import os import sys - sys.path.insert(0, os.path.abspath('.')) + sys.path.insert(0, os.path.abspath(".")) import dpnp import numpy def run(executor, size, test_type, repetition): - x = executor.reshape(executor.arange(size * size, dtype=test_type), (size, size)) + x = executor.reshape( + executor.arange(size * size, dtype=test_type), (size, size) + ) times = [] for _ in range(repetition): @@ -63,10 +65,14 @@ def example(): test_repetition = 5 for test_type in [numpy.float64, numpy.float32, numpy.int64, numpy.int32]: type_name = numpy.dtype(test_type).name - print(f"...Test data type is {type_name}, each test repetitions {test_repetition}") + print( + f"...Test data type is {type_name}, each test repetitions {test_repetition}" + ) for size in [64, 128, 256, 512, 1024, 2048, 4096]: - time_numpy, result_numpy = run(numpy, size, test_type, test_repetition) + time_numpy, result_numpy = run( + numpy, size, test_type, test_repetition + ) time_dpnp, result_dpnp = run(dpnp, size, test_type, test_repetition) if result_dpnp == result_numpy: diff --git a/examples/example2.py b/examples/example2.py index 92476599a563..ae0f2e653f7b 100755 --- a/examples/example2.py +++ b/examples/example2.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -39,17 +39,18 @@ try: import dpnp except ImportError: - import sys import os + import sys root_dir = os.path.join(os.path.dirname(__file__), os.pardir) sys.path.append(root_dir) import dpnp -import numpy import time +import numpy + common_function_one_input = numpy.sin """ Fixed third party function @@ -62,7 +63,7 @@ def get_package_specific_input_data_type(input_type, size): def run_third_party_function(input, repetition): times = [] - for iteration in range(repetition): + for _ in range(repetition): start_time = time.time() result = common_function_one_input(input) end_time = time.time() @@ -72,14 +73,20 @@ def run_third_party_function(input, repetition): return execution_time, result.item(5) -if __name__ == '__main__': +if __name__ == "__main__": test_repetition = 5 for input_type in [numpy, dpnp]: type_name = input_type.__name__ - print(f"...Test data type is {type_name}, each test repetitions {test_repetition}") + print( + f"...Test data type is {type_name}, each test repetitions {test_repetition}" + ) for size in [2048, 4096, 8192, 16384, 32768, 65536]: input_data = get_package_specific_input_data_type(input_type, size) - result_time, result = run_third_party_function(input_data, test_repetition) + result_time, result = run_third_party_function( + input_data, test_repetition + ) - print(f"type:{type_name}:N:{size:6}:Time:{result_time:.3e}:result:{result:.3e}") + print( + f"type:{type_name}:N:{size:6}:Time:{result_time:.3e}:result:{result:.3e}" + ) diff --git a/examples/example4.py b/examples/example4.py index 0790f84d10aa..64a2c0034c31 100755 --- a/examples/example4.py +++ b/examples/example4.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -38,20 +38,48 @@ """ Unary functions """ -for function in [numpy.sqrt, numpy.fabs, numpy.reciprocal, numpy.square, numpy.cbrt, numpy.degrees, numpy.radians]: +for function in [ + numpy.sqrt, + numpy.fabs, + numpy.reciprocal, + numpy.square, + numpy.cbrt, + numpy.degrees, + numpy.radians, +]: print() - for test_type in [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool]: + for test_type in [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + ]: data = numpy.array([1, 2, 3, 4], dtype=test_type) result = function(data) - print(f"input:{data.dtype.name:10}: outout:{result.dtype.name:10}: name:{function.__name__}") + print( + f"input:{data.dtype.name:10}: outout:{result.dtype.name:10}: name:{function.__name__}" + ) """ Two arguments functions """ for function in [numpy.equal, numpy.arctan2]: print() - for input1_type in [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool]: - for input2_type in [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool]: + for input1_type in [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + ]: + for input2_type in [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + ]: data1 = numpy.array([1, 2, 3, 4], dtype=input1_type) data2 = numpy.array([11, 21, 31, 41], dtype=input2_type) result = function(data1, data2) diff --git a/examples/example6.py b/examples/example6.py index 9319bfe6a664..e523d4fcc88a 100644 --- a/examples/example6.py +++ b/examples/example6.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -36,19 +36,19 @@ try: import dpnp except ImportError: - import sys import os + import sys root_dir = os.path.join(os.path.dirname(__file__), os.pardir) sys.path.append(root_dir) import dpnp -import numpy import time +import numpy -if __name__ == '__main__': +if __name__ == "__main__": # TODO # as is example1.py result = dpnp.random.randn(10) diff --git a/examples/example7.py b/examples/example7.py index fbf73f3aff76..0e8f74f973bf 100755 --- a/examples/example7.py +++ b/examples/example7.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -39,23 +39,26 @@ try: import dpnp except ImportError: - import sys import os + import sys root_dir = os.path.join(os.path.dirname(__file__), os.pardir) sys.path.append(root_dir) import dpnp -import numpy import time +import numpy + def run_function(executor, name, size, test_type, repetition): - x = executor.reshape(executor.arange(size * size, dtype=test_type), (size, size)) + x = executor.reshape( + executor.arange(size * size, dtype=test_type), (size, size) + ) times = [] - for iteration in range(repetition): + for _ in range(repetition): start_time = time.perf_counter() result = executor.sum(x) # print("result[5]=%f" % (result.item(5))) @@ -66,15 +69,21 @@ def run_function(executor, name, size, test_type, repetition): return execution_time, result -if __name__ == '__main__': +if __name__ == "__main__": test_repetition = 5 for test_type in [numpy.float64, numpy.float32, numpy.int64, numpy.int32]: type_name = numpy.dtype(test_type).name - print(f"...Test data type is {test_type}, each test repetitions {test_repetition}") + print( + f"...Test data type is {test_type}, each test repetitions {test_repetition}" + ) for size in [16, 32, 64, 128, 256, 512, 1024, 2048, 4096]: - time_python, result_python = run_function(numpy, "", size, test_type, test_repetition) - time_mkl, result_mkl = run_function(dpnp, " ", size, test_type, test_repetition) + time_python, result_python = run_function( + numpy, "", size, test_type, test_repetition + ) + time_mkl, result_mkl = run_function( + dpnp, " ", size, test_type, test_repetition + ) if result_mkl == result_python: verification = True diff --git a/examples/example_bs.py b/examples/example_bs.py index 0dd298faf472..d806a8833646 100644 --- a/examples/example_bs.py +++ b/examples/example_bs.py @@ -1,7 +1,7 @@ # cython: language_level=3 # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -35,8 +35,8 @@ try: import dpnp as np except ImportError: - import sys import os + import sys root_dir = os.path.join(os.path.dirname(__file__), os.pardir) sys.path.append(root_dir) @@ -44,7 +44,7 @@ import dpnp as np -SIZE = 2 ** 8 +SIZE = 2**8 DTYPE = np.float64 SEED = 7777777 @@ -68,7 +68,7 @@ def black_scholes(price, strike, t, rate, vol, call, put): z = T * sig_sig_two c = 0.25 * z - y = 1. 
/ np.sqrt(z) + y = 1.0 / np.sqrt(z) w1 = (a - b + c) * y w2 = (a - b - c) * y diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000000..0ef55919aac5 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,11 @@ +[tool.black] +line-length = 80 + +[tool.isort] +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 80 +color_output = true diff --git a/scripts/azure-pipelines.yml b/scripts/azure-pipelines.yml index bcc9286080dd..5ee4b0420d32 100644 --- a/scripts/azure-pipelines.yml +++ b/scripts/azure-pipelines.yml @@ -155,7 +155,7 @@ jobs: # - bash: | # echo ========================= install OneAPI ================================= # ./scripts/install_system_deps.sh -# +# # echo ========================= Conda ENV ====================================== # conda create -q -y -n dpnp$(python.version) python=$(python.version) numpy=1.19 conda-build cython pytest hypothesis # . /usr/share/miniconda/etc/profile.d/conda.sh diff --git a/scripts/build_deps_dpctl.sh b/scripts/build_deps_dpctl.sh index 990a6162752e..3d5331bbdfb8 100755 --- a/scripts/build_deps_dpctl.sh +++ b/scripts/build_deps_dpctl.sh @@ -4,7 +4,7 @@ THEDIR=$(dirname $(readlink -e ${BASH_SOURCE[0]})) DPCTL_TARGET_VERSION=0.5.0rc2 echo ++++++++++++++++++ Build DPCTL ${DPCTL_TARGET_VERSION} +++++++++++++++++++ -git clone --branch ${DPCTL_TARGET_VERSION} https://github.com/IntelPython/dpctl.git +git clone --branch ${DPCTL_TARGET_VERSION} https://github.com/IntelPython/dpctl.git cd dpctl diff --git a/scripts/build_deps_dpctl_win.bat b/scripts/build_deps_dpctl_win.bat index 464bb74c2ae4..25f592908296 100755 --- a/scripts/build_deps_dpctl_win.bat +++ b/scripts/build_deps_dpctl_win.bat @@ -1,4 +1,4 @@ -rem git clone --branch 0.5.0rc2 https://github.com/IntelPython/dpctl.git +rem git clone --branch 0.5.0rc2 https://github.com/IntelPython/dpctl.git rem for /f "tokens=* delims=" %%a in ('git tag -l') do git tag -d %%a rem git tag 0.5.0rc2 @@ -11,7 +11,7 @@ set DPCTL_DIST=%CD%\dist_dpctl call conda uninstall -y dpctl echo +++++++++++++++++++++++++ Downlowd DPCTL +++++++++++++++++++++++++++ -call git clone https://github.com/IntelPython/dpctl.git +call git clone https://github.com/IntelPython/dpctl.git cd dpctl set "ONEAPI_ROOT=C:\Program Files (x86)\Intel\oneAPI\" diff --git a/scripts/install_system_deps.sh b/scripts/install_system_deps.sh index ce7104ed1a31..cf62a632da00 100755 --- a/scripts/install_system_deps.sh +++ b/scripts/install_system_deps.sh @@ -5,7 +5,7 @@ THEDIR=$(dirname $(readlink -e ${BASH_SOURCE[0]})) # echo +++++++++++++++++++++++++ System prerequisites +++++++++++++++++++++++++++ # sudo apt-get install -f # sudo dpkg --configure -a -# sudo apt-get install -f +# sudo apt-get install -f # sudo apt-get clean # sudo apt-get autoclean @@ -76,7 +76,7 @@ sudo aptitude install -y cmake valgrind libgtest-dev #echo ========================= install/delete libstdc++-dev =================== #sudo apt remove -y gcc-7 g++-7 gcc-8 g++-8 gcc-10 g++-10 -# oneapi beta 10 can not work with libstdc++-10-dev +# oneapi beta 10 can not work with libstdc++-10-dev #sudo apt remove -y libstdc++-10-dev #sudo apt autoremove #sudo apt install --reinstall -y gcc-9 g++-9 libstdc++-9-dev diff --git a/setup.py b/setup.py index b7bfba6c28ba..9b403197a7da 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 
2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -25,7 +25,8 @@ # THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** -""" NumPy is the fundamental package for array computing with Python. +""" +NumPy is the fundamental package for array computing with Python. It provides: @@ -38,20 +39,26 @@ """ import importlib.machinery as imm # Python 3 is required -import sys import os +import sys + import dpctl import numpy - -from setuptools import setup, Extension from Cython.Build import cythonize from Cython.Compiler import Options as cython_options +from setuptools import Extension, setup -from utils.command_style import source_style -from utils.command_clean import source_clean -from utils.command_build_clib import custom_build_clib, dpnp_backend_c_description, _project_backend_dir, _sdl_cflags, _project_extra_link_args, IS_WIN +from utils.command_build_clib import ( + IS_WIN, + _project_backend_dir, + _project_extra_link_args, + _sdl_cflags, + custom_build_clib, + dpnp_backend_c_description, +) from utils.command_build_cmake_clib import custom_build_cmake_clib - +from utils.command_clean import source_clean +from utils.command_style import source_style """ Python version check @@ -64,14 +71,16 @@ Get the project version """ thefile_path = os.path.abspath(os.path.dirname(__file__)) -version_mod = imm.SourceFileLoader('version', os.path.join(thefile_path, 'dpnp', 'version.py')).load_module() +version_mod = imm.SourceFileLoader( + "version", os.path.join(thefile_path, "dpnp", "version.py") +).load_module() __version__ = version_mod.__version__ """ Set project auxilary data like readme and licence files """ -with open('README.md') as f: +with open("README.md") as f: __readme_file__ = f.read() CLASSIFIERS = """\ @@ -109,118 +118,139 @@ # TODO: refactor/fix # on Win we need a specific build_clib definition to prevent using cmake during build_ext execution if IS_WIN: - dpnp_build_commands = {'style': source_style, - 'build_clib_setuptools': custom_build_clib, - 'build_clib': custom_build_clib, - 'clean': source_clean - } + dpnp_build_commands = { + "style": source_style, + "build_clib_setuptools": custom_build_clib, + "build_clib": custom_build_clib, + "clean": source_clean, + } else: - dpnp_build_commands = {'style': source_style, - 'build_clib_setuptools': custom_build_clib, - 'build_clib': custom_build_cmake_clib, - 'clean': source_clean - } + dpnp_build_commands = { + "style": source_style, + "build_clib_setuptools": custom_build_clib, + "build_clib": custom_build_cmake_clib, + "clean": source_clean, + } if IS_WIN: - ''' + """ This variable controls setuptools execution on windows to avoid automatically search and confirm workability of the compiler If not set, error "Microsoft Visual C++ 14.0 or greater is required." 
appiars - ''' + """ os.environ["DISTUTILS_USE_SDK"] = "1" """ The project modules description """ kwargs_common = { - "include_dirs": [numpy.get_include(), dpctl.get_include()] + _project_backend_dir, - "library_dirs": [os.path.dirname(dpctl.get_include()),], + "include_dirs": [numpy.get_include(), dpctl.get_include()] + + _project_backend_dir, + "library_dirs": [ + os.path.dirname(dpctl.get_include()), + ], "libraries": ["DPCTLSyclInterface"], "extra_compile_args": _sdl_cflags, "extra_link_args": _project_extra_link_args, "define_macros": [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")], - "language": "c++" + "language": "c++", } dpnp_algo = Extension( name="dpnp.dpnp_algo.dpnp_algo", sources=[os.path.join("dpnp", "dpnp_algo", "dpnp_algo.pyx")], - **kwargs_common) + **kwargs_common +) dpnp_dparray = Extension( name="dpnp.dparray", sources=[os.path.join("dpnp", "dparray.pyx")], - **kwargs_common) + **kwargs_common +) dpnp_random = Extension( name="dpnp.random.dpnp_algo_random", sources=[os.path.join("dpnp", "random", "dpnp_algo_random.pyx")], - **kwargs_common) + **kwargs_common +) dpnp_linalg = Extension( name="dpnp.linalg.dpnp_algo_linalg", sources=[os.path.join("dpnp", "linalg", "dpnp_algo_linalg.pyx")], - **kwargs_common) + **kwargs_common +) dpnp_fft = Extension( name="dpnp.fft.dpnp_algo_fft", sources=[os.path.join("dpnp", "fft", "dpnp_algo_fft.pyx")], - **kwargs_common) + **kwargs_common +) dpnp_utils = Extension( name="dpnp.dpnp_utils.dpnp_algo_utils", sources=[os.path.join("dpnp", "dpnp_utils", "dpnp_algo_utils.pyx")], - **kwargs_common) + **kwargs_common +) cython_options.docstrings = True cython_options.warning_errors = True -dpnp_cython_mods = cythonize([dpnp_algo, dpnp_dparray, dpnp_random, dpnp_utils, dpnp_linalg, dpnp_fft], - compiler_directives={"language_level": sys.version_info[0], - "warn.unused": False, - "warn.unused_result": False, - "warn.maybe_uninitialized": False, - "warn.undeclared": False, - "boundscheck": True, - "linetrace": True - }, - gdb_debug=False, - build_dir="build_cython", - annotate=False, - quiet=False) - -setup(name="dpnp", - version=__version__, - description="NumPy-like API accelerated with SYCL", - long_description=__readme_file__, - long_description_content_type="text/markdown", - author="Intel Corporation", - maintainer="Intel Corp.", - maintainer_email="scripting@intel.com", - url="https://intelpython.github.io/dpnp/", - download_url="https://github.com/IntelPython/dpnp", - license='BSD', - classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], - keywords="sycl numpy python3 intel mkl oneapi gpu dpcpp pstl", - platforms=["Linux", "Windows"], - test_suite="pytest", - python_requires=">=3.6", - install_requires=["numpy>=1.15"], - setup_requires=["numpy>=1.15"], - tests_require=["numpy>=1.15"], - ext_modules=dpnp_cython_mods, - cmdclass=dpnp_build_commands, - packages=['dpnp', - 'dpnp.dpnp_algo', - 'dpnp.dpnp_utils', - 'dpnp.fft', - 'dpnp.linalg', - 'dpnp.random' - ], - package_data={'dpnp': ['libdpnp_backend_c.so', 'dpnp_backend_c.lib', 'dpnp_backend_c.dll']}, - include_package_data=True, - - # this is needed for 'build' command to automatically call 'build_clib' - # it attach the library to all extensions (it is not needed) - libraries=dpnp_backend_c_description - ) +dpnp_cython_mods = cythonize( + [dpnp_algo, dpnp_dparray, dpnp_random, dpnp_utils, dpnp_linalg, dpnp_fft], + compiler_directives={ + "language_level": sys.version_info[0], + "warn.unused": False, + "warn.unused_result": False, + "warn.maybe_uninitialized": False, + 
"warn.undeclared": False, + "boundscheck": True, + "linetrace": True, + }, + gdb_debug=False, + build_dir="build_cython", + annotate=False, + quiet=False, +) + +setup( + name="dpnp", + version=__version__, + description="NumPy-like API accelerated with SYCL", + long_description=__readme_file__, + long_description_content_type="text/markdown", + author="Intel Corporation", + maintainer="Intel Corp.", + maintainer_email="scripting@intel.com", + url="https://intelpython.github.io/dpnp/", + download_url="https://github.com/IntelPython/dpnp", + license="BSD", + classifiers=[_f for _f in CLASSIFIERS.split("\n") if _f], + keywords="sycl numpy python3 intel mkl oneapi gpu dpcpp pstl", + platforms=["Linux", "Windows"], + test_suite="pytest", + python_requires=">=3.6", + install_requires=["numpy>=1.15"], + setup_requires=["numpy>=1.15"], + tests_require=["numpy>=1.15"], + ext_modules=dpnp_cython_mods, + cmdclass=dpnp_build_commands, + packages=[ + "dpnp", + "dpnp.dpnp_algo", + "dpnp.dpnp_utils", + "dpnp.fft", + "dpnp.linalg", + "dpnp.random", + ], + package_data={ + "dpnp": [ + "libdpnp_backend_c.so", + "dpnp_backend_c.lib", + "dpnp_backend_c.dll", + ] + }, + include_package_data=True, + # this is needed for 'build' command to automatically call 'build_clib' + # it attach the library to all extensions (it is not needed) + libraries=dpnp_backend_c_description, +) diff --git a/tests/__init__.py b/tests/__init__.py index e4085539a910..94b779059738 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,9 +1,8 @@ -from tests.third_party.cupy import testing as cupy_testing -import dpnp import numpy +import dpnp from tests import testing - +from tests.third_party.cupy import testing as cupy_testing numpy.testing.assert_allclose = testing.assert_allclose numpy.testing.assert_array_equal = testing.assert_array_equal @@ -16,13 +15,18 @@ orig_shaped_reverse_arange = cupy_testing.shaped_reverse_arange -def _shaped_arange(shape, xp=dpnp, dtype=dpnp.float64, order='C'): - res = xp.array(orig_shaped_arange(shape, xp=numpy, dtype=dtype, order=order), dtype=dtype) +def _shaped_arange(shape, xp=dpnp, dtype=dpnp.float64, order="C"): + res = xp.array( + orig_shaped_arange(shape, xp=numpy, dtype=dtype, order=order), + dtype=dtype, + ) return res def _shaped_reverse_arange(shape, xp=dpnp, dtype=dpnp.float32): - res = xp.array(orig_shaped_reverse_arange(shape, xp=numpy, dtype=dtype), dtype=dtype) + res = xp.array( + orig_shaped_reverse_arange(shape, xp=numpy, dtype=dtype), dtype=dtype + ) return res diff --git a/tests/conftest.py b/tests/conftest.py index d9cbbb593e36..e3ebebb51d89 100755 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -26,12 +26,13 @@ import os import sys -import dpnp + import numpy import pytest +import dpnp -skip_mark = pytest.mark.skip(reason='Skipping test.') +skip_mark = pytest.mark.skip(reason="Skipping test.") def get_excluded_tests(test_exclude_file): @@ -46,10 +47,10 @@ def pytest_collection_modifyitems(config, items): test_path = os.path.split(__file__)[0] excluded_tests = [] # global skip file - test_exclude_file = os.path.join(test_path, 'skipped_tests.tbl') + test_exclude_file = os.path.join(test_path, "skipped_tests.tbl") # global skip file, where gpu device is not supported - test_exclude_file_gpu = os.path.join(test_path, 'skipped_tests_gpu.tbl') + test_exclude_file_gpu = os.path.join(test_path, "skipped_tests_gpu.tbl") current_queue_is_cpu = dpnp.dpnp_queue_is_cpu() print("") @@ -58,14 +59,14 @@ def pytest_collection_modifyitems(config, items): print(f"NumPy version: {numpy.__version__}, location: {numpy}") print(f"Python version: {sys.version}") print("") - if not current_queue_is_cpu or os.getenv('DPNP_QUEUE_GPU') == '1': + if not current_queue_is_cpu or os.getenv("DPNP_QUEUE_GPU") == "1": excluded_tests.extend(get_excluded_tests(test_exclude_file_gpu)) else: excluded_tests.extend(get_excluded_tests(test_exclude_file)) for item in items: # some test name contains '\n' in the parameters - test_name = item.nodeid.replace('\n', '').strip() + test_name = item.nodeid.replace("\n", "").strip() for item_tbl in excluded_tests: # remove end-of-line character diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index bbf3c1c3b535..be4e7feccf45 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -341,7 +341,7 @@ tests/third_party/cupy/creation_tests/test_from_data.py::TestArrayPreservationOf tests/third_party/cupy/creation_tests/test_from_data.py::TestArrayPreservationOfShape_param_7_{copy=True, ndmin=3, xp=dpnp}::test_cupy_array tests/third_party/cupy/creation_tests/test_from_data.py::TestArrayPreservationOfShape_param_8_{copy=False, ndmin=0, xp=numpy}::test_cupy_array tests/third_party/cupy/creation_tests/test_from_data.py::TestArrayPreservationOfShape_param_9_{copy=False, ndmin=0, xp=dpnp}::test_cupy_array -tests/third_party/cupy/creation_tests/test_from_data.py::TestFromData::test_array_copy_is_copied +tests/third_party/cupy/creation_tests/test_from_data.py::TestFromData::test_array_copy_is_copied tests/third_party/cupy/creation_tests/test_from_data.py::TestFromData::test_array_copy_list_of_cupy_with_dtype tests/third_party/cupy/creation_tests/test_from_data.py::TestFromData::test_array_copy_list_of_cupy_with_dtype_char tests/third_party/cupy/creation_tests/test_from_data.py::TestFromData::test_array_copy_list_of_numpy_with_dtype diff --git a/tests/test_absolute.py b/tests/test_absolute.py index aa145cc92023..1903ea42b950 100644 --- a/tests/test_absolute.py +++ b/tests/test_absolute.py @@ -1,13 +1,10 @@ +import numpy import pytest import dpnp as inp -import numpy - -@pytest.mark.parametrize("type", - [numpy.int64], - ids=['int64']) +@pytest.mark.parametrize("type", [numpy.int64], ids=["int64"]) def test_abs_int(type): a = numpy.array([1, 0, 2, -3, -1, 2, 21, -9]) ia = inp.array(a) @@ -17,9 +14,7 @@ def test_abs_int(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.int64], - ids=['int64']) +@pytest.mark.parametrize("type", [numpy.int64], ids=["int64"]) def test_absolute_int(type): a = numpy.array([1, 0, 2, -3, -1, 2, 21, -9]) ia = 
inp.array(a) @@ -29,11 +24,9 @@ def test_absolute_int(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64], - ids=['float64']) +@pytest.mark.parametrize("type", [numpy.float64], ids=["float64"]) def test_absolute_float(type): - a = numpy.array([[-2., 3., 9.1], [-2., 5.0, -2], [1.0, -2., 5.0]]) + a = numpy.array([[-2.0, 3.0, 9.1], [-2.0, 5.0, -2], [1.0, -2.0, 5.0]]) ia = inp.array(a) result = inp.absolute(ia) @@ -41,11 +34,15 @@ def test_absolute_float(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64], - ids=['float64']) +@pytest.mark.parametrize("type", [numpy.float64], ids=["float64"]) def test_absolute_float_3(type): - a = numpy.array([[[-2., 3.], [9.1, 0.2]], [[-2., 5.0], [-2, -1.2]], [[1.0, -2.], [5.0, -1.1]]]) + a = numpy.array( + [ + [[-2.0, 3.0], [9.1, 0.2]], + [[-2.0, 5.0], [-2, -1.2]], + [[1.0, -2.0], [5.0, -1.1]], + ] + ) ia = inp.array(a) result = inp.absolute(ia) diff --git a/tests/test_amin_amax.py b/tests/test_amin_amax.py index 90c92138e81e..e9af55dfe406 100644 --- a/tests/test_amin_amax.py +++ b/tests/test_amin_amax.py @@ -1,15 +1,18 @@ +import numpy import pytest import dpnp -import numpy - -@pytest.mark.parametrize("type", - [numpy.float64], - ids=['float64']) +@pytest.mark.parametrize("type", [numpy.float64], ids=["float64"]) def test_amax_float64(type): - a = numpy.array([[[-2., 3.], [9.1, 0.2]], [[-2., 5.0], [-2, -1.2]], [[1.0, -2.], [5.0, -1.1]]]) + a = numpy.array( + [ + [[-2.0, 3.0], [9.1, 0.2]], + [[-2.0, 5.0], [-2, -1.2]], + [[1.0, -2.0], [5.0, -1.1]], + ] + ) ia = dpnp.array(a) for axis in range(len(a)): @@ -18,9 +21,7 @@ def test_amax_float64(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.int64], - ids=['int64']) +@pytest.mark.parametrize("type", [numpy.int64], ids=["int64"]) def test_amax_int(type): a = numpy.array([1, 0, 2, -3, -1, 2, 21, -9]) ia = dpnp.array(a) @@ -30,11 +31,15 @@ def test_amax_int(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64], - ids=['float64']) +@pytest.mark.parametrize("type", [numpy.float64], ids=["float64"]) def test_amin_float64(type): - a = numpy.array([[[-2., 3.], [9.1, 0.2]], [[-2., 5.0], [-2, -1.2]], [[1.0, -2.], [5.0, -1.1]]]) + a = numpy.array( + [ + [[-2.0, 3.0], [9.1, 0.2]], + [[-2.0, 5.0], [-2, -1.2]], + [[1.0, -2.0], [5.0, -1.1]], + ] + ) ia = dpnp.array(a) for axis in range(len(a)): @@ -43,9 +48,7 @@ def test_amin_float64(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.int64], - ids=['int64']) +@pytest.mark.parametrize("type", [numpy.int64], ids=["int64"]) def test_amin_int(type): a = numpy.array([1, 0, 2, -3, -1, 2, 21, -9]) ia = dpnp.array(a) @@ -67,12 +70,14 @@ def _get_min_max_input(type, shape): return a.reshape(shape) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("shape", - [(4,), (2, 3), (4, 5, 6)], - ids=['(4,)', '(2,3)', '(4,5,6)']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "shape", [(4,), (2, 3), (4, 5, 6)], ids=["(4,)", "(2,3)", "(4,5,6)"] +) def test_amax(type, shape): a = _get_min_max_input(type, shape) @@ -87,12 +92,14 @@ def test_amax(type, shape): 
numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("shape", - [(4,), (2, 3), (4, 5, 6)], - ids=['(4,)', '(2,3)', '(4,5,6)']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "shape", [(4,), (2, 3), (4, 5, 6)], ids=["(4,)", "(2,3)", "(4,5,6)"] +) def test_amin(type, shape): a = _get_min_max_input(type, shape) diff --git a/tests/test_arithmetic.py b/tests/test_arithmetic.py index 6eb635e57730..71daa7f38dac 100644 --- a/tests/test_arithmetic.py +++ b/tests/test_arithmetic.py @@ -4,7 +4,6 @@ class TestArithmetic(unittest.TestCase): - @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_modf_part1(self, xp, dtype): diff --git a/tests/test_arraycreation.py b/tests/test_arraycreation.py index 723817c319e3..3c268053ee16 100644 --- a/tests/test_arraycreation.py +++ b/tests/test_arraycreation.py @@ -1,30 +1,37 @@ -import pytest - -import dpnp +import tempfile import numpy +import pytest -import tempfile +import dpnp -@pytest.mark.parametrize("k", - [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6], - ids=['-6', '-5', '-4', '-3', '-2', '-1', '0', '1', '2', '3', '4', '5', '6']) -@pytest.mark.parametrize("v", - [[0, 1, 2, 3, 4], - [1, 1, 1, 1, 1], - [[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[0, 1, 2], [3, 4, 5], [6, 7, 8]], - [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]], - ids=['[0, 1, 2, 3, 4]', - '[1, 1, 1, 1, 1]', - '[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[0, 1, 2], [3, 4, 5], [6, 7, 8]]', - '[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]']) +@pytest.mark.parametrize( + "k", + [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6], + ids=["-6", "-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5", "6"], +) +@pytest.mark.parametrize( + "v", + [ + [0, 1, 2, 3, 4], + [1, 1, 1, 1, 1], + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + ], + ids=[ + "[0, 1, 2, 3, 4]", + "[1, 1, 1, 1, 1]", + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + ], +) def test_diag(v, k): a = numpy.array(v) ia = dpnp.array(a) @@ -33,29 +40,33 @@ def test_diag(v, k): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("N", - [0, 1, 2, 3, 4], - ids=['0', '1', '2', '3', '4']) -@pytest.mark.parametrize("M", - [None, 0, 1, 2, 3, 4], - ids=['None', '0', '1', '2', '3', '4']) -@pytest.mark.parametrize("k", - [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], - ids=['-5', '-4', '-3', '-2', '-1', '0', '1', '2', '3', '4', '5']) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize("N", [0, 1, 2, 3, 4], ids=["0", "1", "2", "3", "4"]) +@pytest.mark.parametrize( + "M", [None, 0, 1, 2, 3, 4], ids=["None", "0", "1", "2", "3", "4"] +) +@pytest.mark.parametrize( + "k", + [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], + ids=["-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5"], +) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_eye(N, M, k, dtype): expected = numpy.eye(N, M=M, k=k, dtype=dtype) result = 
dpnp.eye(N, M=M, k=k, dtype=dtype) numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_frombuffer(type): - buffer = b'12345678' + buffer = b"12345678" np_res = numpy.frombuffer(buffer, dtype=type) dpnp_res = dpnp.frombuffer(buffer, dtype=type) @@ -63,9 +74,11 @@ def test_frombuffer(type): numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_fromfile(type): with tempfile.TemporaryFile() as fh: fh.write(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08") @@ -79,9 +92,11 @@ def test_fromfile(type): numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_fromfunction(type): def func(x, y): return x * y @@ -94,9 +109,11 @@ def func(x, y): numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_fromiter(type): iter = [1, 2, 3, 4] @@ -106,25 +123,27 @@ def test_fromiter(type): numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_fromstring(type): string = "1 2 3 4" - np_res = numpy.fromstring(string, dtype=type, sep=' ') - dpnp_res = dpnp.fromstring(string, dtype=type, sep=' ') + np_res = numpy.fromstring(string, dtype=type, sep=" ") + dpnp_res = dpnp.fromstring(string, dtype=type, sep=" ") numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("num", - [2, 4, 8, 3, 9, 27]) -@pytest.mark.parametrize("endpoint", - [True, False]) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("num", [2, 4, 8, 3, 9, 27]) +@pytest.mark.parametrize("endpoint", [True, False]) def test_geomspace(type, num, endpoint): start = 2 stop = 256 @@ -140,22 +159,31 @@ def test_geomspace(type, num, endpoint): numpy.testing.assert_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("n", - [0, 1, 4], - ids=['0', '1', '4']) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, - numpy.int32, numpy.bool, numpy.complex128, None], - ids=['float64', 'float32', 'int64', 'int32', 
'bool', 'complex128', 'None']) +@pytest.mark.parametrize("n", [0, 1, 4], ids=["0", "1", "4"]) +@pytest.mark.parametrize( + "type", + [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + numpy.complex128, + None, + ], + ids=["float64", "float32", "int64", "int32", "bool", "complex128", "None"], +) def test_identity(n, type): expected = numpy.identity(n, dtype=type) result = dpnp.identity(n, dtype=type) numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_loadtxt(type): with tempfile.TemporaryFile() as fh: fh.write(b"1 2 3 4") @@ -169,35 +197,51 @@ def test_loadtxt(type): numpy.testing.assert_array_equal(dpnp_res, np_res) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("offset", - [0, 1], - ids=['0', '1']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[0, 1, 2], [3, 4, 5], [6, 7, 8]], - [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]], - [[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [ - [[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]], - [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[0, 1, 2], [3, 4, 5], [6, 7, 8]]', - '[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]', - '[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]', - '[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]']) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("offset", [0, 1], ids=["0", "1"]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + [ + [[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], + [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]], + ], + [ + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], + [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 
1]], [[0, 1], [1, 3]]]]", + "[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]", + "[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]", + ], +) def test_trace(array, offset, type, dtype): a = numpy.array(array, type) ia = dpnp.array(array, type) @@ -206,18 +250,36 @@ def test_trace(array, offset, type, dtype): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("N", - [0, 1, 2, 3, 4], - ids=['0', '1', '2', '3', '4']) -@pytest.mark.parametrize("M", - [0, 1, 2, 3, 4], - ids=['0', '1', '2', '3', '4']) -@pytest.mark.parametrize("k", - [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], - ids=['-5', '-4', '-3', '-2', '-1', '0', '1', '2', '3', '4', '5']) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, float, numpy.int64, numpy.int32, numpy.int, numpy.float, int], - ids=['float64', 'float32', 'numpy.float', 'float', 'int64', 'int32', 'numpy.int', 'int']) +@pytest.mark.parametrize("N", [0, 1, 2, 3, 4], ids=["0", "1", "2", "3", "4"]) +@pytest.mark.parametrize("M", [0, 1, 2, 3, 4], ids=["0", "1", "2", "3", "4"]) +@pytest.mark.parametrize( + "k", + [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], + ids=["-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5"], +) +@pytest.mark.parametrize( + "type", + [ + numpy.float64, + numpy.float32, + float, + numpy.int64, + numpy.int32, + numpy.int, + numpy.float, + int, + ], + ids=[ + "float64", + "float32", + "numpy.float", + "float", + "int64", + "int32", + "numpy.int", + "int", + ], +) def test_tri(N, M, k, type): expected = numpy.tri(N, M, k, dtype=type) result = dpnp.tri(N, M, k, dtype=type) @@ -230,24 +292,32 @@ def test_tri_default_dtype(): numpy.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize("k", - [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6], - ids=['-6', '-5', '-4', '-3', '-2', '-1', '0', '1', '2', '3', '4', '5', '6']) -@pytest.mark.parametrize("m", - [[0, 1, 2, 3, 4], - [1, 1, 1, 1, 1], - [[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[0, 1, 2], [3, 4, 5], [6, 7, 8]], - [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]], - ids=['[0, 1, 2, 3, 4]', - '[1, 1, 1, 1, 1]', - '[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[0, 1, 2], [3, 4, 5], [6, 7, 8]]', - '[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]']) +@pytest.mark.parametrize( + "k", + [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6], + ids=["-6", "-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5", "6"], +) +@pytest.mark.parametrize( + "m", + [ + [0, 1, 2, 3, 4], + [1, 1, 1, 1, 1], + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + ], + ids=[ + "[0, 1, 2, 3, 4]", + "[1, 1, 1, 1, 1]", + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + ], +) def test_tril(m, k): a = numpy.array(m) ia = dpnp.array(a) @@ -256,18 +326,26 @@ def test_tril(m, k): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("k", - [-4, -3, -2, -1, 0, 1, 2, 3, 4], - ids=['-4', '-3', '-2', '-1', '0', '1', '2', '3', '4']) -@pytest.mark.parametrize("m", - [[0, 1, 2, 3, 4], - [[1, 2], [3, 4]], - [[0, 1, 2], [3, 4, 5], [6, 7, 8]], - [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]], - ids=['[0, 1, 2, 3, 4]', - '[[1, 2], [3, 4]]', - '[[0, 1, 2], [3, 4, 5], [6, 7, 8]]', - '[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]']) +@pytest.mark.parametrize( + "k", + [-4, -3, -2, -1, 0, 1, 2, 3, 
4], + ids=["-4", "-3", "-2", "-1", "0", "1", "2", "3", "4"], +) +@pytest.mark.parametrize( + "m", + [ + [0, 1, 2, 3, 4], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + ], + ids=[ + "[0, 1, 2, 3, 4]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + ], +) def test_triu(m, k): a = numpy.array(m) ia = dpnp.array(a) @@ -276,9 +354,11 @@ def test_triu(m, k): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("k", - [-4, -3, -2, -1, 0, 1, 2, 3, 4], - ids=['-4', '-3', '-2', '-1', '0', '1', '2', '3', '4']) +@pytest.mark.parametrize( + "k", + [-4, -3, -2, -1, 0, 1, 2, 3, 4], + ids=["-4", "-3", "-2", "-1", "0", "1", "2", "3", "4"], +) def test_triu_size_null(k): a = numpy.ones(shape=(1, 2, 0)) ia = dpnp.array(a) @@ -287,23 +367,25 @@ def test_triu_size_null(k): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array", - [[1, 2, 3, 4], - [], - [0, 3, 5]], - ids=['[1, 2, 3, 4]', - '[]', - '[0, 3, 5]']) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, - numpy.int32, numpy.bool, numpy.complex128], - ids=['float64', 'float32', 'int64', 'int32', 'bool', 'complex128']) -@pytest.mark.parametrize("n", - [0, 1, 4, None], - ids=['0', '1', '4', 'None']) -@pytest.mark.parametrize("increase", - [True, False], - ids=['True', 'False']) +@pytest.mark.parametrize( + "array", + [[1, 2, 3, 4], [], [0, 3, 5]], + ids=["[1, 2, 3, 4]", "[]", "[0, 3, 5]"], +) +@pytest.mark.parametrize( + "type", + [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + numpy.complex128, + ], + ids=["float64", "float32", "int64", "int32", "bool", "complex128"], +) +@pytest.mark.parametrize("n", [0, 1, 4, None], ids=["0", "1", "4", "None"]) +@pytest.mark.parametrize("increase", [True, False], ids=["True", "False"]) def test_vander(array, type, n, increase): a_np = numpy.array(array, dtype=type) a_dpnp = dpnp.array(array, dtype=type) diff --git a/tests/test_arraymanipulation.py b/tests/test_arraymanipulation.py index 9b06bf9596d3..676e699f91c6 100644 --- a/tests/test_arraymanipulation.py +++ b/tests/test_arraymanipulation.py @@ -1,15 +1,17 @@ +import numpy import pytest import dpnp -import numpy -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("data", - [[1, 2, 3], [1., 2., 3.]], - ids=["[1, 2, 3]", "[1., 2., 3.]"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "data", [[1, 2, 3], [1.0, 2.0, 3.0]], ids=["[1, 2, 3]", "[1., 2., 3.]"] +) def test_asfarray(dtype, data): expected = numpy.asfarray(data, dtype) result = dpnp.asfarray(data, dtype) @@ -17,12 +19,14 @@ def test_asfarray(dtype, data): numpy.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("data", - [[1, 2, 3], [1., 2., 3.]], - ids=["[1, 2, 3]", "[1., 2., 3.]"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "data", [[1, 2, 3], [1.0, 2.0, 3.0]], ids=["[1, 2, 3]", "[1., 2., 3.]"] +) def test_asfarray2(dtype, data): expected = 
numpy.asfarray(numpy.array(data), dtype) result = dpnp.asfarray(dpnp.array(data), dtype) @@ -51,35 +55,53 @@ def test_concatenate(self): numpy.testing.assert_array_equal(dpnp.concatenate((r4,)), r4) # Any sequence numpy.testing.assert_array_equal(dpnp.concatenate((tuple(r4),)), r4) - numpy.testing.assert_array_equal(dpnp.concatenate((dpnp.array(r4),)), r4) + numpy.testing.assert_array_equal( + dpnp.concatenate((dpnp.array(r4),)), r4 + ) # 1D default concatenation r3 = list(range(3)) numpy.testing.assert_array_equal(dpnp.concatenate((r4, r3)), r4 + r3) # Mixed sequence types - numpy.testing.assert_array_equal(dpnp.concatenate((tuple(r4), r3)), r4 + r3) - numpy.testing.assert_array_equal(dpnp.concatenate((dpnp.array(r4), r3)), r4 + r3) + numpy.testing.assert_array_equal( + dpnp.concatenate((tuple(r4), r3)), r4 + r3 + ) + numpy.testing.assert_array_equal( + dpnp.concatenate((dpnp.array(r4), r3)), r4 + r3 + ) # Explicit axis specification numpy.testing.assert_array_equal(dpnp.concatenate((r4, r3), 0), r4 + r3) # Including negative - numpy.testing.assert_array_equal(dpnp.concatenate((r4, r3), -1), r4 + r3) + numpy.testing.assert_array_equal( + dpnp.concatenate((r4, r3), -1), r4 + r3 + ) # 2D a23 = dpnp.array([[10, 11, 12], [13, 14, 15]]) a13 = dpnp.array([[0, 1, 2]]) res = dpnp.array([[10, 11, 12], [13, 14, 15], [0, 1, 2]]) numpy.testing.assert_array_equal(dpnp.concatenate((a23, a13)), res) numpy.testing.assert_array_equal(dpnp.concatenate((a23, a13), 0), res) - numpy.testing.assert_array_equal(dpnp.concatenate((a23.T, a13.T), 1), res.T) - numpy.testing.assert_array_equal(dpnp.concatenate((a23.T, a13.T), -1), res.T) + numpy.testing.assert_array_equal( + dpnp.concatenate((a23.T, a13.T), 1), res.T + ) + numpy.testing.assert_array_equal( + dpnp.concatenate((a23.T, a13.T), -1), res.T + ) # Arrays much match shape - numpy.testing.assert_raises(ValueError, dpnp.concatenate, (a23.T, a13.T), 0) + numpy.testing.assert_raises( + ValueError, dpnp.concatenate, (a23.T, a13.T), 0 + ) # 3D res = dpnp.reshape(dpnp.arange(2 * 3 * 7), (2, 3, 7)) a0 = res[..., :4] a1 = res[..., 4:6] a2 = res[..., 6:] numpy.testing.assert_array_equal(dpnp.concatenate((a0, a1, a2), 2), res) - numpy.testing.assert_array_equal(dpnp.concatenate((a0, a1, a2), -1), res) - numpy.testing.assert_array_equal(dpnp.concatenate((a0.T, a1.T, a2.T), 0), res.T) + numpy.testing.assert_array_equal( + dpnp.concatenate((a0, a1, a2), -1), res + ) + numpy.testing.assert_array_equal( + dpnp.concatenate((a0.T, a1.T, a2.T), 0), res.T + ) out = dpnp.copy(res) rout = dpnp.concatenate((a0, a1, a2), 2, out=out) diff --git a/tests/test_bitwise.py b/tests/test_bitwise.py index 8b21bcd2644a..52a545d5a841 100644 --- a/tests/test_bitwise.py +++ b/tests/test_bitwise.py @@ -1,15 +1,27 @@ +import numpy import pytest import dpnp as inp -import numpy - -@pytest.mark.parametrize("lhs", [[[-7, -6, -5, -4, -3, -2, -1], [0, 1, 2, 3, 4, 5, 6]], [-3, -2, -1, 0, 1, 2, 3], 0]) -@pytest.mark.parametrize("rhs", [[[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12, 13]], [0, 1, 2, 3, 4, 5, 6], 3]) +@pytest.mark.parametrize( + "lhs", + [ + [[-7, -6, -5, -4, -3, -2, -1], [0, 1, 2, 3, 4, 5, 6]], + [-3, -2, -1, 0, 1, 2, 3], + 0, + ], +) +@pytest.mark.parametrize( + "rhs", + [ + [[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12, 13]], + [0, 1, 2, 3, 4, 5, 6], + 3, + ], +) @pytest.mark.parametrize("dtype", [numpy.int32, numpy.int64]) class TestBitwise: - @staticmethod def array_or_scalar(xp, data, dtype=None): if numpy.isscalar(data): @@ -38,19 +50,19 @@ def _test_binary_int(self, name, lhs, rhs, 
dtype): numpy.testing.assert_array_equal(result, expected) def test_bitwise_and(self, lhs, rhs, dtype): - self._test_binary_int('bitwise_and', lhs, rhs, dtype) + self._test_binary_int("bitwise_and", lhs, rhs, dtype) def test_bitwise_or(self, lhs, rhs, dtype): - self._test_binary_int('bitwise_or', lhs, rhs, dtype) + self._test_binary_int("bitwise_or", lhs, rhs, dtype) def test_bitwise_xor(self, lhs, rhs, dtype): - self._test_binary_int('bitwise_xor', lhs, rhs, dtype) + self._test_binary_int("bitwise_xor", lhs, rhs, dtype) def test_invert(self, lhs, rhs, dtype): - self._test_unary_int('invert', lhs, dtype) + self._test_unary_int("invert", lhs, dtype) def test_left_shift(self, lhs, rhs, dtype): - self._test_binary_int('left_shift', lhs, rhs, dtype) + self._test_binary_int("left_shift", lhs, rhs, dtype) def test_right_shift(self, lhs, rhs, dtype): - self._test_binary_int('right_shift', lhs, rhs, dtype) + self._test_binary_int("right_shift", lhs, rhs, dtype) diff --git a/tests/test_counting.py b/tests/test_counting.py index 919702a4cc1e..fa39ad9aee8f 100644 --- a/tests/test_counting.py +++ b/tests/test_counting.py @@ -1,15 +1,15 @@ +import numpy import pytest import dpnp -import numpy - -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("size", - [2, 4, 8, 16, 3, 9, 27, 81]) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("size", [2, 4, 8, 16, 3, 9, 27, 81]) def test_count_nonzero(type, size): a = numpy.arange(size, dtype=type) for i in range(int(size / 2)): diff --git a/tests/test_dot.py b/tests/test_dot.py index ae6341ea9094..ff47af5a76ce 100644 --- a/tests/test_dot.py +++ b/tests/test_dot.py @@ -1,13 +1,14 @@ +import numpy import pytest import dpnp as inp -import numpy - -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_dot_ones(type): n = 10**5 a = numpy.ones(n, dtype=type) @@ -20,9 +21,11 @@ def test_dot_ones(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_dot_arange(type): n = 10**2 m = 10**3 @@ -36,9 +39,11 @@ def test_dot_arange(type): numpy.testing.assert_allclose(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_multi_dot(type): n = 16 a = inp.reshape(inp.arange(n, dtype=type), (4, 4)) diff --git a/tests/test_dparray.py b/tests/test_dparray.py index aa0c3f9ae6ef..ba854dfdd7e5 100644 --- a/tests/test_dparray.py +++ b/tests/test_dparray.py @@ -1,17 +1,40 @@ -import dpnp import numpy import pytest +import dpnp + -@pytest.mark.parametrize("res_dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool, numpy.bool_, 
numpy.complex], - ids=['float64', 'float32', 'int64', 'int32', 'bool', 'bool_', 'complex']) -@pytest.mark.parametrize("arr_dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool, numpy.bool_, numpy.complex], - ids=['float64', 'float32', 'int64', 'int32', 'bool', 'bool_', 'complex']) -@pytest.mark.parametrize("arr", - [[-2, -1, 0, 1, 2], [[-2, -1], [1, 2]], []], - ids=['[-2, -1, 0, 1, 2]', '[[-2, -1], [1, 2]]', '[]']) +@pytest.mark.parametrize( + "res_dtype", + [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + numpy.bool_, + numpy.complex, + ], + ids=["float64", "float32", "int64", "int32", "bool", "bool_", "complex"], +) +@pytest.mark.parametrize( + "arr_dtype", + [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + numpy.bool_, + numpy.complex, + ], + ids=["float64", "float32", "int64", "int32", "bool", "bool_", "complex"], +) +@pytest.mark.parametrize( + "arr", + [[-2, -1, 0, 1, 2], [[-2, -1], [1, 2]], []], + ids=["[-2, -1, 0, 1, 2]", "[[-2, -1], [1, 2]]", "[]"], +) def test_astype(arr, arr_dtype, res_dtype): numpy_array = numpy.array(arr, dtype=arr_dtype) dpnp_array = dpnp.array(numpy_array) @@ -20,12 +43,24 @@ def test_astype(arr, arr_dtype, res_dtype): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("arr_dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool, numpy.bool_, numpy.complex], - ids=['float64', 'float32', 'int64', 'int32', 'bool', 'bool_', 'complex']) -@pytest.mark.parametrize("arr", - [[-2, -1, 0, 1, 2], [[-2, -1], [1, 2]], []], - ids=['[-2, -1, 0, 1, 2]', '[[-2, -1], [1, 2]]', '[]']) +@pytest.mark.parametrize( + "arr_dtype", + [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + numpy.bool_, + numpy.complex, + ], + ids=["float64", "float32", "int64", "int32", "bool", "bool_", "complex"], +) +@pytest.mark.parametrize( + "arr", + [[-2, -1, 0, 1, 2], [[-2, -1], [1, 2]], []], + ids=["[-2, -1, 0, 1, 2]", "[[-2, -1], [1, 2]]", "[]"], +) def test_flatten(arr, arr_dtype): numpy_array = numpy.array(arr, dtype=arr_dtype) dpnp_array = dpnp.array(arr, dtype=arr_dtype) diff --git a/tests/test_fft.py b/tests/test_fft.py index 66019defd1ac..b9f5c616f207 100644 --- a/tests/test_fft.py +++ b/tests/test_fft.py @@ -1,12 +1,13 @@ +import numpy import pytest import dpnp -import numpy - -@pytest.mark.parametrize("type", ['complex128', 'complex64', 'float32', 'float64', 'int32', 'int64']) -@pytest.mark.parametrize("norm", [None, 'forward', 'ortho']) +@pytest.mark.parametrize( + "type", ["complex128", "complex64", "float32", "float64", "int32", "int64"] +) +@pytest.mark.parametrize("norm", [None, "forward", "ortho"]) def test_fft(type, norm): # 1 dim array data = numpy.arange(100, dtype=numpy.dtype(type)) @@ -22,9 +23,11 @@ def test_fft(type, norm): assert dpnp_res.dtype == np_res.dtype -@pytest.mark.parametrize("type", ['complex128', 'complex64', 'float32', 'float64', 'int32', 'int64']) +@pytest.mark.parametrize( + "type", ["complex128", "complex64", "float32", "float64", "int32", "int64"] +) @pytest.mark.parametrize("shape", [(8, 8), (4, 16), (4, 4, 4), (2, 4, 4, 2)]) -@pytest.mark.parametrize("norm", [None, 'forward', 'ortho']) +@pytest.mark.parametrize("norm", [None, "forward", "ortho"]) def test_fft_ndim(type, shape, norm): np_data = numpy.arange(64, dtype=numpy.dtype(type)).reshape(shape) dpnp_data = dpnp.arange(64, dtype=numpy.dtype(type)).reshape(shape) @@ -36,9 +39,13 @@ def test_fft_ndim(type, shape, norm): assert 
dpnp_res.dtype == np_res.dtype -@pytest.mark.parametrize("type", ['complex128', 'complex64', 'float32', 'float64', 'int32', 'int64']) -@pytest.mark.parametrize("shape", [(64,), (8, 8), (4, 16), (4, 4, 4), (2, 4, 4, 2)]) -@pytest.mark.parametrize("norm", [None, 'forward', 'ortho']) +@pytest.mark.parametrize( + "type", ["complex128", "complex64", "float32", "float64", "int32", "int64"] +) +@pytest.mark.parametrize( + "shape", [(64,), (8, 8), (4, 16), (4, 4, 4), (2, 4, 4, 2)] +) +@pytest.mark.parametrize("norm", [None, "forward", "ortho"]) def test_fft_ifft(type, shape, norm): np_data = numpy.arange(64, dtype=numpy.dtype(type)).reshape(shape) dpnp_data = dpnp.arange(64, dtype=numpy.dtype(type)).reshape(shape) @@ -50,8 +57,10 @@ def test_fft_ifft(type, shape, norm): assert dpnp_res.dtype == np_res.dtype -@pytest.mark.parametrize("type", ['float32', 'float64', 'int32', 'int64']) -@pytest.mark.parametrize("shape", [(64, ), (8, 8), (4, 16), (4, 4, 4), (2, 4, 4, 2)]) +@pytest.mark.parametrize("type", ["float32", "float64", "int32", "int64"]) +@pytest.mark.parametrize( + "shape", [(64,), (8, 8), (4, 16), (4, 4, 4), (2, 4, 4, 2)] +) def test_fft_rfft(type, shape): np_data = numpy.arange(64, dtype=numpy.dtype(type)).reshape(shape) dpnp_data = dpnp.arange(64, dtype=numpy.dtype(type)).reshape(shape) diff --git a/tests/test_flat.py b/tests/test_flat.py index c5152559e408..e061520f61c5 100644 --- a/tests/test_flat.py +++ b/tests/test_flat.py @@ -1,13 +1,10 @@ +import numpy import pytest import dpnp as inp -import numpy - -@pytest.mark.parametrize("type", - [numpy.int64], - ids=['int64']) +@pytest.mark.parametrize("type", [numpy.int64], ids=["int64"]) def test_flat(type): a = numpy.array([1, 0, 2, -3, -1, 2, 21, -9]) ia = inp.array(a) @@ -17,9 +14,7 @@ def test_flat(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.int64], - ids=['int64']) +@pytest.mark.parametrize("type", [numpy.int64], ids=["int64"]) def test_flat2(type): a = numpy.arange(1, 7).reshape(2, 3) ia = inp.array(a) @@ -29,9 +24,7 @@ def test_flat2(type): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.int64], - ids=['int64']) +@pytest.mark.parametrize("type", [numpy.int64], ids=["int64"]) def test_flat3(type): a = numpy.arange(1, 7).reshape(2, 3).T ia = inp.array(a) diff --git a/tests/test_histograms.py b/tests/test_histograms.py index 825a407d47d3..9d862d904cf7 100644 --- a/tests/test_histograms.py +++ b/tests/test_histograms.py @@ -1,12 +1,10 @@ +import numpy import pytest import dpnp -import numpy - class TestHistogram: - def setup(self): pass @@ -27,12 +25,17 @@ def test_simple(self): def test_one_bin(self): # Ticket 632 hist, edges = dpnp.histogram([1, 2, 3, 4], [1, 2]) - numpy.testing.assert_array_equal(hist, [2, ]) + numpy.testing.assert_array_equal( + hist, + [ + 2, + ], + ) numpy.testing.assert_array_equal(edges, [1, 2]) numpy.testing.assert_raises(ValueError, dpnp.histogram, [1, 2], bins=0) h, e = dpnp.histogram([1, 2], bins=1) numpy.testing.assert_equal(h, dpnp.array([2])) - numpy.testing.assert_allclose(e, dpnp.array([1., 2.])) + numpy.testing.assert_allclose(e, dpnp.array([1.0, 2.0])) def test_density(self): # Check that the integral of the density equals 1. 
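(Side note on the test_histograms.py hunk above: the vertical expansion of `[2, ]` in `test_one_bin` is black's "magic trailing comma" behavior. A collection literal that already ends with a comma is kept exploded, one element per line, which in turn forces the enclosing call onto multiple lines. A minimal illustrative sketch of the two spellings, not part of this diff:

import numpy

hist = numpy.array([2])

# The original source ends the list with a trailing comma, so black keeps it exploded:
numpy.testing.assert_array_equal(
    hist,
    [
        2,
    ],
)

# Without the trailing comma, black would leave the call on a single line:
numpy.testing.assert_array_equal(hist, [2])
)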
@@ -46,7 +49,7 @@ def test_density(self): v = dpnp.arange(10) bins = [0, 1, 3, 6, 10] a, b = dpnp.histogram(v, bins, density=True) - numpy.testing.assert_array_equal(a, .1) + numpy.testing.assert_array_equal(a, 0.1) numpy.testing.assert_equal(dpnp.sum(a * dpnp.diff(b))[0], 1) # Test that passing False works too @@ -58,16 +61,17 @@ def test_density(self): v = dpnp.arange(10) bins = [0, 1, 3, 6, numpy.inf] a, b = dpnp.histogram(v, bins, density=True) - numpy.testing.assert_array_equal(a, [.1, .1, .1, 0.]) + numpy.testing.assert_array_equal(a, [0.1, 0.1, 0.1, 0.0]) # Taken from a bug report from N. Becker on the numpy-discussion # mailing list Aug. 6, 2010. counts, dmy = dpnp.histogram( - [1, 2, 3, 4], [0.5, 1.5, numpy.inf], density=True) - numpy.testing.assert_equal(counts, [.25, 0]) + [1, 2, 3, 4], [0.5, 1.5, numpy.inf], density=True + ) + numpy.testing.assert_equal(counts, [0.25, 0]) def test_arr_weights_mismatch(self): - a = dpnp.arange(10) + .5 - w = dpnp.arange(11) + .5 + a = dpnp.arange(10) + 0.5 + w = dpnp.arange(11) + 0.5 with numpy.testing.assert_raises_regex(ValueError, "same shape as"): h, b = dpnp.histogram(a, range=[1, 9], weights=w, density=True) diff --git a/tests/test_indexing.py b/tests/test_indexing.py index 6519576171d0..875317e32ed9 100644 --- a/tests/test_indexing.py +++ b/tests/test_indexing.py @@ -1,9 +1,8 @@ +import numpy import pytest import dpnp -import numpy - def test_choose(): a = numpy.r_[:4] @@ -18,29 +17,41 @@ def test_choose(): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("offset", - [0, 1], - ids=['0', '1']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[0, 1, 2], [3, 4, 5], [6, 7, 8]], - [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]], - [[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [ - [[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]], - [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[0, 1, 2], [3, 4, 5], [6, 7, 8]]', - '[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]', - '[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]', - '[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]']) +@pytest.mark.parametrize("offset", [0, 1], ids=["0", "1"]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + [ + [[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], + [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]], + ], + [ + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], + [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 
1]], [[0, 1], [1, 3]]]]", + "[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]", + "[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]", + ], +) def test_diagonal(array, offset): a = numpy.array(array) ia = dpnp.array(a) @@ -49,22 +60,29 @@ def test_diagonal(array, offset): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("val", - [-1, 0, 1], - ids=['-1', '0', '1']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[0, 1, 2], [3, 4, 5], [6, 7, 8]], - [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[0, 1, 2], [3, 4, 5], [6, 7, 8]]', - '[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) +@pytest.mark.parametrize("val", [-1, 0, 1], ids=["-1", "0", "1"]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + ], +) def test_fill_diagonal(array, val): a = numpy.array(array) ia = dpnp.array(a) @@ -73,34 +91,54 @@ def test_fill_diagonal(array, val): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("dimension", - [(1, ), (2, ), (1, 2), (2, 3), (3, 2), [1], [2], [1, 2], [2, 3], [3, 2]], - ids=['(1, )', '(2, )', '(1, 2)', '(2, 3)', '(3, 2)', - '[1]', '[2]', '[1, 2]', '[2, 3]', '[3, 2]']) +@pytest.mark.parametrize( + "dimension", + [(1,), (2,), (1, 2), (2, 3), (3, 2), [1], [2], [1, 2], [2, 3], [3, 2]], + ids=[ + "(1, )", + "(2, )", + "(1, 2)", + "(2, 3)", + "(3, 2)", + "[1]", + "[2]", + "[1, 2]", + "[2, 3]", + "[3, 2]", + ], +) def test_indices(dimension): expected = numpy.indices(dimension) result = dpnp.indices(dimension) numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array", - [[], - [[0, 0], [0, 0]], - [[1, 0], [1, 0]], - [[1, 2], [3, 4]], - [[0, 1, 2], [3, 0, 5], [6, 7, 0]], - [[0, 1, 0, 3, 0], [5, 0, 7, 0, 9]], - [[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]], - [[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [ - [[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]], - ids=['[]', - '[[0, 0], [0, 0]]', - '[[1, 0], [1, 0]]', - '[[1, 2], [3, 4]]', - '[[0, 1, 2], [3, 0, 5], [6, 7, 0]]', - '[[0, 1, 0, 3, 0], [5, 0, 7, 0, 9]]', - '[[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]]', - '[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]']) +@pytest.mark.parametrize( + "array", + [ + [], + [[0, 0], [0, 0]], + [[1, 0], [1, 0]], + [[1, 2], [3, 4]], + [[0, 1, 2], [3, 0, 5], [6, 7, 0]], + [[0, 1, 0, 3, 0], [5, 0, 7, 0, 9]], + [[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]], + [ + [[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], + [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]], + ], + ], + ids=[ + "[]", + "[[0, 0], [0, 0]]", + "[[1, 0], [1, 0]]", + "[[1, 2], [3, 4]]", + "[[0, 1, 2], [3, 0, 5], [6, 7, 0]]", + "[[0, 1, 0, 3, 0], [5, 0, 7, 
0, 9]]", + "[[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]]", + "[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]", + ], +) def test_nonzero(array): a = numpy.array(array) ia = dpnp.array(array) @@ -109,25 +147,27 @@ def test_nonzero(array): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("vals", - [[100, 200], - (100, 200)], - ids=['[100, 200]', - '(100, 200)']) -@pytest.mark.parametrize("mask", - [[[True, False], [False, True]], - [[False, True], [True, False]], - [[False, False], [True, True]]], - ids=['[[True, False], [False, True]]', - '[[False, True], [True, False]]', - '[[False, False], [True, True]]']) -@pytest.mark.parametrize("arr", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]']) +@pytest.mark.parametrize( + "vals", [[100, 200], (100, 200)], ids=["[100, 200]", "(100, 200)"] +) +@pytest.mark.parametrize( + "mask", + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [True, True]], + ], + ids=[ + "[[True, False], [False, True]]", + "[[False, True], [True, False]]", + "[[False, False], [True, True]]", + ], +) +@pytest.mark.parametrize( + "arr", + [[[0, 0], [0, 0]], [[1, 2], [1, 2]], [[1, 2], [3, 4]]], + ids=["[[0, 0], [0, 0]]", "[[1, 2], [1, 2]]", "[[1, 2], [3, 4]]"], +) def test_place1(arr, mask, vals): a = numpy.array(arr) ia = dpnp.array(a) @@ -138,19 +178,37 @@ def test_place1(arr, mask, vals): numpy.testing.assert_array_equal(a, ia) -@pytest.mark.parametrize("vals", - [[100, 200], - [100, 200, 300, 400, 500, 600], - [100, 200, 300, 400, 500, 600, 800, 900]], - ids=['[100, 200]', - '[100, 200, 300, 400, 500, 600]', - '[100, 200, 300, 400, 500, 600, 800, 900]']) -@pytest.mark.parametrize("mask", - [[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]], - ids=['[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]']) -@pytest.mark.parametrize("arr", - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]], - ids=['[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]']) +@pytest.mark.parametrize( + "vals", + [ + [100, 200], + [100, 200, 300, 400, 500, 600], + [100, 200, 300, 400, 500, 600, 800, 900], + ], + ids=[ + "[100, 200]", + "[100, 200, 300, 400, 500, 600]", + "[100, 200, 300, 400, 500, 600, 800, 900]", + ], +) +@pytest.mark.parametrize( + "mask", + [ + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [True, True]], + ] + ], + ids=[ + "[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]" + ], +) +@pytest.mark.parametrize( + "arr", + [[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]], + ids=["[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]"], +) def test_place2(arr, mask, vals): a = numpy.array(arr) ia = dpnp.array(a) @@ -161,20 +219,43 @@ def test_place2(arr, mask, vals): numpy.testing.assert_array_equal(a, ia) -@pytest.mark.parametrize("vals", - [[100, 200], - [100, 200, 300, 400, 500, 600], - [100, 200, 300, 400, 500, 600, 800, 900]], - ids=['[100, 200]', - '[100, 200, 300, 400, 500, 600]', - '[100, 200, 300, 400, 500, 600, 800, 900]']) -@pytest.mark.parametrize("mask", - [[[[[False, False], [True, True]], [[True, True], [True, True]]], [ - [[False, False], [True, True]], [[False, False], [False, False]]]]], - ids=['[[[[False, False], [True, True]], [[True, True], [True, True]]], 
[[[False, False], [True, True]], [[False, False], [False, False]]]]']) -@pytest.mark.parametrize("arr", - [[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) +@pytest.mark.parametrize( + "vals", + [ + [100, 200], + [100, 200, 300, 400, 500, 600], + [100, 200, 300, 400, 500, 600, 800, 900], + ], + ids=[ + "[100, 200]", + "[100, 200, 300, 400, 500, 600]", + "[100, 200, 300, 400, 500, 600, 800, 900]", + ], +) +@pytest.mark.parametrize( + "mask", + [ + [ + [[[False, False], [True, True]], [[True, True], [True, True]]], + [[[False, False], [True, True]], [[False, False], [False, False]]], + ] + ], + ids=[ + "[[[[False, False], [True, True]], [[True, True], [True, True]]], [[[False, False], [True, True]], [[False, False], [False, False]]]]" + ], +) +@pytest.mark.parametrize( + "arr", + [ + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ] + ], + ids=[ + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]" + ], +) def test_place3(arr, mask, vals): a = numpy.array(arr) ia = dpnp.array(a) @@ -185,23 +266,28 @@ def test_place3(arr, mask, vals): numpy.testing.assert_array_equal(a, ia) -@pytest.mark.parametrize("v", - [0, 1, 2, 3, 4], - ids=['0', '1', '2', '3', '4']) -@pytest.mark.parametrize("ind", - [0, 1, 2, 3], - ids=['0', '1', '2', '3']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) +@pytest.mark.parametrize("v", [0, 1, 2, 3, 4], ids=["0", "1", "2", "3", "4"]) +@pytest.mark.parametrize("ind", [0, 1, 2, 3], ids=["0", "1", "2", "3"]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + ], +) def test_put(array, ind, v): a = numpy.array(array) ia = dpnp.array(a) @@ -210,23 +296,30 @@ def test_put(array, ind, v): numpy.testing.assert_array_equal(a, ia) -@pytest.mark.parametrize("v", - [[10, 20], [30, 40]], - ids=['[10, 20]', '[30, 40]']) -@pytest.mark.parametrize("ind", - [[0, 1], [2, 3]], - ids=['[0, 1]', '[2, 3]']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) +@pytest.mark.parametrize( + "v", [[10, 20], [30, 40]], ids=["[10, 20]", "[30, 40]"] +) +@pytest.mark.parametrize("ind", [[0, 1], [2, 3]], ids=["[0, 1]", "[2, 3]"]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], 
+ [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + ], +) def test_put2(array, ind, v): a = numpy.array(array) ia = dpnp.array(a) @@ -276,23 +369,25 @@ def test_put_along_axis2(): numpy.testing.assert_array_equal(a, ai) -@pytest.mark.parametrize("vals", - [[100, 200]], - ids=['[100, 200]']) -@pytest.mark.parametrize("mask", - [[[True, False], [False, True]], - [[False, True], [True, False]], - [[False, False], [True, True]]], - ids=['[[True, False], [False, True]]', - '[[False, True], [True, False]]', - '[[False, False], [True, True]]']) -@pytest.mark.parametrize("arr", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]']) +@pytest.mark.parametrize("vals", [[100, 200]], ids=["[100, 200]"]) +@pytest.mark.parametrize( + "mask", + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [True, True]], + ], + ids=[ + "[[True, False], [False, True]]", + "[[False, True], [True, False]]", + "[[False, False], [True, True]]", + ], +) +@pytest.mark.parametrize( + "arr", + [[[0, 0], [0, 0]], [[1, 2], [1, 2]], [[1, 2], [3, 4]]], + ids=["[[0, 0], [0, 0]]", "[[1, 2], [1, 2]]", "[[1, 2], [3, 4]]"], +) def test_putmask1(arr, mask, vals): a = numpy.array(arr) ia = dpnp.array(a) @@ -305,19 +400,37 @@ def test_putmask1(arr, mask, vals): numpy.testing.assert_array_equal(a, ia) -@pytest.mark.parametrize("vals", - [[100, 200], - [100, 200, 300, 400, 500, 600], - [100, 200, 300, 400, 500, 600, 800, 900]], - ids=['[100, 200]', - '[100, 200, 300, 400, 500, 600]', - '[100, 200, 300, 400, 500, 600, 800, 900]']) -@pytest.mark.parametrize("mask", - [[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]], - ids=['[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]']) -@pytest.mark.parametrize("arr", - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]], - ids=['[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]']) +@pytest.mark.parametrize( + "vals", + [ + [100, 200], + [100, 200, 300, 400, 500, 600], + [100, 200, 300, 400, 500, 600, 800, 900], + ], + ids=[ + "[100, 200]", + "[100, 200, 300, 400, 500, 600]", + "[100, 200, 300, 400, 500, 600, 800, 900]", + ], +) +@pytest.mark.parametrize( + "mask", + [ + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [True, True]], + ] + ], + ids=[ + "[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]" + ], +) +@pytest.mark.parametrize( + "arr", + [[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]], + ids=["[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]"], +) def test_putmask2(arr, mask, vals): a = numpy.array(arr) ia = dpnp.array(a) @@ -330,20 +443,43 @@ def test_putmask2(arr, mask, vals): numpy.testing.assert_array_equal(a, ia) -@pytest.mark.parametrize("vals", - [[100, 200], - [100, 200, 300, 400, 500, 600], - [100, 200, 300, 400, 500, 600, 800, 900]], - ids=['[100, 200]', - '[100, 200, 300, 400, 500, 600]', - '[100, 200, 300, 400, 500, 600, 800, 900]']) -@pytest.mark.parametrize("mask", - [[[[[False, False], [True, True]], [[True, True], [True, True]]], [ - [[False, 
False], [True, True]], [[False, False], [False, False]]]]], - ids=['[[[[False, False], [True, True]], [[True, True], [True, True]]], [[[False, False], [True, True]], [[False, False], [False, False]]]]']) -@pytest.mark.parametrize("arr", - [[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) +@pytest.mark.parametrize( + "vals", + [ + [100, 200], + [100, 200, 300, 400, 500, 600], + [100, 200, 300, 400, 500, 600, 800, 900], + ], + ids=[ + "[100, 200]", + "[100, 200, 300, 400, 500, 600]", + "[100, 200, 300, 400, 500, 600, 800, 900]", + ], +) +@pytest.mark.parametrize( + "mask", + [ + [ + [[[False, False], [True, True]], [[True, True], [True, True]]], + [[[False, False], [True, True]], [[False, False], [False, False]]], + ] + ], + ids=[ + "[[[[False, False], [True, True]], [[True, True], [True, True]]], [[[False, False], [True, True]], [[False, False], [False, False]]]]" + ], +) +@pytest.mark.parametrize( + "arr", + [ + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ] + ], + ids=[ + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]" + ], +) def test_putmask3(arr, mask, vals): a = numpy.array(arr) ia = dpnp.array(a) @@ -357,8 +493,12 @@ def test_putmask3(arr, mask, vals): def test_select(): - cond_val1 = numpy.array([True, True, True, False, False, False, False, False, False, False]) - cond_val2 = numpy.array([False, False, False, False, False, True, True, True, True, True]) + cond_val1 = numpy.array( + [True, True, True, False, False, False, False, False, False, False] + ) + cond_val2 = numpy.array( + [False, False, False, False, False, True, True, True, True, True] + ) icond_val1 = dpnp.array(cond_val1) icond_val2 = dpnp.array(cond_val2) condlist = [cond_val1, cond_val2] @@ -374,33 +514,54 @@ def test_select(): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array_type", - [numpy.bool8, numpy.int32, numpy.int64, numpy.float32, numpy.float64, numpy.complex128], - ids=['bool8', 'int32', 'int64', 'float32', 'float64', 'complex128']) -@pytest.mark.parametrize("indices_type", - [numpy.int32, numpy.int64], - ids=['int32', 'int64']) -@pytest.mark.parametrize("indices", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]']) -@pytest.mark.parametrize("array", - [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], - [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]], - [[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [ - [[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]], - [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]], - ids=['[[0, 1, 2], [3, 4, 5], [6, 7, 8]]', - '[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]', - '[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]', - '[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]']) +@pytest.mark.parametrize( + "array_type", + [ + numpy.bool8, + numpy.int32, + numpy.int64, + numpy.float32, + numpy.float64, + numpy.complex128, + ], + ids=["bool8", "int32", "int64", 
"float32", "float64", "complex128"], +) +@pytest.mark.parametrize( + "indices_type", [numpy.int32, numpy.int64], ids=["int32", "int64"] +) +@pytest.mark.parametrize( + "indices", + [[[0, 0], [0, 0]], [[1, 2], [1, 2]], [[1, 2], [3, 4]]], + ids=["[[0, 0], [0, 0]]", "[[1, 2], [1, 2]]", "[[1, 2], [3, 4]]"], +) +@pytest.mark.parametrize( + "array", + [ + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + [ + [[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], + [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]], + ], + [ + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], + [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]], + ], + ], + ids=[ + "[[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + "[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]", + "[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]", + ], +) def test_take(array, indices, array_type, indices_type): a = numpy.array(array, dtype=array_type) ind = numpy.array(indices, dtype=indices_type) @@ -433,31 +594,33 @@ def test_take_along_axis1(): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("m", - [None, 0, 1, 2, 3, 4], - ids=['None', '0', '1', '2', '3', '4']) -@pytest.mark.parametrize("k", - [0, 1, 2, 3, 4, 5], - ids=['0', '1', '2', '3', '4', '5']) -@pytest.mark.parametrize("n", - [1, 2, 3, 4, 5, 6], - ids=['1', '2', '3', '4', '5', '6']) +@pytest.mark.parametrize( + "m", [None, 0, 1, 2, 3, 4], ids=["None", "0", "1", "2", "3", "4"] +) +@pytest.mark.parametrize( + "k", [0, 1, 2, 3, 4, 5], ids=["0", "1", "2", "3", "4", "5"] +) +@pytest.mark.parametrize( + "n", [1, 2, 3, 4, 5, 6], ids=["1", "2", "3", "4", "5", "6"] +) def test_tril_indices(n, k, m): result = dpnp.tril_indices(n, k, m) expected = numpy.tril_indices(n, k, m) numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("k", - [0, 1, 2, 3, 4, 5], - ids=['0', '1', '2', '3', '4', '5']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], ], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]']) +@pytest.mark.parametrize( + "k", [0, 1, 2, 3, 4, 5], ids=["0", "1", "2", "3", "4", "5"] +) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + ], + ids=["[[0, 0], [0, 0]]", "[[1, 2], [1, 2]]", "[[1, 2], [3, 4]]"], +) def test_tril_indices_from(array, k): a = numpy.array(array) ia = dpnp.array(a) @@ -466,31 +629,33 @@ def test_tril_indices_from(array, k): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("m", - [None, 0, 1, 2, 3, 4], - ids=['None', '0', '1', '2', '3', '4']) -@pytest.mark.parametrize("k", - [0, 1, 2, 3, 4, 5], - ids=['0', '1', '2', '3', '4', '5']) -@pytest.mark.parametrize("n", - [1, 2, 3, 4, 5, 6], - ids=['1', '2', '3', '4', '5', '6']) +@pytest.mark.parametrize( + "m", [None, 0, 1, 2, 3, 4], ids=["None", "0", "1", "2", "3", "4"] +) +@pytest.mark.parametrize( + "k", [0, 1, 2, 3, 4, 5], ids=["0", "1", "2", "3", "4", "5"] +) +@pytest.mark.parametrize( + "n", [1, 2, 3, 4, 5, 6], ids=["1", "2", "3", "4", "5", "6"] +) def 
test_triu_indices(n, k, m): result = dpnp.triu_indices(n, k, m) expected = numpy.triu_indices(n, k, m) numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("k", - [0, 1, 2, 3, 4, 5], - ids=['0', '1', '2', '3', '4', '5']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], ], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]']) +@pytest.mark.parametrize( + "k", [0, 1, 2, 3, 4, 5], ids=["0", "1", "2", "3", "4", "5"] +) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + ], + ids=["[[0, 0], [0, 0]]", "[[1, 2], [1, 2]]", "[[1, 2], [3, 4]]"], +) def test_triu_indices_from(array, k): a = numpy.array(array) ia = dpnp.array(a) diff --git a/tests/test_linalg.py b/tests/test_linalg.py index ecd12040fd5d..5b08dc1b4015 100644 --- a/tests/test_linalg.py +++ b/tests/test_linalg.py @@ -1,9 +1,8 @@ +import numpy import pytest import dpnp as inp -import numpy - def vvsort(val, vec, size, xp): for i in range(size): @@ -29,13 +28,19 @@ def vvsort(val, vec, size, xp): vec[k, imax] = temp -@pytest.mark.parametrize("array", - [[[[1, -2], [2, 5]]], - [[[1., -2.], [2., 5.]]], - [[[1., -2.], [2., 5.]], [[1., -2.], [2., 5.]]]], - ids=['[[[1, -2], [2, 5]]]', - '[[[1., -2.], [2., 5.]]]', - '[[[1., -2.], [2., 5.]], [[1., -2.], [2., 5.]]]']) +@pytest.mark.parametrize( + "array", + [ + [[[1, -2], [2, 5]]], + [[[1.0, -2.0], [2.0, 5.0]]], + [[[1.0, -2.0], [2.0, 5.0]], [[1.0, -2.0], [2.0, 5.0]]], + ], + ids=[ + "[[[1, -2], [2, 5]]]", + "[[[1., -2.], [2., 5.]]]", + "[[[1., -2.], [2., 5.]], [[1., -2.], [2., 5.]]]", + ], +) def test_cholesky(array): a = numpy.array(array) ia = inp.array(a) @@ -44,12 +49,19 @@ def test_cholesky(array): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("arr", - [[[1, 0, -1], [0, 1, 0], [1, 0, 1]], [[1, 2, 3], [4, 5, 6], [7, 8, 9]]], - ids=['[[1, 0, -1], [0, 1, 0], [1, 0, 1]]', '[[1, 2, 3], [4, 5, 6], [7, 8, 9]]']) -@pytest.mark.parametrize("p", - [None, 1, -1, 2, -2, numpy.inf, -numpy.inf, 'fro'], - ids=['None', '1', '-1', '2', '-2', 'numpy.inf', '-numpy.inf', '"fro"']) +@pytest.mark.parametrize( + "arr", + [[[1, 0, -1], [0, 1, 0], [1, 0, 1]], [[1, 2, 3], [4, 5, 6], [7, 8, 9]]], + ids=[ + "[[1, 0, -1], [0, 1, 0], [1, 0, 1]]", + "[[1, 2, 3], [4, 5, 6], [7, 8, 9]]", + ], +) +@pytest.mark.parametrize( + "p", + [None, 1, -1, 2, -2, numpy.inf, -numpy.inf, "fro"], + ids=["None", "1", "-1", "2", "-2", "numpy.inf", "-numpy.inf", '"fro"'], +) def test_cond(arr, p): a = numpy.array(arr) ia = inp.array(a) @@ -58,17 +70,26 @@ def test_cond(arr, p): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], 
[[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + ], +) def test_det(array): a = numpy.array(array) ia = inp.array(a) @@ -77,14 +98,19 @@ def test_det(array): numpy.testing.assert_allclose(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("size", - [2, 4, 8, 16, 300]) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("size", [2, 4, 8, 16, 300]) def test_eig_arange(type, size): a = numpy.arange(size * size, dtype=type).reshape((size, size)) - symm_orig = numpy.tril(a) + numpy.tril(a, -1).T + numpy.diag(numpy.full((size,), size * size, dtype=type)) + symm_orig = ( + numpy.tril(a) + + numpy.tril(a, -1).T + + numpy.diag(numpy.full((size,), size * size, dtype=type)) + ) symm = symm_orig dpnp_symm_orig = inp.array(symm) dpnp_symm = dpnp_symm_orig @@ -106,21 +132,17 @@ def test_eig_arange(type, size): numpy.testing.assert_array_equal(symm_orig, symm) numpy.testing.assert_array_equal(dpnp_symm_orig, dpnp_symm) - assert (dpnp_val.dtype == np_val.dtype) - assert (dpnp_vec.dtype == np_vec.dtype) - assert (dpnp_val.shape == np_val.shape) - assert (dpnp_vec.shape == np_vec.shape) + assert dpnp_val.dtype == np_val.dtype + assert dpnp_vec.dtype == np_vec.dtype + assert dpnp_val.shape == np_val.shape + assert dpnp_vec.shape == np_vec.shape numpy.testing.assert_allclose(dpnp_val, np_val, rtol=1e-05, atol=1e-05) numpy.testing.assert_allclose(dpnp_vec, np_vec, rtol=1e-05, atol=1e-05) def test_eigvals(): - arrays = [ - [[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]] - ] + arrays = [[[0, 0], [0, 0]], [[1, 2], [1, 2]], [[1, 2], [3, 4]]] for array in arrays: a = numpy.array(array) ia = inp.array(a) @@ -129,12 +151,16 @@ def test_eigvals(): numpy.testing.assert_allclose(expected, result, atol=0.5) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("array", - [[[1., 2.], [3., 4.]], [[0, 1, 2], [3, 2, -1], [4, -2, 3]]], - ids=['[[1., 2.], [3., 4.]]', '[[0, 1, 2], [3, 2, -1], [4, -2, 3]]']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "array", + [[[1.0, 2.0], [3.0, 4.0]], [[0, 1, 2], [3, 2, -1], [4, -2, 3]]], + ids=["[[1., 2.], [3., 4.]]", "[[0, 1, 2], [3, 2, -1], [4, -2, 3]]"], +) def test_inv(type, array): a = numpy.array(array, dtype=type) ia = inp.array(a) @@ -162,15 +188,15 @@ def test_matrix_rank(): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array", - [[7], [1, 2], [1, 0]], - ids=['[7]', '[1, 2]', '[1, 0]']) -@pytest.mark.parametrize("ord", - [None, -numpy.Inf, -2, -1, 0, 1, 2, 3, numpy.Inf], - ids=['None', '-numpy.Inf', '-2', '-1', '0', '1', '2', '3', 'numpy.Inf']) -@pytest.mark.parametrize("axis", - [0, None], - ids=['0', 'None']) +@pytest.mark.parametrize( + "array", [[7], [1, 2], [1, 0]], ids=["[7]", "[1, 2]", "[1, 0]"] +) +@pytest.mark.parametrize( + "ord", + [None, -numpy.Inf, -2, -1, 0, 1, 2, 3, numpy.Inf], + ids=["None", "-numpy.Inf", "-2", "-1", "0", "1", "2", "3", "numpy.Inf"], +) +@pytest.mark.parametrize("axis", [0, None], ids=["0", "None"]) def test_norm1(array, ord, axis): a = numpy.array(array) ia = inp.array(a) @@ -179,15 +205,27 @@ def 
test_norm1(array, ord, axis): numpy.testing.assert_allclose(expected, result) -@pytest.mark.parametrize("array", - [[[1, 0]], [[1, 2]], [[1, 0], [3, 0]], [[1, 2], [3, 4]]], - ids=['[[1, 0]]', '[[1, 2]]', '[[1, 0], [3, 0]]', '[[1, 2], [3, 4]]']) -@pytest.mark.parametrize("ord", - [None, -numpy.Inf, -2, -1, 1, 2, numpy.Inf, 'fro', 'nuc'], - ids=['None', '-numpy.Inf', '-2', '-1', '1', '2', 'numpy.Inf', '"fro"', '"nuc"']) -@pytest.mark.parametrize("axis", - [(0, 1), None], - ids=['(0, 1)', 'None']) +@pytest.mark.parametrize( + "array", + [[[1, 0]], [[1, 2]], [[1, 0], [3, 0]], [[1, 2], [3, 4]]], + ids=["[[1, 0]]", "[[1, 2]]", "[[1, 0], [3, 0]]", "[[1, 2], [3, 4]]"], +) +@pytest.mark.parametrize( + "ord", + [None, -numpy.Inf, -2, -1, 1, 2, numpy.Inf, "fro", "nuc"], + ids=[ + "None", + "-numpy.Inf", + "-2", + "-1", + "1", + "2", + "numpy.Inf", + '"fro"', + '"nuc"', + ], +) +@pytest.mark.parametrize("axis", [(0, 1), None], ids=["(0, 1)", "None"]) def test_norm2(array, ord, axis): a = numpy.array(array) ia = inp.array(a) @@ -196,15 +234,27 @@ def test_norm2(array, ord, axis): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array", - [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[1, 0], [3, 0]], [[5, 0], [7, 0]]]], - ids=['[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]', '[[[1, 0], [3, 0]], [[5, 0], [7, 0]]]']) -@pytest.mark.parametrize("ord", - [None, -numpy.Inf, -2, -1, 1, 2, numpy.Inf], - ids=['None', '-numpy.Inf', '-2', '-1', '1', '2', 'numpy.Inf']) -@pytest.mark.parametrize("axis", - [0, 1, 2, (0, 1), (0, 2), (1, 2)], - ids=['0', '1', '2', '(0, 1)', '(0, 2)', '(1, 2)']) +@pytest.mark.parametrize( + "array", + [ + [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + [[[1, 0], [3, 0]], [[5, 0], [7, 0]]], + ], + ids=[ + "[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]", + "[[[1, 0], [3, 0]], [[5, 0], [7, 0]]]", + ], +) +@pytest.mark.parametrize( + "ord", + [None, -numpy.Inf, -2, -1, 1, 2, numpy.Inf], + ids=["None", "-numpy.Inf", "-2", "-1", "1", "2", "numpy.Inf"], +) +@pytest.mark.parametrize( + "axis", + [0, 1, 2, (0, 1), (0, 2), (1, 2)], + ids=["0", "1", "2", "(0, 1)", "(0, 2)", "(1, 2)"], +) def test_norm3(array, ord, axis): a = numpy.array(array) ia = inp.array(a) @@ -213,15 +263,19 @@ def test_norm3(array, ord, axis): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("shape", - [(2, 2), (3, 4), (5, 3), (16, 16)], - ids=['(2,2)', '(3,4)', '(5,3)', '(16,16)']) -@pytest.mark.parametrize("mode", - ['complete', 'reduced'], - ids=['complete', 'reduced']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "shape", + [(2, 2), (3, 4), (5, 3), (16, 16)], + ids=["(2,2)", "(3,4)", "(5,3)", "(16,16)"], +) +@pytest.mark.parametrize( + "mode", ["complete", "reduced"], ids=["complete", "reduced"] +) def test_qr(type, shape, mode): a = numpy.arange(shape[0] * shape[1], dtype=type).reshape(shape) ia = inp.array(a) @@ -229,10 +283,10 @@ def test_qr(type, shape, mode): np_q, np_r = numpy.linalg.qr(a, mode) dpnp_q, dpnp_r = inp.linalg.qr(ia, mode) - assert (dpnp_q.dtype == np_q.dtype) - assert (dpnp_r.dtype == np_r.dtype) - assert (dpnp_q.shape == np_q.shape) - assert (dpnp_r.shape == np_r.shape) + assert dpnp_q.dtype == np_q.dtype + assert dpnp_r.dtype == np_r.dtype + assert dpnp_q.shape == np_q.shape + assert dpnp_r.shape == 
np_r.shape if type == numpy.float32: tol = 1e-02 @@ -240,7 +294,12 @@ def test_qr(type, shape, mode): tol = 1e-11 # check decomposition - numpy.testing.assert_allclose(ia, numpy.dot(inp.asnumpy(dpnp_q), inp.asnumpy(dpnp_r)), rtol=tol, atol=tol) + numpy.testing.assert_allclose( + ia, + numpy.dot(inp.asnumpy(dpnp_q), inp.asnumpy(dpnp_r)), + rtol=tol, + atol=tol, + ) # NP change sign for comparison ncols = min(a.shape[0], a.shape[1]) @@ -251,17 +310,23 @@ def test_qr(type, shape, mode): np_r[i, :] = -np_r[i, :] if numpy.any(numpy.abs(np_r[i, :]) > tol): - numpy.testing.assert_allclose(inp.asnumpy(dpnp_q)[:, i], np_q[:, i], rtol=tol, atol=tol) + numpy.testing.assert_allclose( + inp.asnumpy(dpnp_q)[:, i], np_q[:, i], rtol=tol, atol=tol + ) numpy.testing.assert_allclose(dpnp_r, np_r, rtol=tol, atol=tol) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("shape", - [(2, 2), (3, 4), (5, 3), (16, 16)], - ids=['(2,2)', '(3,4)', '(5,3)', '(16,16)']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "shape", + [(2, 2), (3, 4), (5, 3), (16, 16)], + ids=["(2,2)", "(3,4)", "(5,3)", "(16,16)"], +) def test_svd(type, shape): a = numpy.arange(shape[0] * shape[1], dtype=type).reshape(shape) ia = inp.array(a) @@ -269,12 +334,12 @@ def test_svd(type, shape): np_u, np_s, np_vt = numpy.linalg.svd(a) dpnp_u, dpnp_s, dpnp_vt = inp.linalg.svd(ia) - assert (dpnp_u.dtype == np_u.dtype) - assert (dpnp_s.dtype == np_s.dtype) - assert (dpnp_vt.dtype == np_vt.dtype) - assert (dpnp_u.shape == np_u.shape) - assert (dpnp_s.shape == np_s.shape) - assert (dpnp_vt.shape == np_vt.shape) + assert dpnp_u.dtype == np_u.dtype + assert dpnp_s.dtype == np_s.dtype + assert dpnp_vt.dtype == np_vt.dtype + assert dpnp_u.shape == np_u.shape + assert dpnp_s.shape == np_s.shape + assert dpnp_vt.shape == np_vt.shape if type == numpy.float32: tol = 1e-03 @@ -287,7 +352,9 @@ def test_svd(type, shape): dpnp_diag_s[i, i] = dpnp_s[i] # check decomposition - numpy.testing.assert_allclose(ia, inp.dot(dpnp_u, inp.dot(dpnp_diag_s, dpnp_vt)), rtol=tol, atol=tol) + numpy.testing.assert_allclose( + ia, inp.dot(dpnp_u, inp.dot(dpnp_diag_s, dpnp_vt)), rtol=tol, atol=tol + ) # compare singular values # numpy.testing.assert_allclose(dpnp_s, np_s, rtol=tol, atol=tol) @@ -300,5 +367,9 @@ def test_svd(type, shape): # compare vectors for non-zero values for i in range(numpy.count_nonzero(np_s > tol)): - numpy.testing.assert_allclose(inp.asnumpy(dpnp_u)[:, i], np_u[:, i], rtol=tol, atol=tol) - numpy.testing.assert_allclose(inp.asnumpy(dpnp_vt)[i, :], np_vt[i, :], rtol=tol, atol=tol) + numpy.testing.assert_allclose( + inp.asnumpy(dpnp_u)[:, i], np_u[:, i], rtol=tol, atol=tol + ) + numpy.testing.assert_allclose( + inp.asnumpy(dpnp_vt)[i, :], np_vt[i, :], rtol=tol, atol=tol + ) diff --git a/tests/test_logic.py b/tests/test_logic.py index b826740b9bd9..79c4a91a8df9 100644 --- a/tests/test_logic.py +++ b/tests/test_logic.py @@ -1,22 +1,32 @@ +import numpy import pytest import dpnp -import numpy - -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool, numpy.bool_], - ids=['float64', 'float32', 'int64', 'int32', 'bool', 'bool_']) -@pytest.mark.parametrize("shape", - [(0,), (4,), (2, 3), (2, 2, 2)], - ids=['(0,)', '(4,)', '(2,3)', '(2,2,2)']) +@pytest.mark.parametrize( + "type", + 
[ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + numpy.bool_, + ], + ids=["float64", "float32", "int64", "int32", "bool", "bool_"], +) +@pytest.mark.parametrize( + "shape", + [(0,), (4,), (2, 3), (2, 2, 2)], + ids=["(0,)", "(4,)", "(2,3)", "(2,2,2)"], +) def test_all(type, shape): size = 1 for i in range(len(shape)): size *= shape[i] - for i in range(2 ** size): + for i in range(2**size): t = i a = numpy.empty(size, dtype=type) @@ -38,9 +48,11 @@ def test_all(type, shape): numpy.testing.assert_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_allclose(type): a = numpy.random.rand(10) @@ -62,18 +74,29 @@ def test_allclose(type): numpy.testing.assert_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.bool, numpy.bool_], - ids=['float64', 'float32', 'int64', 'int32', 'bool', 'bool_']) -@pytest.mark.parametrize("shape", - [(0,), (4,), (2, 3), (2, 2, 2)], - ids=['(0,)', '(4,)', '(2,3)', '(2,2,2)']) +@pytest.mark.parametrize( + "type", + [ + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.bool, + numpy.bool_, + ], + ids=["float64", "float32", "int64", "int32", "bool", "bool_"], +) +@pytest.mark.parametrize( + "shape", + [(0,), (4,), (2, 3), (2, 2, 2)], + ids=["(0,)", "(4,)", "(2,3)", "(2,2,2)"], +) def test_any(type, shape): size = 1 for i in range(len(shape)): size *= shape[i] - for i in range(2 ** size): + for i in range(2**size): t = i a = numpy.empty(size, dtype=type) @@ -99,8 +122,8 @@ def test_greater(): a = numpy.array([1, 2, 3, 4, 5, 6, 7, 8]) ia = dpnp.array(a) for i in range(len(a) + 1): - np_res = (a > i) - dpnp_res = (ia > i) + np_res = a > i + dpnp_res = ia > i numpy.testing.assert_equal(dpnp_res, np_res) @@ -108,8 +131,8 @@ def test_greater_equal(): a = numpy.array([1, 2, 3, 4, 5, 6, 7, 8]) ia = dpnp.array(a) for i in range(len(a) + 1): - np_res = (a >= i) - dpnp_res = (ia >= i) + np_res = a >= i + dpnp_res = ia >= i numpy.testing.assert_equal(dpnp_res, np_res) @@ -117,8 +140,8 @@ def test_less(): a = numpy.array([1, 2, 3, 4, 5, 6, 7, 8]) ia = dpnp.array(a) for i in range(len(a) + 1): - np_res = (a < i) - dpnp_res = (ia < i) + np_res = a < i + dpnp_res = ia < i numpy.testing.assert_equal(dpnp_res, np_res) @@ -126,8 +149,8 @@ def test_less_equal(): a = numpy.array([1, 2, 3, 4, 5, 6, 7, 8]) ia = dpnp.array(a) for i in range(len(a) + 1): - np_res = (a <= i) - dpnp_res = (ia <= i) + np_res = a <= i + dpnp_res = ia <= i numpy.testing.assert_equal(dpnp_res, np_res) @@ -135,6 +158,6 @@ def test_not_equal(): a = numpy.array([1, 2, 3, 4, 5, 6, 7, 8]) ia = dpnp.array(a) for i in range(len(a)): - np_res = (a != i) - dpnp_res = (ia != i) + np_res = a != i + dpnp_res = ia != i numpy.testing.assert_equal(dpnp_res, np_res) diff --git a/tests/test_manipulation.py b/tests/test_manipulation.py index 8130fcdb4e86..4c354f4a220a 100644 --- a/tests/test_manipulation.py +++ b/tests/test_manipulation.py @@ -1,13 +1,18 @@ -import pytest import numpy -import dpnp +import pytest +import dpnp testdata = [] -testdata += [([True, False, True], dtype) for dtype in ['float32', 'float64', 'int32', 'int64', 'bool']] -testdata += [([1, -1, 0], dtype) for dtype in ['float32', 'float64', 'int32', 'int64']] -testdata += 
[([0.1, 0.0, -0.1], dtype) for dtype in ['float32', 'float64']] -testdata += [([1j, -1j, 1 - 2j], dtype) for dtype in ['complex128']] +testdata += [ + ([True, False, True], dtype) + for dtype in ["float32", "float64", "int32", "int64", "bool"] +] +testdata += [ + ([1, -1, 0], dtype) for dtype in ["float32", "float64", "int32", "int64"] +] +testdata += [([0.1, 0.0, -0.1], dtype) for dtype in ["float32", "float64"]] +testdata += [([1j, -1j, 1 - 2j], dtype) for dtype in ["complex128"]] @pytest.mark.parametrize("in_obj,out_dtype", testdata) @@ -23,9 +28,11 @@ def test_copyto_dtype(in_obj, out_dtype): numpy.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize("arr", - [[], [1, 2, 3, 4], [[1, 2], [3, 4]], [[[1], [2]], [[3], [4]]]], - ids=['[]', '[1, 2, 3, 4]', '[[1, 2], [3, 4]]', '[[[1], [2]], [[3], [4]]]']) +@pytest.mark.parametrize( + "arr", + [[], [1, 2, 3, 4], [[1, 2], [3, 4]], [[[1], [2]], [[3], [4]]]], + ids=["[]", "[1, 2, 3, 4]", "[[1, 2], [3, 4]]", "[[[1], [2]], [[3], [4]]]"], +) def test_repeat(arr): a = numpy.array(arr) dpnp_a = dpnp.array(arr) @@ -34,15 +41,11 @@ def test_repeat(arr): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array", - [[1, 2, 3], - [1, 2, 2, 1, 2, 4], - [2, 2, 2, 2], - []], - ids=['[1, 2, 3]', - '[1, 2, 2, 1, 2, 4]', - '[2, 2, 2, 2]', - '[]']) +@pytest.mark.parametrize( + "array", + [[1, 2, 3], [1, 2, 2, 1, 2, 4], [2, 2, 2, 2], []], + ids=["[1, 2, 3]", "[1, 2, 2, 1, 2, 4]", "[2, 2, 2, 2]", "[]"], +) def test_unique(array): np_a = numpy.array(array) dpnp_a = dpnp.array(array) diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 21040478ca8d..061560f75f5f 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -1,15 +1,16 @@ +import numpy import pytest import dpnp -import numpy - class TestConvolve: def test_object(self): - d = [1.] * 100 - k = [1.] 
* 3 - numpy.testing.assert_array_almost_equal(dpnp.convolve(d, k)[2:-2], dpnp.full(98, 3)) + d = [1.0] * 100 + k = [1.0] * 3 + numpy.testing.assert_array_almost_equal( + dpnp.convolve(d, k)[2:-2], dpnp.full(98, 3) + ) def test_no_overwrite(self): d = dpnp.ones(100) @@ -21,8 +22,8 @@ def test_no_overwrite(self): def test_mode(self): d = dpnp.ones(100) k = dpnp.ones(3) - default_mode = dpnp.convolve(d, k, mode='full') - full_mode = dpnp.convolve(d, k, mode='f') + default_mode = dpnp.convolve(d, k, mode="full") + full_mode = dpnp.convolve(d, k, mode="f") numpy.testing.assert_array_equal(full_mode, default_mode) # integer mode with numpy.testing.assert_raises(ValueError): @@ -33,19 +34,26 @@ def test_mode(self): dpnp.convolve(d, k, mode=None) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]] - ], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]' - ]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + ], +) def test_diff(array): np_a = numpy.array(array) dpnp_a = dpnp.array(array) @@ -54,15 +62,49 @@ def test_diff(array): numpy.testing.assert_allclose(expected, result) -@pytest.mark.parametrize("dtype1", - [numpy.bool_, numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.complex64, numpy.complex128], - ids=['numpy.bool_', 'numpy.float64', 'numpy.float32', 'numpy.int64', 'numpy.int32', 'numpy.complex64', 'numpy.complex128']) -@pytest.mark.parametrize("dtype2", - [numpy.bool_, numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.complex64, numpy.complex128], - ids=['numpy.bool_', 'numpy.float64', 'numpy.float32', 'numpy.int64', 'numpy.int32', 'numpy.complex64', 'numpy.complex128']) -@pytest.mark.parametrize("data", - [[[1, 2], [3, 4]]], - ids=['[[1, 2], [3, 4]]']) +@pytest.mark.parametrize( + "dtype1", + [ + numpy.bool_, + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.complex64, + numpy.complex128, + ], + ids=[ + "numpy.bool_", + "numpy.float64", + "numpy.float32", + "numpy.int64", + "numpy.int32", + "numpy.complex64", + "numpy.complex128", + ], +) +@pytest.mark.parametrize( + "dtype2", + [ + numpy.bool_, + numpy.float64, + numpy.float32, + numpy.int64, + numpy.int32, + numpy.complex64, + numpy.complex128, + ], + ids=[ + "numpy.bool_", + "numpy.float64", + "numpy.float32", + "numpy.int64", + "numpy.int32", + "numpy.complex64", + "numpy.complex128", + ], +) +@pytest.mark.parametrize("data", [[[1, 2], [3, 4]]], ids=["[[1, 2], [3, 4]]"]) def test_multiply_dtype(dtype1, dtype2, data): np_a = numpy.array(data, dtype=dtype1) dpnp_a = dpnp.array(data, dtype=dtype1) @@ -75,11 +117,16 @@ def test_multiply_dtype(dtype1, dtype2, data): numpy.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize("rhs", [[[1, 2, 3], [4, 5, 6]], [2.0, 1.5, 1.0], 3, 0.3]) -@pytest.mark.parametrize("lhs", [[[6, 5, 4], 
[3, 2, 1]], [1.3, 2.6, 3.9], 5, 0.5]) -@pytest.mark.parametrize("dtype", [numpy.int32, numpy.int64, numpy.float32, numpy.float64]) +@pytest.mark.parametrize( + "rhs", [[[1, 2, 3], [4, 5, 6]], [2.0, 1.5, 1.0], 3, 0.3] +) +@pytest.mark.parametrize( + "lhs", [[[6, 5, 4], [3, 2, 1]], [1.3, 2.6, 3.9], 5, 0.5] +) +@pytest.mark.parametrize( + "dtype", [numpy.int32, numpy.int64, numpy.float32, numpy.float64] +) class TestMathematical: - @staticmethod def array_or_scalar(xp, data, dtype=None): if numpy.isscalar(data): @@ -99,65 +146,80 @@ def _test_mathematical(self, name, dtype, lhs, rhs): numpy.testing.assert_allclose(result, expected, atol=1e-4) def test_add(self, dtype, lhs, rhs): - self._test_mathematical('add', dtype, lhs, rhs) + self._test_mathematical("add", dtype, lhs, rhs) def test_arctan2(self, dtype, lhs, rhs): - self._test_mathematical('arctan2', dtype, lhs, rhs) + self._test_mathematical("arctan2", dtype, lhs, rhs) def test_copysign(self, dtype, lhs, rhs): - self._test_mathematical('copysign', dtype, lhs, rhs) + self._test_mathematical("copysign", dtype, lhs, rhs) def test_divide(self, dtype, lhs, rhs): - self._test_mathematical('divide', dtype, lhs, rhs) + self._test_mathematical("divide", dtype, lhs, rhs) def test_fmod(self, dtype, lhs, rhs): - self._test_mathematical('fmod', dtype, lhs, rhs) + self._test_mathematical("fmod", dtype, lhs, rhs) def test_floor_divide(self, dtype, lhs, rhs): - self._test_mathematical('floor_divide', dtype, lhs, rhs) + self._test_mathematical("floor_divide", dtype, lhs, rhs) def test_hypot(self, dtype, lhs, rhs): - self._test_mathematical('hypot', dtype, lhs, rhs) + self._test_mathematical("hypot", dtype, lhs, rhs) def test_maximum(self, dtype, lhs, rhs): - self._test_mathematical('maximum', dtype, lhs, rhs) + self._test_mathematical("maximum", dtype, lhs, rhs) def test_minimum(self, dtype, lhs, rhs): - self._test_mathematical('minimum', dtype, lhs, rhs) + self._test_mathematical("minimum", dtype, lhs, rhs) def test_multiply(self, dtype, lhs, rhs): - self._test_mathematical('multiply', dtype, lhs, rhs) + self._test_mathematical("multiply", dtype, lhs, rhs) def test_remainder(self, dtype, lhs, rhs): - self._test_mathematical('remainder', dtype, lhs, rhs) + self._test_mathematical("remainder", dtype, lhs, rhs) def test_power(self, dtype, lhs, rhs): - self._test_mathematical('power', dtype, lhs, rhs) + self._test_mathematical("power", dtype, lhs, rhs) def test_subtract(self, dtype, lhs, rhs): - self._test_mathematical('subtract', dtype, lhs, rhs) - - -@pytest.mark.parametrize("val_type", - [bool, int, float], - ids=['bool', 'int', 'float']) -@pytest.mark.parametrize("data_type", - [numpy.bool_, numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.bool_', 'numpy.float64', 'numpy.float32', 'numpy.int64', 'numpy.int32']) -@pytest.mark.parametrize("val", - [0, 1, 5], - ids=['0', '1', '5']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) + self._test_mathematical("subtract", dtype, lhs, rhs) + + +@pytest.mark.parametrize( + "val_type", [bool, int, float], ids=["bool", "int", "float"] +) +@pytest.mark.parametrize( + "data_type", + [numpy.bool_, 
numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=[ + "numpy.bool_", + "numpy.float64", + "numpy.float32", + "numpy.int64", + "numpy.int32", + ], +) +@pytest.mark.parametrize("val", [0, 1, 5], ids=["0", "1", "5"]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + ], +) def test_multiply_scalar(array, val, data_type, val_type): np_a = numpy.array(array, dtype=data_type) dpnp_a = dpnp.array(array, dtype=data_type) @@ -172,12 +234,12 @@ def test_multiply_scalar(array, val, data_type, val_type): numpy.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize("shape", - [(), (3, 2)], - ids=['()', '(3, 2)']) -@pytest.mark.parametrize("dtype", - [numpy.float32, numpy.float64], - ids=['numpy.float32', 'numpy.float64']) +@pytest.mark.parametrize("shape", [(), (3, 2)], ids=["()", "(3, 2)"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.float64], + ids=["numpy.float32", "numpy.float64"], +) def test_multiply_scalar2(shape, dtype): np_a = numpy.ones(shape, dtype=dtype) dpnp_a = dpnp.ones(shape, dtype=dtype) @@ -187,9 +249,14 @@ def test_multiply_scalar2(shape, dtype): numpy.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize("array", [[1, 2, 3, 4, 5], - [1, 2, numpy.nan, 4, 5], - [[1, 2, numpy.nan], [3, -4, -5]]]) +@pytest.mark.parametrize( + "array", + [ + [1, 2, 3, 4, 5], + [1, 2, numpy.nan, 4, 5], + [[1, 2, numpy.nan], [3, -4, -5]], + ], +) def test_nancumprod(array): np_a = numpy.array(array) dpnp_a = dpnp.array(np_a) @@ -199,9 +266,14 @@ def test_nancumprod(array): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("array", [[1, 2, 3, 4, 5], - [1, 2, numpy.nan, 4, 5], - [[1, 2, numpy.nan], [3, -4, -5]]]) +@pytest.mark.parametrize( + "array", + [ + [1, 2, 3, 4, 5], + [1, 2, numpy.nan, 4, 5], + [[1, 2, numpy.nan], [3, -4, -5]], + ], +) def test_nancumsum(array): np_a = numpy.array(array) dpnp_a = dpnp.array(np_a) @@ -211,12 +283,16 @@ def test_nancumsum(array): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("data", - [[[1., -1.], [0.1, -0.1]], [-2, -1, 0, 1, 2]], - ids=['[[1., -1.], [0.1, -0.1]]', '[-2, -1, 0, 1, 2]']) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float64', 'numpy.float32', 'numpy.int64', 'numpy.int32']) +@pytest.mark.parametrize( + "data", + [[[1.0, -1.0], [0.1, -0.1]], [-2, -1, 0, 1, 2]], + ids=["[[1., -1.], [0.1, -0.1]]", "[-2, -1, 0, 1, 2]"], +) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float64", "numpy.float32", "numpy.int64", "numpy.int32"], +) def test_negative(data, dtype): np_a = numpy.array(data, dtype=dtype) dpnp_a = dpnp.array(data, dtype=dtype) @@ -226,26 +302,37 @@ def test_negative(data, dtype): numpy.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize("val_type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float64', 'numpy.float32', 'numpy.int64', 'numpy.int32']) -@pytest.mark.parametrize("data_type", - [numpy.float64, numpy.float32, numpy.int64, 
numpy.int32], - ids=['numpy.float64', 'numpy.float32', 'numpy.int64', 'numpy.int32']) -@pytest.mark.parametrize("val", - [0, 1, 5], - ids=['0', '1', '5']) -@pytest.mark.parametrize("array", - [[[0, 0], [0, 0]], - [[1, 2], [1, 2]], - [[1, 2], [3, 4]], - [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], - [[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]], - ids=['[[0, 0], [0, 0]]', - '[[1, 2], [1, 2]]', - '[[1, 2], [3, 4]]', - '[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]', - '[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]']) +@pytest.mark.parametrize( + "val_type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float64", "numpy.float32", "numpy.int64", "numpy.int32"], +) +@pytest.mark.parametrize( + "data_type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float64", "numpy.float32", "numpy.int64", "numpy.int32"], +) +@pytest.mark.parametrize("val", [0, 1, 5], ids=["0", "1", "5"]) +@pytest.mark.parametrize( + "array", + [ + [[0, 0], [0, 0]], + [[1, 2], [1, 2]], + [[1, 2], [3, 4]], + [[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]], + [ + [[[1, 2], [3, 4]], [[1, 2], [2, 1]]], + [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], + ], + ], + ids=[ + "[[0, 0], [0, 0]]", + "[[1, 2], [1, 2]]", + "[[1, 2], [3, 4]]", + "[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]", + "[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]", + ], +) def test_power(array, val, data_type, val_type): np_a = numpy.array(array, dtype=data_type) dpnp_a = dpnp.array(array, dtype=data_type) @@ -256,12 +343,18 @@ def test_power(array, val, data_type, val_type): class TestEdiff1d: - @pytest.mark.parametrize("data_type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32]) - @pytest.mark.parametrize("array", [[1, 2, 4, 7, 0], - [], - [1], - [[1, 2, 3], [5, 2, 8], [7, 3, 4]], ]) + @pytest.mark.parametrize( + "data_type", [numpy.float64, numpy.float32, numpy.int64, numpy.int32] + ) + @pytest.mark.parametrize( + "array", + [ + [1, 2, 4, 7, 0], + [], + [1], + [[1, 2, 3], [5, 2, 8], [7, 3, 4]], + ], + ) def test_ediff1d_int(self, array, data_type): np_a = numpy.array(array, dtype=data_type) dpnp_a = dpnp.array(array, dtype=data_type) @@ -282,13 +375,13 @@ def test_ediff1d_args(self): class TestTrapz: - @pytest.mark.parametrize("data_type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32]) - @pytest.mark.parametrize("array", [[1, 2, 3], - [[1, 2, 3], [4, 5, 6]], - [1, 4, 6, 9, 10, 12], - [], - [1]]) + @pytest.mark.parametrize( + "data_type", [numpy.float64, numpy.float32, numpy.int64, numpy.int32] + ) + @pytest.mark.parametrize( + "array", + [[1, 2, 3], [[1, 2, 3], [4, 5, 6]], [1, 4, 6, 9, 10, 12], [], [1]], + ) def test_trapz_default(self, array, data_type): np_a = numpy.array(array, dtype=data_type) dpnp_a = dpnp.array(array, dtype=data_type) @@ -297,14 +390,17 @@ def test_trapz_default(self, array, data_type): expected = numpy.trapz(np_a) numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("data_type_y", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32]) - @pytest.mark.parametrize("data_type_x", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32]) - @pytest.mark.parametrize("y_array", [[1, 2, 4, 5], - [1., 2.5, 6., 7.]]) + @pytest.mark.parametrize( + "data_type_y", [numpy.float64, numpy.float32, numpy.int64, numpy.int32] + ) + @pytest.mark.parametrize( + "data_type_x", [numpy.float64, numpy.float32, numpy.int64, 
numpy.int32] + ) + @pytest.mark.parametrize("y_array", [[1, 2, 4, 5], [1.0, 2.5, 6.0, 7.0]]) @pytest.mark.parametrize("x_array", [[2, 5, 6, 9]]) - def test_trapz_with_x_params(self, y_array, x_array, data_type_y, data_type_x): + def test_trapz_with_x_params( + self, y_array, x_array, data_type_y, data_type_x + ): np_y = numpy.array(y_array, dtype=data_type_y) dpnp_y = dpnp.array(y_array, dtype=data_type_y) @@ -324,8 +420,18 @@ def test_trapz_with_x_param_2ndim(self, array): expected = numpy.trapz(np_a, np_a) numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("y_array", [[1, 2, 4, 5], - [1., 2.5, 6., 7., ]]) + @pytest.mark.parametrize( + "y_array", + [ + [1, 2, 4, 5], + [ + 1.0, + 2.5, + 6.0, + 7.0, + ], + ], + ) @pytest.mark.parametrize("dx", [2, 3, 4]) def test_trapz_with_dx_params(self, y_array, dx): np_y = numpy.array(y_array) @@ -337,27 +443,20 @@ def test_trapz_with_dx_params(self, y_array, dx): class TestCross: - - @pytest.mark.parametrize("axis", [None, 0], - ids=['None', '0']) - @pytest.mark.parametrize("axisc", [-1, 0], - ids=['-1', '0']) - @pytest.mark.parametrize("axisb", [-1, 0], - ids=['-1', '0']) - @pytest.mark.parametrize("axisa", [-1, 0], - ids=['-1', '0']) - @pytest.mark.parametrize("x1", [[1, 2, 3], - [1., 2.5, 6.], - [2, 4, 6]], - ids=['[1, 2, 3]', - '[1., 2.5, 6.]', - '[2, 4, 6]']) - @pytest.mark.parametrize("x2", [[4, 5, 6], - [1., 5., 2.], - [6, 4, 3]], - ids=['[4, 5, 6]', - '[1., 5., 2.]', - '[6, 4, 3]']) + @pytest.mark.parametrize("axis", [None, 0], ids=["None", "0"]) + @pytest.mark.parametrize("axisc", [-1, 0], ids=["-1", "0"]) + @pytest.mark.parametrize("axisb", [-1, 0], ids=["-1", "0"]) + @pytest.mark.parametrize("axisa", [-1, 0], ids=["-1", "0"]) + @pytest.mark.parametrize( + "x1", + [[1, 2, 3], [1.0, 2.5, 6.0], [2, 4, 6]], + ids=["[1, 2, 3]", "[1., 2.5, 6.]", "[2, 4, 6]"], + ) + @pytest.mark.parametrize( + "x2", + [[4, 5, 6], [1.0, 5.0, 2.0], [6, 4, 3]], + ids=["[4, 5, 6]", "[1., 5., 2.]", "[6, 4, 3]"], + ) def test_cross_3x3(self, x1, x2, axisa, axisb, axisc, axis): np_x1 = numpy.array(x1) dpnp_x1 = dpnp.array(x1) @@ -371,36 +470,9 @@ def test_cross_3x3(self, x1, x2, axisa, axisb, axisc, axis): class TestGradient: - - @pytest.mark.parametrize("array", [[2, 3, 6, 8, 4, 9], - [3., 4., 7.5, 9.], - [2, 6, 8, 10]]) - def test_gradient_y1(self, array): - np_y = numpy.array(array) - dpnp_y = dpnp.array(array) - - result = dpnp.gradient(dpnp_y) - expected = numpy.gradient(np_y) - numpy.testing.assert_array_equal(expected, result) - - @pytest.mark.parametrize("array", [[2, 3, 6, 8, 4, 9], - [3., 4., 7.5, 9.], - [2, 6, 8, 10]]) - @pytest.mark.parametrize("dx", [2, 3.5]) - def test_gradient_y1_dx(self, array, dx): - np_y = numpy.array(array) - dpnp_y = dpnp.array(array) - - result = dpnp.gradient(dpnp_y, dx) - expected = numpy.gradient(np_y, dx) - numpy.testing.assert_array_equal(expected, result) - - -class TestGradient: - - @pytest.mark.parametrize("array", [[2, 3, 6, 8, 4, 9], - [3., 4., 7.5, 9.], - [2, 6, 8, 10]]) + @pytest.mark.parametrize( + "array", [[2, 3, 6, 8, 4, 9], [3.0, 4.0, 7.5, 9.0], [2, 6, 8, 10]] + ) def test_gradient_y1(self, array): np_y = numpy.array(array) dpnp_y = dpnp.array(array) @@ -409,9 +481,9 @@ def test_gradient_y1(self, array): expected = numpy.gradient(np_y) numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("array", [[2, 3, 6, 8, 4, 9], - [3., 4., 7.5, 9.], - [2, 6, 8, 10]]) + @pytest.mark.parametrize( + "array", [[2, 3, 6, 8, 4, 9], [3.0, 4.0, 7.5, 9.0], [2, 6, 8, 10]] + ) 
@pytest.mark.parametrize("dx", [2, 3.5]) def test_gradient_y1_dx(self, array, dx): np_y = numpy.array(array) @@ -423,7 +495,6 @@ def test_gradient_y1_dx(self, array, dx): class TestCeil: - def test_ceil(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -439,9 +510,11 @@ def test_ceil(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -450,9 +523,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.ceil(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -463,7 +536,6 @@ def test_invalid_shape(self, shape): class TestFloor: - def test_floor(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -479,9 +551,11 @@ def test_floor(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -490,9 +564,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.floor(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -503,7 +577,6 @@ def test_invalid_shape(self, shape): class TestTrunc: - def test_trunc(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -519,9 +592,11 @@ def test_trunc(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -530,9 +605,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.trunc(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -543,7 +618,6 @@ def test_invalid_shape(self, shape): class TestPower: - def test_power(self): array1_data = numpy.arange(10) array2_data = numpy.arange(5, 15) @@ -562,9 +636,11 @@ def test_power(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - 
ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array1 = dpnp.arange(10, dtype=dpnp.float64) @@ -574,9 +650,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.power(dp_array1, dp_array2, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array1 = dpnp.arange(10, dtype=dpnp.float64) diff --git a/tests/test_mixins.py b/tests/test_mixins.py index 2cb4f1b71a75..e2ffb26ffd24 100644 --- a/tests/test_mixins.py +++ b/tests/test_mixins.py @@ -1,25 +1,32 @@ import unittest -import dpnp as inp - import numpy +import dpnp as inp -class TestMatMul(unittest.TestCase): +class TestMatMul(unittest.TestCase): def test_matmul(self): - array_data = [1., 2., 3., 4.] + array_data = [1.0, 2.0, 3.0, 4.0] size = 2 # DPNP - array1 = inp.reshape(inp.array(array_data, dtype=inp.float64), (size, size)) - array2 = inp.reshape(inp.array(array_data, dtype=inp.float64), (size, size)) + array1 = inp.reshape( + inp.array(array_data, dtype=inp.float64), (size, size) + ) + array2 = inp.reshape( + inp.array(array_data, dtype=inp.float64), (size, size) + ) result = inp.matmul(array1, array2) # print(result) # original - array_1 = numpy.array(array_data, dtype=numpy.float64).reshape((size, size)) - array_2 = numpy.array(array_data, dtype=numpy.float64).reshape((size, size)) + array_1 = numpy.array(array_data, dtype=numpy.float64).reshape( + (size, size) + ) + array_2 = numpy.array(array_data, dtype=numpy.float64).reshape( + (size, size) + ) expected = numpy.matmul(array_1, array_2) # print(expected) @@ -29,8 +36,8 @@ def test_matmul(self): # self.assertEqual(expected, result) def test_matmul2(self): - array_data1 = [1., 2., 3., 4., 5., 6.] - array_data2 = [1., 2., 3., 4., 5., 6., 7., 8.] 
+ array_data1 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + array_data2 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0] # DPNP array1 = inp.reshape(inp.array(array_data1, dtype=inp.float64), (3, 2)) @@ -65,5 +72,5 @@ def test_matmul3(self): numpy.testing.assert_array_equal(expected, result) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_random.py b/tests/test_random.py index fa7da686065f..afdf1c0cc1f8 100644 --- a/tests/test_random.py +++ b/tests/test_random.py @@ -1,14 +1,14 @@ -import pytest +import math import unittest -import dpnp.random import numpy -from numpy.testing import (assert_allclose, assert_array_equal) -import math +import pytest +from numpy.testing import assert_allclose, assert_array_equal +import dpnp.random -class TestDistribution(unittest.TestCase): +class TestDistribution(unittest.TestCase): def check_extreme_value(self, dist_name, val, params): seed = 28041990 size = 10 @@ -17,7 +17,9 @@ def check_extreme_value(self, dist_name, val, params): assert len(numpy.unique(res)) == 1 assert numpy.unique(res)[0] == val - def check_moments(self, dist_name, expected_mean, expected_var, params, size=10**5): + def check_moments( + self, dist_name, expected_mean, expected_var, params, size=10**5 + ): seed = 28041995 dpnp.random.seed(seed) res = dpnp.asnumpy(getattr(dpnp.random, dist_name)(size=size, **params)) @@ -41,11 +43,11 @@ def check_seed(self, dist_name, params): assert_allclose(a1, a2, rtol=1e-07, atol=0) -@pytest.mark.parametrize("func", - [dpnp.random.chisquare, - dpnp.random.rand, - dpnp.random.randn], - ids=['chisquare', 'rand', 'randn']) +@pytest.mark.parametrize( + "func", + [dpnp.random.chisquare, dpnp.random.rand, dpnp.random.randn], + ids=["chisquare", "rand", "randn"], +) def test_input_size(func): output_shape = (10,) size = 10 @@ -57,14 +59,17 @@ def test_input_size(func): assert output_shape == res.shape -@pytest.mark.parametrize("func", - [dpnp.random.chisquare, - dpnp.random.random, - dpnp.random.random_sample, - dpnp.random.ranf, - dpnp.random.sample], - ids=['chisquare', 'random', 'random_sample', - 'ranf', 'sample']) +@pytest.mark.parametrize( + "func", + [ + dpnp.random.chisquare, + dpnp.random.random, + dpnp.random.random_sample, + dpnp.random.ranf, + dpnp.random.sample, + ], + ids=["chisquare", "random", "random_sample", "ranf", "sample"], +) def test_input_shape(func): shape = (10, 5) df = 3 # for dpnp.random.chisquare @@ -75,15 +80,17 @@ def test_input_shape(func): assert shape == res.shape -@pytest.mark.parametrize("func", - [dpnp.random.random, - dpnp.random.random_sample, - dpnp.random.ranf, - dpnp.random.sample, - dpnp.random.rand], - ids=['random', 'random_sample', - 'ranf', 'sample', - 'rand']) +@pytest.mark.parametrize( + "func", + [ + dpnp.random.random, + dpnp.random.random_sample, + dpnp.random.ranf, + dpnp.random.sample, + dpnp.random.rand, + ], + ids=["random", "random_sample", "ranf", "sample", "rand"], +) def test_check_output(func): shape = (10, 5) size = 10 * 5 @@ -95,14 +102,17 @@ def test_check_output(func): assert dpnp.all(res < 1) -@pytest.mark.parametrize("func", - [dpnp.random.random, - dpnp.random.random_sample, - dpnp.random.ranf, - dpnp.random.sample, - dpnp.random.rand], - ids=['random', 'random_sample', - 'ranf', 'sample', 'rand']) +@pytest.mark.parametrize( + "func", + [ + dpnp.random.random, + dpnp.random.random_sample, + dpnp.random.ranf, + dpnp.random.sample, + dpnp.random.rand, + ], + ids=["random", "random_sample", "ranf", "sample", "rand"], +) def test_seed(func): seed = 28041990 size 
= 100 @@ -119,15 +129,10 @@ def test_seed(func): def test_randn_normal_distribution(): - """ - Check the moments of the normal distribution sample obtained - from ``dpnp.random.randn``. - - """ + """Check the moments of the normal distribution sample obtained from ``dpnp.random.randn``.""" seed = 28041995 pts = 10**5 - alpha = 0.05 expected_mean = 0.0 expected_var = 1.0 @@ -140,406 +145,442 @@ def test_randn_normal_distribution(): class TestDistributionsBeta(TestDistribution): - def test_moments(self): a = 2.56 b = 0.8 expected_mean = a / (a + b) - expected_var = (a * b) / ((a + b)**2 * (a + b + 1)) - self.check_moments('beta', expected_mean, expected_var, {'a': a, 'b': b}) + expected_var = (a * b) / ((a + b) ** 2 * (a + b + 1)) + self.check_moments( + "beta", expected_mean, expected_var, {"a": a, "b": b} + ) def test_invalid_args(self): - a = 3.0 # OK + a = 3.0 # OK b = -1.0 # positive `b` is expected - self.check_invalid_args('beta', {'a': a, 'b': b}) + self.check_invalid_args("beta", {"a": a, "b": b}) a = -1.0 # positive `a` is expected - b = 3.0 # OK - self.check_invalid_args('beta', {'a': a, 'b': b}) + b = 3.0 # OK + self.check_invalid_args("beta", {"a": a, "b": b}) def test_seed(self): a = 2.56 b = 0.8 - self.check_seed('beta', {'a': a, 'b': b}) + self.check_seed("beta", {"a": a, "b": b}) class TestDistributionsBinomial(TestDistribution): - def test_extreme_value(self): n = 5 p = 0.0 expected_val = p - self.check_extreme_value('binomial', expected_val, {'n': n, 'p': p}) + self.check_extreme_value("binomial", expected_val, {"n": n, "p": p}) n = 0 p = 0.5 expected_val = n - self.check_extreme_value('binomial', expected_val, {'n': n, 'p': p}) + self.check_extreme_value("binomial", expected_val, {"n": n, "p": p}) n = 5 p = 1.0 expected_val = n - self.check_extreme_value('binomial', expected_val, {'n': n, 'p': p}) + self.check_extreme_value("binomial", expected_val, {"n": n, "p": p}) def test_moments(self): n = 5 p = 0.8 expected_mean = n * p expected_var = n * p * (1 - p) - self.check_moments('binomial', expected_mean, - expected_var, {'n': n, 'p': p}) + self.check_moments( + "binomial", expected_mean, expected_var, {"n": n, "p": p} + ) def test_invalid_args(self): - n = -5 # non-negative `n` is expected - p = 0.4 # OK - self.check_invalid_args('binomial', {'n': n, 'p': p}) - n = 5 # OK - p = -0.5 # `p` is expected from [0, 1] - self.check_invalid_args('binomial', {'n': n, 'p': p}) + n = -5 # non-negative `n` is expected + p = 0.4 # OK + self.check_invalid_args("binomial", {"n": n, "p": p}) + n = 5 # OK + p = -0.5 # `p` is expected from [0, 1] + self.check_invalid_args("binomial", {"n": n, "p": p}) def test_seed(self): - n, p = 10, .5 # number of trials, probability of each trial - self.check_seed('binomial', {'n': n, 'p': p}) + n, p = 10, 0.5 # number of trials, probability of each trial + self.check_seed("binomial", {"n": n, "p": p}) class TestDistributionsChisquare(TestDistribution): - def test_invalid_args(self): df = -1 # positive `df` is expected - self.check_invalid_args('chisquare', {'df': df}) + self.check_invalid_args("chisquare", {"df": df}) def test_seed(self): df = 3 # number of degrees of freedom - self.check_seed('chisquare', {'df': df}) + self.check_seed("chisquare", {"df": df}) class TestDistributionsExponential(TestDistribution): - def test_invalid_args(self): scale = -1 # non-negative `scale` is expected - self.check_invalid_args('exponential', {'scale': scale}) + self.check_invalid_args("exponential", {"scale": scale}) def test_seed(self): scale = 3 # number of 
degrees of freedom - self.check_seed('exponential', {'scale': scale}) + self.check_seed("exponential", {"scale": scale}) class TestDistributionsF(TestDistribution): - def test_moments(self): dfnum = 12.56 dfden = 13.0 # for dfden > 2 expected_mean = dfden / (dfden - 2) # for dfden > 4 - expected_var = 2 * (dfden ** 2) * (dfnum + dfden - 2) / (dfnum * ((dfden - 2) ** 2) * ((dfden - 4))) - self.check_moments('f', expected_mean, expected_var, - {'dfnum': dfnum, 'dfden': dfden}) + expected_var = ( + 2 + * (dfden**2) + * (dfnum + dfden - 2) + / (dfnum * ((dfden - 2) ** 2) * ((dfden - 4))) + ) + self.check_moments( + "f", expected_mean, expected_var, {"dfnum": dfnum, "dfden": dfden} + ) def test_invalid_args(self): - size = 10 - dfnum = -1.0 # positive `dfnum` is expected - dfden = 1.0 # OK - self.check_invalid_args('f', {'dfnum': dfnum, 'dfden': dfden}) - dfnum = 1.0 # OK - dfden = -1.0 # positive `dfden` is expected - self.check_invalid_args('f', {'dfnum': dfnum, 'dfden': dfden}) + dfnum = -1.0 # positive `dfnum` is expected + dfden = 1.0 # OK + self.check_invalid_args("f", {"dfnum": dfnum, "dfden": dfden}) + dfnum = 1.0 # OK + dfden = -1.0 # positive `dfden` is expected + self.check_invalid_args("f", {"dfnum": dfnum, "dfden": dfden}) def test_seed(self): - dfnum = 3.56 # `dfden` param for Wald distr - dfden = 2.8 # `dfden` param for Wald distr - self.check_seed('f', {'dfnum': dfnum, 'dfden': dfden}) + dfnum = 3.56 # `dfden` param for Wald distr + dfden = 2.8 # `dfden` param for Wald distr + self.check_seed("f", {"dfnum": dfnum, "dfden": dfden}) class TestDistributionsGamma(TestDistribution): - def test_moments(self): shape = 2.56 scale = 0.8 expected_mean = shape * scale expected_var = shape * scale * scale - self.check_moments('gamma', expected_mean, expected_var, - {'shape': shape, 'scale': scale}) + self.check_moments( + "gamma", + expected_mean, + expected_var, + {"shape": shape, "scale": scale}, + ) def test_invalid_args(self): - size = 10 - shape = -1 # non-negative `shape` is expected - self.check_invalid_args('gamma', {'shape': shape}) - shape = 1.0 # OK + shape = -1 # non-negative `shape` is expected + self.check_invalid_args("gamma", {"shape": shape}) + shape = 1.0 # OK scale = -1.0 # non-negative `shape` is expected - self.check_invalid_args('gamma', {'shape': shape, 'scale': scale}) + self.check_invalid_args("gamma", {"shape": shape, "scale": scale}) def test_seed(self): shape = 3.0 # shape param for gamma distr - self.check_seed('gamma', {'shape': shape}) + self.check_seed("gamma", {"shape": shape}) class TestDistributionsGeometric(TestDistribution): - def test_extreme_value(self): p = 1.0 expected_val = p - self.check_extreme_value('geometric', expected_val, {'p': p}) + self.check_extreme_value("geometric", expected_val, {"p": p}) def test_moments(self): p = 0.8 expected_mean = (1 - p) / p expected_var = (1 - p) / (p**2) - self.check_moments('geometric', expected_mean, expected_var, {'p': p}) + self.check_moments("geometric", expected_mean, expected_var, {"p": p}) def test_invalid_args(self): - size = 10 p = -1.0 # `p` is expected from (0, 1] - self.check_invalid_args('geometric', {'p': p}) + self.check_invalid_args("geometric", {"p": p}) def test_seed(self): p = 0.8 - self.check_seed('geometric', {'p': p}) + self.check_seed("geometric", {"p": p}) class TestDistributionsGumbel(TestDistribution): - def test_extreme_value(self): loc = 5 scale = 0.0 expected_val = loc - self.check_extreme_value('gumbel', expected_val, - {'loc': loc, 'scale': scale}) + self.check_extreme_value( + 
"gumbel", expected_val, {"loc": loc, "scale": scale} + ) def test_moments(self): loc = 12 scale = 0.8 expected_mean = loc + scale * numpy.euler_gamma - expected_var = (numpy.pi**2 / 6) * (scale ** 2) - self.check_moments('gumbel', expected_mean, - expected_var, {'loc': loc, 'scale': scale}) + expected_var = (numpy.pi**2 / 6) * (scale**2) + self.check_moments( + "gumbel", expected_mean, expected_var, {"loc": loc, "scale": scale} + ) def test_invalid_args(self): - size = 10 - loc = 3.0 # OK + loc = 3.0 # OK scale = -1.0 # non-negative `scale` is expected - self.check_invalid_args('gumbel', {'loc': loc, 'scale': scale}) + self.check_invalid_args("gumbel", {"loc": loc, "scale": scale}) def test_seed(self): loc = 2.56 scale = 0.8 - self.check_seed('gumbel', {'loc': loc, 'scale': scale}) + self.check_seed("gumbel", {"loc": loc, "scale": scale}) class TestDistributionsHypergeometric(TestDistribution): - def test_extreme_value(self): ngood = 100 nbad = 0 nsample = 10 expected_val = nsample - self.check_extreme_value('hypergeometric', expected_val, - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) + self.check_extreme_value( + "hypergeometric", + expected_val, + {"ngood": ngood, "nbad": nbad, "nsample": nsample}, + ) ngood = 0 nbad = 11 nsample = 10 expected_val = 0 - self.check_extreme_value('hypergeometric', expected_val, - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) + self.check_extreme_value( + "hypergeometric", + expected_val, + {"ngood": ngood, "nbad": nbad, "nsample": nsample}, + ) def test_moments(self): ngood = 100 nbad = 2 nsample = 10 expected_mean = nsample * (ngood / (ngood + nbad)) - expected_var = expected_mean * (nbad / (ngood + nbad)) * (((ngood + nbad) - nsample) / ((ngood + nbad) - 1)) - self.check_moments('hypergeometric', expected_mean, expected_var, - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) + expected_var = ( + expected_mean + * (nbad / (ngood + nbad)) + * (((ngood + nbad) - nsample) / ((ngood + nbad) - 1)) + ) + self.check_moments( + "hypergeometric", + expected_mean, + expected_var, + {"ngood": ngood, "nbad": nbad, "nsample": nsample}, + ) def test_invalid_args(self): - size = 10 - ngood = 100 # OK - nbad = 2 # OK + ngood = 100 # OK + nbad = 2 # OK nsample = -10 # non-negative `nsamp` is expected - self.check_invalid_args('hypergeometric', - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) - - ngood = 100 # OK - nbad = -2 # non-negative `nbad` is expected - nsample = 10 # OK - self.check_invalid_args('hypergeometric', - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) - - ngood = -100 # non-negative `ngood` is expected - nbad = 2 # OK - nsample = 10 # OK - self.check_invalid_args('hypergeometric', - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) + self.check_invalid_args( + "hypergeometric", {"ngood": ngood, "nbad": nbad, "nsample": nsample} + ) + + ngood = 100 # OK + nbad = -2 # non-negative `nbad` is expected + nsample = 10 # OK + self.check_invalid_args( + "hypergeometric", {"ngood": ngood, "nbad": nbad, "nsample": nsample} + ) + + ngood = -100 # non-negative `ngood` is expected + nbad = 2 # OK + nsample = 10 # OK + self.check_invalid_args( + "hypergeometric", {"ngood": ngood, "nbad": nbad, "nsample": nsample} + ) ngood = 10 nbad = 2 nsample = 100 # ngood + nbad >= nsample expected - self.check_invalid_args('hypergeometric', - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) + self.check_invalid_args( + "hypergeometric", {"ngood": ngood, "nbad": nbad, "nsample": nsample} + ) - ngood = 10 # OK - nbad = 2 # OK + ngood = 10 # OK + nbad = 
2 # OK nsample = 0 # `nsample` is expected > 0 - self.check_invalid_args('hypergeometric', - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) + self.check_invalid_args( + "hypergeometric", {"ngood": ngood, "nbad": nbad, "nsample": nsample} + ) def test_seed(self): ngood = 100 nbad = 2 nsample = 10 - self.check_seed('hypergeometric', - {'ngood': ngood, 'nbad': nbad, 'nsample': nsample}) + self.check_seed( + "hypergeometric", {"ngood": ngood, "nbad": nbad, "nsample": nsample} + ) class TestDistributionsLaplace(TestDistribution): - def test_extreme_value(self): loc = 5 scale = 0.0 expected_val = scale - self.check_extreme_value('laplace', expected_val, - {'loc': loc, 'scale': scale}) + self.check_extreme_value( + "laplace", expected_val, {"loc": loc, "scale": scale} + ) def test_moments(self): loc = 2.56 scale = 0.8 expected_mean = loc expected_var = 2 * scale * scale - self.check_moments('laplace', expected_mean, - expected_var, {'loc': loc, 'scale': scale}) + self.check_moments( + "laplace", expected_mean, expected_var, {"loc": loc, "scale": scale} + ) def test_invalid_args(self): - loc = 3.0 # OK + loc = 3.0 # OK scale = -1.0 # positive `b` is expected - self.check_invalid_args('laplace', - {'loc': loc, 'scale': scale}) + self.check_invalid_args("laplace", {"loc": loc, "scale": scale}) def test_seed(self): loc = 2.56 scale = 0.8 - self.check_seed('laplace', {'loc': loc, 'scale': scale}) + self.check_seed("laplace", {"loc": loc, "scale": scale}) class TestDistributionsLogistic(TestDistribution): - def test_moments(self): loc = 2.56 scale = 0.8 expected_mean = loc - expected_var = (scale ** 2) * (numpy.pi ** 2) / 3 - self.check_moments('logistic', expected_mean, - expected_var, {'loc': loc, 'scale': scale}) + expected_var = (scale**2) * (numpy.pi**2) / 3 + self.check_moments( + "logistic", + expected_mean, + expected_var, + {"loc": loc, "scale": scale}, + ) def test_invalid_args(self): - loc = 3.0 # OK + loc = 3.0 # OK scale = -1.0 # non-negative `scale` is expected - self.check_invalid_args('logistic', - {'loc': loc, 'scale': scale}) + self.check_invalid_args("logistic", {"loc": loc, "scale": scale}) def test_seed(self): loc = 2.56 scale = 0.8 - self.check_seed('logistic', {'loc': loc, 'scale': scale}) + self.check_seed("logistic", {"loc": loc, "scale": scale}) class TestDistributionsLognormal(TestDistribution): - def test_extreme_value(self): mean = 0.5 sigma = 0.0 - expected_val = numpy.exp(mean + (sigma ** 2) / 2) - self.check_extreme_value('lognormal', expected_val, - {'mean': mean, 'sigma': sigma}) + expected_val = numpy.exp(mean + (sigma**2) / 2) + self.check_extreme_value( + "lognormal", expected_val, {"mean": mean, "sigma": sigma} + ) def test_moments(self): mean = 0.5 sigma = 0.8 - expected_mean = numpy.exp(mean + (sigma ** 2) / 2) - expected_var = (numpy.exp(sigma**2) - 1) * numpy.exp(2 * mean + sigma**2) - self.check_moments('lognormal', expected_mean, - expected_var, {'mean': mean, 'sigma': sigma}) + expected_mean = numpy.exp(mean + (sigma**2) / 2) + expected_var = (numpy.exp(sigma**2) - 1) * numpy.exp( + 2 * mean + sigma**2 + ) + self.check_moments( + "lognormal", + expected_mean, + expected_var, + {"mean": mean, "sigma": sigma}, + ) def test_invalid_args(self): mean = 0.0 sigma = -1.0 # non-negative `sigma` is expected - self.check_invalid_args('lognormal', {'mean': mean, 'sigma': sigma}) + self.check_invalid_args("lognormal", {"mean": mean, "sigma": sigma}) def test_seed(self): mean = 0.0 sigma = 0.8 - self.check_seed('lognormal', {'mean': mean, 'sigma': sigma}) + 
self.check_seed("lognormal", {"mean": mean, "sigma": sigma}) class TestDistributionsMultinomial(TestDistribution): - def test_extreme_value(self): n = 0 - pvals = [1 / 6.] * 6 - self.check_extreme_value('multinomial', n, {'n': n, 'pvals': pvals}) + pvals = [1 / 6.0] * 6 + self.check_extreme_value("multinomial", n, {"n": n, "pvals": pvals}) def test_moments(self): n = 10 - pvals = [1 / 6.] * 6 - size = 10**5 + pvals = [1 / 6.0] * 6 expected_mean = n * pvals[0] expected_var = n * pvals[0] * (1 - pvals[0]) - self.check_moments('multinomial', expected_mean, - expected_var, {'n': n, 'pvals': pvals}) + self.check_moments( + "multinomial", expected_mean, expected_var, {"n": n, "pvals": pvals} + ) def test_check_sum(self): seed = 28041990 size = 1 n = 20 - pvals = [1 / 6.] * 6 + pvals = [1 / 6.0] * 6 dpnp.random.seed(seed) res = dpnp.random.multinomial(n, pvals, size) assert_allclose(n, dpnp.asnumpy(res).sum(), rtol=1e-07, atol=0) def test_invalid_args(self): - n = -10 # parameter `n`, non-negative expected - pvals = [1 / 6.] * 6 # parameter `pvals`, OK - self.check_invalid_args('multinomial', {'n': n, 'pvals': pvals}) - n = 10 # parameter `n`, OK - pvals = [-1 / 6.] * 6 # parameter `pvals`, sum(pvals) expected between [0, 1] - self.check_invalid_args('multinomial', {'n': n, 'pvals': pvals}) + n = -10 # parameter `n`, non-negative expected + pvals = [1 / 6.0] * 6 # parameter `pvals`, OK + self.check_invalid_args("multinomial", {"n": n, "pvals": pvals}) + n = 10 # parameter `n`, OK + pvals = [ + -1 / 6.0 + ] * 6 # parameter `pvals`, sum(pvals) expected between [0, 1] + self.check_invalid_args("multinomial", {"n": n, "pvals": pvals}) def test_seed(self): n = 20 - pvals = [1 / 6.] * 6 - self.check_seed('multinomial', {'n': n, 'pvals': pvals}) + pvals = [1 / 6.0] * 6 + self.check_seed("multinomial", {"n": n, "pvals": pvals}) def test_seed1(self): # pvals_size >= ntrial * 16 && ntrial <= 16 n = 4 pvals_size = 16 * n pvals = [1 / pvals_size] * pvals_size - self.check_seed('multinomial', {'n': n, 'pvals': pvals}) + self.check_seed("multinomial", {"n": n, "pvals": pvals}) class TestDistributionsMultivariateNormal(TestDistribution): - def test_moments(self): seed = 2804183 dpnp.random.seed(seed) mean = [2.56, 3.23] cov = [[1, 0], [0, 1]] size = 10**5 - res = dpnp.asnumpy(dpnp.random.multivariate_normal(mean=mean, cov=cov, size=size)) + res = dpnp.asnumpy( + dpnp.random.multivariate_normal(mean=mean, cov=cov, size=size) + ) res_mean = [numpy.mean(res.T[0]), numpy.mean(res.T[1])] assert_allclose(res_mean, mean, rtol=1e-02, atol=0) def test_invalid_args(self): mean = [2.56, 3.23] # OK - cov = [[1, 0]] # `mean` and `cov` must have same length - self.check_invalid_args('multivariate_normal', {'mean': mean, 'cov': cov}) - mean = [[2.56, 3.23]] # `mean` must be 1 dimensional + cov = [[1, 0]] # `mean` and `cov` must have same length + self.check_invalid_args( + "multivariate_normal", {"mean": mean, "cov": cov} + ) + mean = [[2.56, 3.23]] # `mean` must be 1 dimensional cov = [[1, 0], [0, 1]] # OK - self.check_invalid_args('multivariate_normal', {'mean': mean, 'cov': cov}) + self.check_invalid_args( + "multivariate_normal", {"mean": mean, "cov": cov} + ) mean = [2.56, 3.23] # OK - cov = [1, 0, 0, 1] # `cov` must be 2 dimensional and square - self.check_invalid_args('multivariate_normal', {'mean': mean, 'cov': cov}) + cov = [1, 0, 0, 1] # `cov` must be 2 dimensional and square + self.check_invalid_args( + "multivariate_normal", {"mean": mean, "cov": cov} + ) def test_output_shape_check(self): seed = 28041990 - size = 
100 mean = [2.56, 3.23] cov = [[1, 0], [0, 1]] expected_shape = (100, 2) @@ -550,18 +591,19 @@ def test_output_shape_check(self): def test_seed(self): mean = [2.56, 3.23] cov = [[1, 0], [0, 1]] - self.check_seed('multivariate_normal', {'mean': mean, 'cov': cov}) + self.check_seed("multivariate_normal", {"mean": mean, "cov": cov}) class TestDistributionsNegativeBinomial(TestDistribution): - def test_extreme_value(self): seed = 28041990 dpnp.random.seed(seed) n = 5 p = 1.0 check_val = 0.0 - self.check_extreme_value('negative_binomial', check_val, {'n': n, 'p': p}) + self.check_extreme_value( + "negative_binomial", check_val, {"n": n, "p": p} + ) n = 5 p = 0.0 res = dpnp.asnumpy(dpnp.random.negative_binomial(n=n, p=p, size=10)) @@ -570,56 +612,61 @@ def test_extreme_value(self): assert numpy.unique(res)[0] == check_val def test_invalid_args(self): - n = 10 # parameter `n`, OK + n = 10 # parameter `n`, OK p = -0.5 # parameter `p`, expected between [0, 1] - self.check_invalid_args('negative_binomial', {'n': n, 'p': p}) - n = -10 # parameter `n`, expected non-negative - p = 0.5 # parameter `p`, OK - self.check_invalid_args('negative_binomial', {'n': n, 'p': p}) + self.check_invalid_args("negative_binomial", {"n": n, "p": p}) + n = -10 # parameter `n`, expected non-negative + p = 0.5 # parameter `p`, OK + self.check_invalid_args("negative_binomial", {"n": n, "p": p}) def test_seed(self): - n, p = 10, .5 # number of trials, probability of each trial - self.check_seed('negative_binomial', {'n': n, 'p': p}) + n, p = 10, 0.5 # number of trials, probability of each trial + self.check_seed("negative_binomial", {"n": n, "p": p}) class TestDistributionsNormal(TestDistribution): - def test_extreme_value(self): loc = 5 scale = 0.0 expected_val = loc - self.check_extreme_value('normal', expected_val, {'loc': loc, 'scale': scale}) + self.check_extreme_value( + "normal", expected_val, {"loc": loc, "scale": scale} + ) def test_moments(self): loc = 2.56 scale = 0.8 expected_mean = loc expected_var = scale**2 - self.check_moments('normal', expected_mean, - expected_var, {'loc': loc, 'scale': scale}) + self.check_moments( + "normal", expected_mean, expected_var, {"loc": loc, "scale": scale} + ) def test_invalid_args(self): - loc = 3.0 # OK + loc = 3.0 # OK scale = -1.0 # non-negative `scale` is expected - self.check_invalid_args('normal', {'loc': loc, 'scale': scale}) + self.check_invalid_args("normal", {"loc": loc, "scale": scale}) def test_seed(self): loc = 2.56 scale = 0.8 - self.check_seed('normal', {'loc': loc, 'scale': scale}) + self.check_seed("normal", {"loc": loc, "scale": scale}) class TestDistributionsNoncentralChisquare: - - @pytest.mark.parametrize("df", [5.0, 1.0, 0.5], ids=['df_grt_1', 'df_eq_1', 'df_less_1']) + @pytest.mark.parametrize( + "df", [5.0, 1.0, 0.5], ids=["df_grt_1", "df_eq_1", "df_less_1"] + ) def test_moments(self, df): - nonc = 20. 
+ nonc = 20.0 expected_mean = df + nonc expected_var = 2 * (df + 2 * nonc) size = 10**6 seed = 28041995 dpnp.random.seed(seed) - res = dpnp.asnumpy(dpnp.random.noncentral_chisquare(df, nonc, size=size)) + res = dpnp.asnumpy( + dpnp.random.noncentral_chisquare(df, nonc, size=size) + ) var = numpy.var(res) mean = numpy.mean(res) assert math.isclose(var, expected_var, abs_tol=0.6) @@ -627,16 +674,18 @@ def test_moments(self, df): def test_invalid_args(self): size = 10 - df = 5.0 # OK + df = 5.0 # OK nonc = -1.0 # non-negative `nonc` is expected with pytest.raises(ValueError): dpnp.random.noncentral_chisquare(df, nonc, size=size) - df = -1.0 # positive `df` is expected - nonc = 1.0 # OK + df = -1.0 # positive `df` is expected + nonc = 1.0 # OK with pytest.raises(ValueError): dpnp.random.noncentral_chisquare(df, nonc, size=size) - @pytest.mark.parametrize("df", [5.0, 1.0, 0.5], ids=['df_grt_1', 'df_eq_1', 'df_less_1']) + @pytest.mark.parametrize( + "df", [5.0, 1.0, 0.5], ids=["df_grt_1", "df_eq_1", "df_less_1"] + ) def test_seed(self, df): seed = 28041990 size = 10 @@ -649,214 +698,220 @@ def test_seed(self, df): class TestDistributionsPareto(TestDistribution): - def test_moments(self): a = 30.0 expected_mean = a / (a - 1) - expected_var = a / (((a - 1)**2) * (a - 2)) - self.check_moments('pareto', expected_mean, - expected_var, {'a': a}) + expected_var = a / (((a - 1) ** 2) * (a - 2)) + self.check_moments("pareto", expected_mean, expected_var, {"a": a}) def test_invalid_args(self): - size = 10 a = -1.0 # positive `a` is expected - self.check_invalid_args('pareto', {'a': a}) + self.check_invalid_args("pareto", {"a": a}) def test_seed(self): a = 3.0 # a param for pareto distr - self.check_seed('pareto', {'a': a}) + self.check_seed("pareto", {"a": a}) class TestDistributionsPoisson(TestDistribution): - def test_extreme_value(self): lam = 0.0 - self.check_extreme_value('poisson', lam, {'lam': lam}) + self.check_extreme_value("poisson", lam, {"lam": lam}) def test_moments(self): lam = 0.8 expected_mean = lam expected_var = lam - self.check_moments('poisson', expected_mean, - expected_var, {'lam': lam}) + self.check_moments("poisson", expected_mean, expected_var, {"lam": lam}) def test_invalid_args(self): - lam = -1.0 # non-negative `lam` is expected - self.check_invalid_args('poisson', {'lam': lam}) + lam = -1.0 # non-negative `lam` is expected + self.check_invalid_args("poisson", {"lam": lam}) def test_seed(self): lam = 0.8 - self.check_seed('poisson', {'lam': lam}) + self.check_seed("poisson", {"lam": lam}) class TestDistributionsPower(TestDistribution): - def test_moments(self): a = 30.0 neg_a = -a expected_mean = neg_a / (neg_a - 1) - expected_var = neg_a / (((neg_a - 1)**2) * (neg_a - 2)) - self.check_moments('power', expected_mean, - expected_var, {'a': a}) + expected_var = neg_a / (((neg_a - 1) ** 2) * (neg_a - 2)) + self.check_moments("power", expected_mean, expected_var, {"a": a}) def test_invalid_args(self): - size = 10 a = -1.0 # positive `a` is expected - self.check_invalid_args('power', {'a': a}) + self.check_invalid_args("power", {"a": a}) def test_seed(self): a = 3.0 # a param for pareto distr - self.check_seed('power', {'a': a}) + self.check_seed("power", {"a": a}) class TestDistributionsRayleigh(TestDistribution): - def test_extreme_value(self): scale = 0.0 - self.check_extreme_value('rayleigh', scale, {'scale': scale}) + self.check_extreme_value("rayleigh", scale, {"scale": scale}) def test_moments(self): scale = 0.8 expected_mean = scale * numpy.sqrt(numpy.pi / 2) expected_var = 
((4 - numpy.pi) / 2) * scale * scale - self.check_moments('rayleigh', expected_mean, - expected_var, {'scale': scale}) + self.check_moments( + "rayleigh", expected_mean, expected_var, {"scale": scale} + ) def test_invalid_args(self): scale = -1.0 # positive `b` is expected - self.check_invalid_args('rayleigh', {'scale': scale}) + self.check_invalid_args("rayleigh", {"scale": scale}) def test_seed(self): scale = 0.8 - self.check_seed('rayleigh', {'scale': scale}) + self.check_seed("rayleigh", {"scale": scale}) class TestDistributionsStandardCauchy(TestDistribution): - def test_seed(self): - self.check_seed('standard_cauchy', {}) + self.check_seed("standard_cauchy", {}) class TestDistributionsStandardExponential(TestDistribution): - def test_moments(self): - shape = 0.8 expected_mean = 1.0 expected_var = 1.0 - self.check_moments('standard_exponential', - expected_mean, expected_var, {}) + self.check_moments( + "standard_exponential", expected_mean, expected_var, {} + ) def test_seed(self): - self.check_seed('standard_exponential', {}) + self.check_seed("standard_exponential", {}) class TestDistributionsStandardGamma(TestDistribution): - def test_extreme_value(self): - self.check_extreme_value('standard_gamma', 0.0, {'shape': 0.0}) + self.check_extreme_value("standard_gamma", 0.0, {"shape": 0.0}) def test_moments(self): shape = 0.8 expected_mean = shape expected_var = shape - self.check_moments('standard_gamma', expected_mean, - expected_var, {'shape': shape}) + self.check_moments( + "standard_gamma", expected_mean, expected_var, {"shape": shape} + ) def test_invalid_args(self): - shape = -1 # non-negative `shape` is expected - self.check_invalid_args('standard_gamma', {'shape': shape}) + shape = -1 # non-negative `shape` is expected + self.check_invalid_args("standard_gamma", {"shape": shape}) def test_seed(self): - self.check_seed('standard_gamma', {'shape': 0.0}) + self.check_seed("standard_gamma", {"shape": 0.0}) class TestDistributionsStandardNormal(TestDistribution): - def test_moments(self): expected_mean = 0.0 expected_var = 1.0 - self.check_moments('standard_normal', - expected_mean, expected_var, {}) + self.check_moments("standard_normal", expected_mean, expected_var, {}) def test_seed(self): - self.check_seed('standard_normal', {}) + self.check_seed("standard_normal", {}) class TestDistributionsStandardT(TestDistribution): - def test_moments(self): df = 300.0 expected_mean = 0.0 expected_var = df / (df - 2) - self.check_moments('standard_t', expected_mean, - expected_var, {'df': df}) + self.check_moments( + "standard_t", expected_mean, expected_var, {"df": df} + ) def test_invalid_args(self): - df = 0.0 # positive `df` is expected - self.check_invalid_args('standard_t', {'df': df}) + df = 0.0 # positive `df` is expected + self.check_invalid_args("standard_t", {"df": df}) def test_seed(self): - self.check_seed('standard_t', {'df': 10.0}) + self.check_seed("standard_t", {"df": 10.0}) class TestDistributionsTriangular(TestDistribution): - def test_moments(self): left = 1.0 mode = 2.0 right = 3.0 expected_mean = (left + mode + right) / 3 - expected_var = (left ** 2 + mode ** 2 + right ** 2 - left * mode - left * right - mode * right) / 18 - self.check_moments('triangular', expected_mean, - expected_var, {'left': left, 'mode': mode, 'right': right}) + expected_var = ( + left**2 + + mode**2 + + right**2 + - left * mode + - left * right + - mode * right + ) / 18 + self.check_moments( + "triangular", + expected_mean, + expected_var, + {"left": left, "mode": mode, "right": right}, + ) def 
test_invalid_args(self): - left = 2.0 # `left` is expected <= `mode` - mode = 1.0 # `mode` is expected > `left` + left = 2.0 # `left` is expected <= `mode` + mode = 1.0 # `mode` is expected > `left` right = 3.0 # OK - self.check_invalid_args('triangular', {'left': left, 'mode': mode, 'right': right}) + self.check_invalid_args( + "triangular", {"left": left, "mode": mode, "right": right} + ) - left = 1.0 # OK - mode = 3.0 # `mode` is expected <= `right` + left = 1.0 # OK + mode = 3.0 # `mode` is expected <= `right` right = 2.0 # `right` is expected > `mode` - self.check_invalid_args('triangular', {'left': left, 'mode': mode, 'right': right}) + self.check_invalid_args( + "triangular", {"left": left, "mode": mode, "right": right} + ) def test_seed(self): left = 1.0 mode = 2.0 right = 3.0 - self.check_seed('triangular', {'left': left, 'mode': mode, 'right': right}) + self.check_seed( + "triangular", {"left": left, "mode": mode, "right": right} + ) class TestDistributionsUniform(TestDistribution): - def test_extreme_value(self): low = 1.0 high = 1.0 expected_val = low - self.check_extreme_value('uniform', expected_val, - {'low': low, 'high': high}) + self.check_extreme_value( + "uniform", expected_val, {"low": low, "high": high} + ) def test_moments(self): low = 1.0 high = 2.0 expected_mean = (low + high) / 2 expected_var = ((high - low) ** 2) / 12 - self.check_moments('uniform', expected_mean, - expected_var, {'low': low, 'high': high}) + self.check_moments( + "uniform", expected_mean, expected_var, {"low": low, "high": high} + ) def test_seed(self): low = 1.0 high = 2.0 - self.check_seed('uniform', {'low': low, 'high': high}) + self.check_seed("uniform", {"low": low, "high": high}) class TestDistributionsVonmises: - - @pytest.mark.parametrize("kappa", [5.0, 0.5], ids=['large_kappa', 'small_kappa']) + @pytest.mark.parametrize( + "kappa", [5.0, 0.5], ids=["large_kappa", "small_kappa"] + ) def test_moments(self, kappa): size = 10**6 - mu = 2. + mu = 2.0 numpy_res = numpy.random.vonmises(mu, kappa, size=size) expected_mean = numpy.mean(numpy_res) @@ -870,16 +925,18 @@ def test_moments(self, kappa): def test_invalid_args(self): size = 10 - mu = 5.0 # OK + mu = 5.0 # OK kappa = -1.0 # non-negative `kappa` is expected with pytest.raises(ValueError): dpnp.random.vonmises(mu, kappa, size=size) - @pytest.mark.parametrize("kappa", [5.0, 0.5], ids=['large_kappa', 'small_kappa']) + @pytest.mark.parametrize( + "kappa", [5.0, 0.5], ids=["large_kappa", "small_kappa"] + ) def test_seed(self, kappa): seed = 28041990 size = 10 - mu = 2. 
+ mu = 2.0 dpnp.random.seed(seed) a1 = dpnp.asarray(dpnp.random.vonmises(mu, kappa, size=size)) dpnp.random.seed(seed) @@ -888,65 +945,70 @@ def test_seed(self, kappa): class TestDistributionsWald(TestDistribution): - def test_moments(self): size = 5 * 10**6 mean = 3.56 scale = 2.8 expected_mean = mean - expected_var = (mean ** 3) / scale - self.check_moments('wald', expected_mean, expected_var, - {'mean': mean, 'scale': scale}, size=size) + expected_var = (mean**3) / scale + self.check_moments( + "wald", + expected_mean, + expected_var, + {"mean": mean, "scale": scale}, + size=size, + ) def test_invalid_args(self): - size = 10 - mean = -1.0 # positive `mean` is expected - scale = 1.0 # OK - self.check_invalid_args('wald', {'mean': mean, 'scale': scale}) - mean = 1.0 # OK + mean = -1.0 # positive `mean` is expected + scale = 1.0 # OK + self.check_invalid_args("wald", {"mean": mean, "scale": scale}) + mean = 1.0 # OK scale = -1.0 # positive `scale` is expected - self.check_invalid_args('wald', {'mean': mean, 'scale': scale}) + self.check_invalid_args("wald", {"mean": mean, "scale": scale}) def test_seed(self): - mean = 3.56 # `mean` param for Wald distr - scale = 2.8 # `scale` param for Wald distr - self.check_seed('wald', {'mean': mean, 'scale': scale}) + mean = 3.56 # `mean` param for Wald distr + scale = 2.8 # `scale` param for Wald distr + self.check_seed("wald", {"mean": mean, "scale": scale}) class TestDistributionsWeibull(TestDistribution): - def test_extreme_value(self): a = 0.0 expected_val = a - self.check_extreme_value('weibull', expected_val, {'a': a}) + self.check_extreme_value("weibull", expected_val, {"a": a}) def test_invalid_args(self): a = -1.0 # non-negative `a` is expected - self.check_invalid_args('weibull', {'a': a}) + self.check_invalid_args("weibull", {"a": a}) def test_seed(self): a = 2.56 - self.check_seed('weibull', {'a': a}) + self.check_seed("weibull", {"a": a}) class TestDistributionsZipf(TestDistribution): - def test_invalid_args(self): a = 1.0 # parameter `a` is expected greater than 1. 
- self.check_invalid_args('zipf', {'a': a}) + self.check_invalid_args("zipf", {"a": a}) def test_seed(self): a = 2.56 - self.check_seed('zipf', {'a': a}) + self.check_seed("zipf", {"a": a}) class TestPermutationsTestShuffle: - - @pytest.mark.parametrize("dtype", [dpnp.float32, dpnp.float64, dpnp.int32, dpnp.int64], - ids=['float32', 'float64', 'int32', 'int64']) + @pytest.mark.parametrize( + "dtype", + [dpnp.float32, dpnp.float64, dpnp.int32, dpnp.int64], + ids=["float32", "float64", "int32", "int64"], + ) def test_shuffle(self, dtype): seed = 28041990 - input_x_int64 = dpnp.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], dtype=dpnp.int64) + input_x_int64 = dpnp.asarray( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 0], dtype=dpnp.int64 + ) input_x = dpnp.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], dtype=dtype) dpnp.random.seed(seed) dpnp.random.shuffle(input_x_int64) # inplace @@ -956,8 +1018,11 @@ def test_shuffle(self, dtype): actual_x = input_x assert_array_equal(actual_x, desired_x) - @pytest.mark.parametrize("dtype", [dpnp.float32, dpnp.float64, dpnp.int32, dpnp.int64], - ids=['float32', 'float64', 'int32', 'int64']) + @pytest.mark.parametrize( + "dtype", + [dpnp.float32, dpnp.float64, dpnp.int32, dpnp.int64], + ids=["float32", "float64", "int32", "int64"], + ) def test_no_miss_numbers(self, dtype): seed = 28041990 input_x = dpnp.asarray([5, 4, 0, 7, 6, 1, 8, 3, 2, 9], dtype=dtype) @@ -968,29 +1033,38 @@ def test_no_miss_numbers(self, dtype): actual_x = dpnp.sort(output_x) assert_array_equal(actual_x, desired_x) - @pytest.mark.parametrize("conv", [lambda x: dpnp.array([]), - # lambda x: dpnp.astype(dpnp.asarray(x), dpnp.int8), - lambda x: dpnp.astype(dpnp.asarray(x), dpnp.float32), - # lambda x: dpnp.asarray(x).astype(dpnp.complex64), - # lambda x: dpnp.astype(dpnp.asarray(x), object), - lambda x: dpnp.asarray([[i, i] for i in x]), - lambda x: dpnp.vstack([x, x]).T, - lambda x: (dpnp.asarray([(i, i) for i in x], [ - ("a", int), ("b", int)]).view(dpnp.recarray)), - lambda x: dpnp.asarray([(i, i) for i in x], - [("a", object), ("b", dpnp.int32)])], - ids=['lambda x: dpnp.array([])', - # 'lambda x: dpnp.astype(dpnp.asarray(x), dpnp.int8)', - 'lambda x: dpnp.astype(dpnp.asarray(x), dpnp.float32)', - # 'lambda x: dpnp.asarray(x).astype(dpnp.complex64)', - # 'lambda x: dpnp.astype(dpnp.asarray(x), object)', - 'lambda x: dpnp.asarray([[i, i] for i in x])', - 'lambda x: dpnp.vstack([x, x]).T', - 'lambda x: (dpnp.asarray([(i, i) for i in x], ['\ - '("a", int), ("b", int)]).view(dpnp.recarray))', - 'lambda x: dpnp.asarray([(i, i) for i in x], [("a", object), ("b", dpnp.int32)])]' - ] - ) + @pytest.mark.parametrize( + "conv", + [ + lambda x: dpnp.array([]), + # lambda x: dpnp.astype(dpnp.asarray(x), dpnp.int8), + lambda x: dpnp.astype(dpnp.asarray(x), dpnp.float32), + # lambda x: dpnp.asarray(x).astype(dpnp.complex64), + # lambda x: dpnp.astype(dpnp.asarray(x), object), + lambda x: dpnp.asarray([[i, i] for i in x]), + lambda x: dpnp.vstack([x, x]).T, + lambda x: ( + dpnp.asarray( + [(i, i) for i in x], [("a", int), ("b", int)] + ).view(dpnp.recarray) + ), + lambda x: dpnp.asarray( + [(i, i) for i in x], [("a", object), ("b", dpnp.int32)] + ), + ], + ids=[ + "lambda x: dpnp.array([])", + # 'lambda x: dpnp.astype(dpnp.asarray(x), dpnp.int8)', + "lambda x: dpnp.astype(dpnp.asarray(x), dpnp.float32)", + # 'lambda x: dpnp.asarray(x).astype(dpnp.complex64)', + # 'lambda x: dpnp.astype(dpnp.asarray(x), object)', + "lambda x: dpnp.asarray([[i, i] for i in x])", + "lambda x: dpnp.vstack([x, x]).T", + "lambda x: (dpnp.asarray([(i, 
i) for i in x], [" + '("a", int), ("b", int)]).view(dpnp.recarray))', + 'lambda x: dpnp.asarray([(i, i) for i in x], [("a", object), ("b", dpnp.int32)])]', + ], + ) def test_shuffle1(self, conv): # `conv` contans test lists, arrays (of various dtypes), and multidimensional # versions of both, c-contiguous or not. @@ -1018,12 +1092,14 @@ def test_shuffle1(self, conv): desired = conv(dpnp_1d) assert_array_equal(actual, desired) - @pytest.mark.parametrize("conv", [lambda x: x, - lambda x: [(i, i) for i in x]], - ids=['lambda x: x', - 'lambda x: [(i, i) for i in x]', - ] - ) + @pytest.mark.parametrize( + "conv", + [lambda x: x, lambda x: [(i, i) for i in x]], + ids=[ + "lambda x: x", + "lambda x: [(i, i) for i in x]", + ], + ) def test_shuffle1_fallback(self, conv): # This is parameterized version of original tests of `numpy.random` (both the same): # * tests/test_random.py::TestRandomDist::test_shuffle diff --git a/tests/test_sort.py b/tests/test_sort.py index 205dddafd9c9..f669d62239ad 100644 --- a/tests/test_sort.py +++ b/tests/test_sort.py @@ -1,29 +1,37 @@ +import numpy import pytest import dpnp -import numpy - -@pytest.mark.parametrize("kth", - [0, 1], - ids=['0', '1']) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("array", - [[3, 4, 2, 1], - [[1, 0], [3, 0]], - [[3, 2], [1, 6]], - [[4, 2, 3], [3, 4, 1]], - [[[1, -3], [3, 0]], [[5, 2], [0, 1]], [[1, 0], [0, 1]]], - [[[[8, 2], [3, 0]], [[5, 2], [0, 1]]], [[[1, 3], [3, 1]], [[5, 2], [0, 1]]]]], - ids=['[3, 4, 2, 1]', - '[[1, 0], [3, 0]]', - '[[3, 2], [1, 6]]', - '[[4, 2, 3], [3, 4, 1]]', - '[[[1, -3], [3, 0]], [[5, 2], [0, 1]], [[1, 0], [0, 1]]]', - '[[[[8, 2], [3, 0]], [[5, 2], [0, 1]]], [[[1, 3], [3, 1]], [[5, 2], [0, 1]]]]']) +@pytest.mark.parametrize("kth", [0, 1], ids=["0", "1"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "array", + [ + [3, 4, 2, 1], + [[1, 0], [3, 0]], + [[3, 2], [1, 6]], + [[4, 2, 3], [3, 4, 1]], + [[[1, -3], [3, 0]], [[5, 2], [0, 1]], [[1, 0], [0, 1]]], + [ + [[[8, 2], [3, 0]], [[5, 2], [0, 1]]], + [[[1, 3], [3, 1]], [[5, 2], [0, 1]]], + ], + ], + ids=[ + "[3, 4, 2, 1]", + "[[1, 0], [3, 0]]", + "[[3, 2], [1, 6]]", + "[[4, 2, 3], [3, 4, 1]]", + "[[[1, -3], [3, 0]], [[5, 2], [0, 1]], [[1, 0], [0, 1]]]", + "[[[[8, 2], [3, 0]], [[5, 2], [0, 1]]], [[[1, 3], [3, 1]], [[5, 2], [0, 1]]]]", + ], +) def test_partition(array, dtype, kth): a = numpy.array(array, dtype) ia = dpnp.array(array, dtype) @@ -32,43 +40,47 @@ def test_partition(array, dtype, kth): numpy.testing.assert_array_equal(expected, result) -@pytest.mark.parametrize("side", - ["left", "right"], - ids=['"left"', '"right"']) -@pytest.mark.parametrize("v_", - [ - [[3, 4], [2, 1]], - [[1, 0], [3, 0]], - [[3, 2, 1, 6]], - [[4, 2], [3, 3], [4, 1]], - [[1, -3, 3], [0, 5, 2], [0, 1, 1], [0, 0, 1]], - [[[[8, 2], [3, 0]], [[5, 2], [0, 1]]], [[[1, 3], [3, 1]], [[5, 2], [0, 1]]]] - ], - ids=[ - '[[3, 4], [2, 1]]', - '[[1, 0], [3, 0]]', - '[[3, 2, 1, 6]]', - '[[4, 2], [3, 3], [4, 1]]', - '[[1, -3, 3], [0, 5, 2], [0, 1, 1], [0, 0, 1]]', - '[[[[8, 2], [3, 0]], [[5, 2], [0, 1]]], [[[1, 3], [3, 1]], [[5, 2], [0, 1]]]]' - ]) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("array", - [ - [1, 2, 3, 4], - [-5, 
-1, 0, 3, 17, 100] - ], - ids=[ - '[1, 2, 3, 4]', - '[-5, -1, 0, 3, 17, 100]' - # '[1, 0, 3, 0]', - # '[3, 2, 1, 6]', - # '[4, 2, 3, 3, 4, 1]', - # '[1, -3, 3, 0, 5, 2, 0, 1, 1, 0, 0, 1]', - # '[8, 2, 3, 0, 5, 2, 0, 1, 1, 3, 3, 1, 5, 2, 0, 1]' - ]) +@pytest.mark.parametrize("side", ["left", "right"], ids=['"left"', '"right"']) +@pytest.mark.parametrize( + "v_", + [ + [[3, 4], [2, 1]], + [[1, 0], [3, 0]], + [[3, 2, 1, 6]], + [[4, 2], [3, 3], [4, 1]], + [[1, -3, 3], [0, 5, 2], [0, 1, 1], [0, 0, 1]], + [ + [[[8, 2], [3, 0]], [[5, 2], [0, 1]]], + [[[1, 3], [3, 1]], [[5, 2], [0, 1]]], + ], + ], + ids=[ + "[[3, 4], [2, 1]]", + "[[1, 0], [3, 0]]", + "[[3, 2, 1, 6]]", + "[[4, 2], [3, 3], [4, 1]]", + "[[1, -3, 3], [0, 5, 2], [0, 1, 1], [0, 0, 1]]", + "[[[[8, 2], [3, 0]], [[5, 2], [0, 1]]], [[[1, 3], [3, 1]], [[5, 2], [0, 1]]]]", + ], +) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize( + "array", + [[1, 2, 3, 4], [-5, -1, 0, 3, 17, 100]], + ids=[ + "[1, 2, 3, 4]", + "[-5, -1, 0, 3, 17, 100]" + # '[1, 0, 3, 0]', + # '[3, 2, 1, 6]', + # '[4, 2, 3, 3, 4, 1]', + # '[1, -3, 3, 0, 5, 2, 0, 1, 1, 0, 0, 1]', + # '[8, 2, 3, 0, 5, 2, 0, 1, 1, 3, 3, 1, 5, 2, 0, 1]' + ], +) def test_searchsorted(array, dtype, v_, side): a = numpy.array(array, dtype) ia = dpnp.array(array, dtype) diff --git a/tests/test_special.py b/tests/test_special.py index da9938d75e9c..fd428fa8ca38 100644 --- a/tests/test_special.py +++ b/tests/test_special.py @@ -1,7 +1,9 @@ import math -import dpnp + import numpy +import dpnp + def test_erf(): a = numpy.linspace(2.0, 3.0, num=10) diff --git a/tests/test_statistics.py b/tests/test_statistics.py index 7973b008392b..3180cee7924f 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -1,15 +1,15 @@ +import numpy import pytest import dpnp -import numpy - -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) -@pytest.mark.parametrize("size", - [2, 4, 8, 16, 3, 9, 27, 81]) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("size", [2, 4, 8, 16, 3, 9, 27, 81]) def test_median(type, size): a = numpy.arange(size, dtype=type) ia = dpnp.array(a) @@ -20,8 +20,7 @@ def test_median(type, size): numpy.testing.assert_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("axis", - [0, 1, -1, 2, -2, (1, 2), (0, -2)]) +@pytest.mark.parametrize("axis", [0, 1, -1, 2, -2, (1, 2), (0, -2)]) def test_max(axis): a = numpy.arange(768, dtype=numpy.float64).reshape((4, 4, 6, 8)) ia = dpnp.array(a) @@ -32,35 +31,41 @@ def test_max(axis): numpy.testing.assert_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("array", - [[2, 0, 6, 2], - [2, 0, 6, 2, 5, 6, 7, 8], - [], - [2, 1, numpy.nan, 5, 3], - [-1, numpy.nan, 1, numpy.inf], - [3, 6, 0, 1], - [3, 6, 0, 1, 8], - [3, 2, 9, 6, numpy.nan], - [numpy.nan, numpy.nan, numpy.inf, numpy.nan], - [[2, 0], [6, 2]], - [[2, 0, 6, 2], [5, 6, 7, 8]], - [[[2, 0], [6, 2]], [[5, 6], [7, 8]]], - [[-1, numpy.nan], [1, numpy.inf]], - [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]]], - ids=['[2, 0, 6, 2]', - '[2, 0, 6, 2, 5, 6, 7, 8]', - '[]', - '[2, 1, np.nan, 5, 3]', - '[-1, np.nan, 1, np.inf]', - '[3, 6, 0, 1]', - '[3, 6, 0, 1, 8]', - '[3, 2, 9, 6, np.nan]', - '[np.nan, np.nan, np.inf, np.nan]', - '[[2, 0], [6, 2]]', - '[[2, 0, 6, 2], 
[5, 6, 7, 8]]', - '[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]', - '[[-1, np.nan], [1, np.inf]]', - '[[np.nan, np.nan], [np.inf, np.nan]]']) +@pytest.mark.parametrize( + "array", + [ + [2, 0, 6, 2], + [2, 0, 6, 2, 5, 6, 7, 8], + [], + [2, 1, numpy.nan, 5, 3], + [-1, numpy.nan, 1, numpy.inf], + [3, 6, 0, 1], + [3, 6, 0, 1, 8], + [3, 2, 9, 6, numpy.nan], + [numpy.nan, numpy.nan, numpy.inf, numpy.nan], + [[2, 0], [6, 2]], + [[2, 0, 6, 2], [5, 6, 7, 8]], + [[[2, 0], [6, 2]], [[5, 6], [7, 8]]], + [[-1, numpy.nan], [1, numpy.inf]], + [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]], + ], + ids=[ + "[2, 0, 6, 2]", + "[2, 0, 6, 2, 5, 6, 7, 8]", + "[]", + "[2, 1, np.nan, 5, 3]", + "[-1, np.nan, 1, np.inf]", + "[3, 6, 0, 1]", + "[3, 6, 0, 1, 8]", + "[3, 2, 9, 6, np.nan]", + "[np.nan, np.nan, np.inf, np.nan]", + "[[2, 0], [6, 2]]", + "[[2, 0, 6, 2], [5, 6, 7, 8]]", + "[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]", + "[[-1, np.nan], [1, np.inf]]", + "[[np.nan, np.nan], [np.inf, np.nan]]", + ], +) def test_nanvar(array): a = numpy.array(array) ia = dpnp.array(a) @@ -75,17 +80,14 @@ def test_nanvar(array): class TestBincount: - - @pytest.mark.parametrize("array", - [[1, 2, 3], - [1, 2, 2, 1, 2, 4], - [2, 2, 2, 2]], - ids=['[1, 2, 3]', - '[1, 2, 2, 1, 2, 4]', - '[2, 2, 2, 2]']) - @pytest.mark.parametrize("minlength", - [0, 1, 3, 5], - ids=['0', '1', '3', '5']) + @pytest.mark.parametrize( + "array", + [[1, 2, 3], [1, 2, 2, 1, 2, 4], [2, 2, 2, 2]], + ids=["[1, 2, 3]", "[1, 2, 2, 1, 2, 4]", "[2, 2, 2, 2]"], + ) + @pytest.mark.parametrize( + "minlength", [0, 1, 3, 5], ids=["0", "1", "3", "5"] + ) def test_bincount_minlength(self, array, minlength): np_a = numpy.array(array) dpnp_a = dpnp.array(array) @@ -94,16 +96,14 @@ def test_bincount_minlength(self, array, minlength): result = dpnp.bincount(dpnp_a, minlength=minlength) numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("array", - [[1, 2, 2, 1, 2, 4]], - ids=['[1, 2, 2, 1, 2, 4]']) - @pytest.mark.parametrize("weights", - [None, - [0.3, 0.5, 0.2, 0.7, 1., -0.6], - [2, 2, 2, 2, 2, 2]], - ids=['None', - '[0.3, 0.5, 0.2, 0.7, 1., -0.6]', - '[2, 2, 2, 2, 2, 2]']) + @pytest.mark.parametrize( + "array", [[1, 2, 2, 1, 2, 4]], ids=["[1, 2, 2, 1, 2, 4]"] + ) + @pytest.mark.parametrize( + "weights", + [None, [0.3, 0.5, 0.2, 0.7, 1.0, -0.6], [2, 2, 2, 2, 2, 2]], + ids=["None", "[0.3, 0.5, 0.2, 0.7, 1., -0.6]", "[2, 2, 2, 2, 2, 2]"], + ) def test_bincount_weights(self, array, weights): np_a = numpy.array(array) dpnp_a = dpnp.array(array) diff --git a/tests/test_strides.py b/tests/test_strides.py index 08f3bbed0ae6..5866c0c9b874 100644 --- a/tests/test_strides.py +++ b/tests/test_strides.py @@ -1,8 +1,9 @@ import math + +import numpy import pytest import dpnp -import numpy def _getattr(ex, str_): @@ -13,11 +14,17 @@ def _getattr(ex, str_): return res -@pytest.mark.parametrize("func_name", - ['abs', ]) -@pytest.mark.parametrize("type", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=['float64', 'float32', 'int64', 'int32']) +@pytest.mark.parametrize( + "func_name", + [ + "abs", + ], +) +@pytest.mark.parametrize( + "type", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) def test_strides(func_name, type): shape = (4, 4) a = numpy.arange(shape[0] * shape[1], dtype=type).reshape(shape) @@ -34,17 +41,49 @@ def test_strides(func_name, type): numpy.testing.assert_allclose(expected, result) -@pytest.mark.parametrize("func_name", - ["arccos", "arccosh", "arcsin", "arcsinh", "arctan", 
"arctanh", "cbrt", "ceil", "copy", "cos", - "cosh", "conjugate", "degrees", "ediff1d", "exp", "exp2", "expm1", "fabs", "floor", "log", - "log10", "log1p", "log2", "negative", "radians", "sign", "sin", "sinh", "sqrt", "square", - "tanh", "trunc"]) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(10,)], - ids=["(10,)"]) +@pytest.mark.parametrize( + "func_name", + [ + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctanh", + "cbrt", + "ceil", + "copy", + "cos", + "cosh", + "conjugate", + "degrees", + "ediff1d", + "exp", + "exp2", + "expm1", + "fabs", + "floor", + "log", + "log10", + "log1p", + "log2", + "negative", + "radians", + "sign", + "sin", + "sinh", + "sqrt", + "square", + "tanh", + "trunc", + ], +) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(10,)], ids=["(10,)"]) def test_strides_1arg(func_name, dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = a[::2] @@ -61,12 +100,12 @@ def test_strides_1arg(func_name, dtype, shape): numpy.testing.assert_allclose(result, expected) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(10,)], - ids=["(10,)"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(10,)], ids=["(10,)"]) def test_strides_erf(dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = a[::2] @@ -83,12 +122,12 @@ def test_strides_erf(dtype, shape): numpy.testing.assert_allclose(result, expected) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(10,)], - ids=["(10,)"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(10,)], ids=["(10,)"]) def test_strides_reciprocal(dtype, shape): start, stop = 1, numpy.prod(shape) + 1 @@ -104,12 +143,12 @@ def test_strides_reciprocal(dtype, shape): numpy.testing.assert_allclose(result, expected, rtol=1e-06) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(10,)], - ids=["(10,)"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(10,)], ids=["(10,)"]) def test_strides_tan(dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = a[::2] @@ -123,14 +162,25 @@ def test_strides_tan(dtype, shape): numpy.testing.assert_allclose(result, expected, rtol=1e-06) -@pytest.mark.parametrize("func_name", - ["add", "arctan2", "hypot", "maximum", "minimum", "multiply", "power", "subtract"]) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(3, 3)], - ids=["(3, 3)"]) 
+@pytest.mark.parametrize( + "func_name", + [ + "add", + "arctan2", + "hypot", + "maximum", + "minimum", + "multiply", + "power", + "subtract", + ], +) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(3, 3)], ids=["(3, 3)"]) def test_strides_2args(func_name, dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = a.T @@ -147,14 +197,14 @@ def test_strides_2args(func_name, dtype, shape): numpy.testing.assert_allclose(result, expected) -@pytest.mark.parametrize("func_name", - ["bitwise_and", "bitwise_or", "bitwise_xor", "left_shift", "right_shift"]) -@pytest.mark.parametrize("dtype", - [numpy.int64, numpy.int32], - ids=["int64", "int32"]) -@pytest.mark.parametrize("shape", - [(3, 3)], - ids=["(3, 3)"]) +@pytest.mark.parametrize( + "func_name", + ["bitwise_and", "bitwise_or", "bitwise_xor", "left_shift", "right_shift"], +) +@pytest.mark.parametrize( + "dtype", [numpy.int64, numpy.int32], ids=["int64", "int32"] +) +@pytest.mark.parametrize("shape", [(3, 3)], ids=["(3, 3)"]) def test_strides_bitwise(func_name, dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = a.T @@ -171,12 +221,12 @@ def test_strides_bitwise(func_name, dtype, shape): numpy.testing.assert_allclose(result, expected) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(3, 3)], - ids=["(3, 3)"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(3, 3)], ids=["(3, 3)"]) def test_strides_copysign(dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = -a.T @@ -190,12 +240,12 @@ def test_strides_copysign(dtype, shape): numpy.testing.assert_allclose(result, expected) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(3, 3)], - ids=["(3, 3)"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(3, 3)], ids=["(3, 3)"]) def test_strides_fmod(dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = a.T + 1 @@ -209,12 +259,12 @@ def test_strides_fmod(dtype, shape): numpy.testing.assert_allclose(result, expected) -@pytest.mark.parametrize("dtype", - [numpy.float64, numpy.float32, numpy.int64, numpy.int32], - ids=["float64", "float32", "int64", "int32"]) -@pytest.mark.parametrize("shape", - [(3, 3)], - ids=["(3, 3)"]) +@pytest.mark.parametrize( + "dtype", + [numpy.float64, numpy.float32, numpy.int64, numpy.int32], + ids=["float64", "float32", "int64", "int32"], +) +@pytest.mark.parametrize("shape", [(3, 3)], ids=["(3, 3)"]) def test_strides_true_devide(dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) b = a.T + 1 diff --git a/tests/test_sum.py b/tests/test_sum.py index 21b1a99ffe15..06d384fd76d1 100644 --- a/tests/test_sum.py +++ b/tests/test_sum.py @@ -1,10 +1,16 @@ -import dpnp - import numpy +import dpnp + def test_sum_float64(): - a = numpy.array([[[-2., 3.], [9.1, 0.2]], [[-2., 5.0], [-2, -1.2]], [[1.0, -2.], [5.0, -1.1]]]) + a = numpy.array( + 
[ + [[-2.0, 3.0], [9.1, 0.2]], + [[-2.0, 5.0], [-2, -1.2]], + [[1.0, -2.0], [5.0, -1.1]], + ] + ) ia = dpnp.array(a) for axis in range(len(a)): @@ -23,7 +29,13 @@ def test_sum_int(): def test_sum_axis(): - a = numpy.array([[[-2., 3.], [9.1, 0.2]], [[-2., 5.0], [-2, -1.2]], [[1.0, -2.], [5.0, -1.1]]]) + a = numpy.array( + [ + [[-2.0, 3.0], [9.1, 0.2]], + [[-2.0, 5.0], [-2, -1.2]], + [[1.0, -2.0], [5.0, -1.1]], + ] + ) ia = dpnp.array(a) result = dpnp.sum(ia, axis=1) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index b858db48a1fa..d4d64d60c7a0 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -1,9 +1,8 @@ -import pytest - -import dpnp import dpctl import numpy +import pytest +import dpnp list_of_backend_str = [ "host", @@ -44,53 +43,34 @@ def assert_sycl_queue_equal(result, expected): @pytest.mark.parametrize( "func,data", [ - pytest.param("abs", - [-1.2, 1.2]), - pytest.param("ceil", - [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), - pytest.param("conjugate", - [[1.+1.j, 0.], [0., 1.+1.j]]), - pytest.param("copy", - [1., 2., 3.]), - pytest.param("cumprod", - [[1., 2., 3.], [4., 5., 6.]]), - pytest.param("cumsum", - [[1., 2., 3.], [4., 5., 6.]]), - pytest.param("diff", - [1., 2., 4., 7., 0.]), - pytest.param("ediff1d", - [1., 2., 4., 7., 0.]), - pytest.param("fabs", - [-1.2, 1.2]), - pytest.param("floor", - [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), - pytest.param("gradient", - [1., 2., 4., 7., 11., 16.]), - pytest.param("nancumprod", - [1., dpnp.nan]), - pytest.param("nancumsum", - [1., dpnp.nan]), - pytest.param("nanprod", - [1., dpnp.nan]), - pytest.param("nansum", - [1., dpnp.nan]), - pytest.param("negative", - [1., -1.]), - pytest.param("prod", - [1., 2.]), - pytest.param("sign", - [-5., 4.5]), - pytest.param("sum", - [1., 2.]), - pytest.param("trapz", - [[0., 1., 2.], [3., 4., 5.]]), - pytest.param("trunc", - [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), + pytest.param("abs", [-1.2, 1.2]), + pytest.param("ceil", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), + pytest.param("conjugate", [[1.0 + 1.0j, 0.0], [0.0, 1.0 + 1.0j]]), + pytest.param("copy", [1.0, 2.0, 3.0]), + pytest.param("cumprod", [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), + pytest.param("cumsum", [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), + pytest.param("diff", [1.0, 2.0, 4.0, 7.0, 0.0]), + pytest.param("ediff1d", [1.0, 2.0, 4.0, 7.0, 0.0]), + pytest.param("fabs", [-1.2, 1.2]), + pytest.param("floor", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), + pytest.param("gradient", [1.0, 2.0, 4.0, 7.0, 11.0, 16.0]), + pytest.param("nancumprod", [1.0, dpnp.nan]), + pytest.param("nancumsum", [1.0, dpnp.nan]), + pytest.param("nanprod", [1.0, dpnp.nan]), + pytest.param("nansum", [1.0, dpnp.nan]), + pytest.param("negative", [1.0, -1.0]), + pytest.param("prod", [1.0, 2.0]), + pytest.param("sign", [-5.0, 4.5]), + pytest.param("sum", [1.0, 2.0]), + pytest.param("trapz", [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]), + pytest.param("trunc", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), ], ) -@pytest.mark.parametrize("device", - valid_devices, - ids=[device.filter_string for device in valid_devices]) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) def test_1in_1out(func, data, device): x_orig = numpy.array(data) expected = getattr(numpy, func)(x_orig) @@ -110,50 +90,56 @@ def test_1in_1out(func, data, device): @pytest.mark.parametrize( "func,data1,data2", [ - pytest.param("add", - [0., 1., 2., 3., 4., 5., 6., 7., 8.], - [0., 1., 2., 0., 1., 2., 0., 1., 2.]), - 
pytest.param("copysign", - [0., 1., 2.], - [-1., 0., 1.]), - pytest.param("cross", - [1., 2., 3.], - [4., 5., 6.]), - pytest.param("divide", - [0., 1., 2., 3., 4.], - [4., 4., 4., 4., 4.]), - pytest.param("floor_divide", - [1., 2., 3., 4.], - [2.5, 2.5, 2.5, 2.5]), - pytest.param("fmod", - [-3., -2., -1., 1., 2., 3.], - [2., 2., 2., 2., 2., 2.]), - pytest.param("maximum", - [2., 3., 4.], - [1., 5., 2.]), - pytest.param("minimum", - [2., 3., 4.], - [1., 5., 2.]), - pytest.param("multiply", - [0., 1., 2., 3., 4., 5., 6., 7., 8.], - [0., 1., 2., 0., 1., 2., 0., 1., 2.]), - pytest.param("power", - [0., 1., 2., 3., 4., 5.], - [1., 2., 3., 3., 2., 1.]), - pytest.param("remainder", - [0., 1., 2., 3., 4., 5., 6.], - [5., 5., 5., 5., 5., 5., 5.]), - pytest.param("subtract", - [0., 1., 2., 3., 4., 5., 6., 7., 8.], - [0., 1., 2., 0., 1., 2., 0., 1., 2.]), - pytest.param("matmul", - [[1., 0.], [0., 1.]], - [[4., 1.], [1., 2.]]), + pytest.param( + "add", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + [0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0], + ), + pytest.param("copysign", [0.0, 1.0, 2.0], [-1.0, 0.0, 1.0]), + pytest.param("cross", [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]), + pytest.param( + "divide", [0.0, 1.0, 2.0, 3.0, 4.0], [4.0, 4.0, 4.0, 4.0, 4.0] + ), + pytest.param( + "floor_divide", [1.0, 2.0, 3.0, 4.0], [2.5, 2.5, 2.5, 2.5] + ), + pytest.param( + "fmod", + [-3.0, -2.0, -1.0, 1.0, 2.0, 3.0], + [2.0, 2.0, 2.0, 2.0, 2.0, 2.0], + ), + pytest.param("maximum", [2.0, 3.0, 4.0], [1.0, 5.0, 2.0]), + pytest.param("minimum", [2.0, 3.0, 4.0], [1.0, 5.0, 2.0]), + pytest.param( + "multiply", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + [0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0], + ), + pytest.param( + "power", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0], + [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + ), + pytest.param( + "remainder", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], + [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], + ), + pytest.param( + "subtract", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + [0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0], + ), + pytest.param( + "matmul", [[1.0, 0.0], [0.0, 1.0]], [[4.0, 1.0], [1.0, 2.0]] + ), ], ) -@pytest.mark.parametrize("device", - valid_devices, - ids=[device.filter_string for device in valid_devices]) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) def test_2in_1out(func, data1, data2, device): x1_orig = numpy.array(data1) x2_orig = numpy.array(data2) @@ -175,32 +161,32 @@ def test_2in_1out(func, data1, data2, device): @pytest.mark.parametrize( "func,data1,data2", [ - pytest.param("add", - [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], - [0., 1., 2.]), - pytest.param("divide", - [0., 1., 2., 3., 4.], - [4.]), - pytest.param("floor_divide", - [1., 2., 3., 4.], - [2.5]), - pytest.param("fmod", - [-3., -2., -1., 1., 2., 3.], - [2.]), - pytest.param("multiply", - [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], - [0., 1., 2.]), - pytest.param("remainder", - [0., 1., 2., 3., 4., 5., 6.], - [5.]), - pytest.param("subtract", - [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], - [0., 1., 2.]), + pytest.param( + "add", + [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], + [0.0, 1.0, 2.0], + ), + pytest.param("divide", [0.0, 1.0, 2.0, 3.0, 4.0], [4.0]), + pytest.param("floor_divide", [1.0, 2.0, 3.0, 4.0], [2.5]), + pytest.param("fmod", [-3.0, -2.0, -1.0, 1.0, 2.0, 3.0], [2.0]), + pytest.param( + "multiply", + [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], + [0.0, 1.0, 2.0], + ), + pytest.param("remainder", [0.0, 1.0, 2.0, 
3.0, 4.0, 5.0, 6.0], [5.0]), + pytest.param( + "subtract", + [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], + [0.0, 1.0, 2.0], + ), ], ) -@pytest.mark.parametrize("device", - valid_devices, - ids=[device.filter_string for device in valid_devices]) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) def test_broadcasting(func, data1, data2, device): x1_orig = numpy.array(data1) x2_orig = numpy.array(data2) @@ -222,44 +208,52 @@ def test_broadcasting(func, data1, data2, device): @pytest.mark.parametrize( "func,data1,data2", [ - pytest.param("add", - [0., 1., 2., 3., 4., 5., 6., 7., 8.], - [0., 1., 2., 0., 1., 2., 0., 1., 2.]), - pytest.param("copysign", - [0., 1., 2.], - [-1., 0., 1.]), - pytest.param("divide", - [0., 1., 2., 3., 4.], - [4., 4., 4., 4., 4.]), - pytest.param("floor_divide", - [1., 2., 3., 4.], - [2.5, 2.5, 2.5, 2.5]), - pytest.param("fmod", - [-3., -2., -1., 1., 2., 3.], - [2., 2., 2., 2., 2., 2.]), - pytest.param("maximum", - [2., 3., 4.], - [1., 5., 2.]), - pytest.param("minimum", - [2., 3., 4.], - [1., 5., 2.]), - pytest.param("multiply", - [0., 1., 2., 3., 4., 5., 6., 7., 8.], - [0., 1., 2., 0., 1., 2., 0., 1., 2.]), - pytest.param("power", - [0., 1., 2., 3., 4., 5.], - [1., 2., 3., 3., 2., 1.]), - pytest.param("remainder", - [0., 1., 2., 3., 4., 5., 6.], - [5., 5., 5., 5., 5., 5., 5.]), - pytest.param("subtract", - [0., 1., 2., 3., 4., 5., 6., 7., 8.], - [0., 1., 2., 0., 1., 2., 0., 1., 2.]), + pytest.param( + "add", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + [0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0], + ), + pytest.param("copysign", [0.0, 1.0, 2.0], [-1.0, 0.0, 1.0]), + pytest.param( + "divide", [0.0, 1.0, 2.0, 3.0, 4.0], [4.0, 4.0, 4.0, 4.0, 4.0] + ), + pytest.param( + "floor_divide", [1.0, 2.0, 3.0, 4.0], [2.5, 2.5, 2.5, 2.5] + ), + pytest.param( + "fmod", + [-3.0, -2.0, -1.0, 1.0, 2.0, 3.0], + [2.0, 2.0, 2.0, 2.0, 2.0, 2.0], + ), + pytest.param("maximum", [2.0, 3.0, 4.0], [1.0, 5.0, 2.0]), + pytest.param("minimum", [2.0, 3.0, 4.0], [1.0, 5.0, 2.0]), + pytest.param( + "multiply", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + [0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0], + ), + pytest.param( + "power", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0], + [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + ), + pytest.param( + "remainder", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], + [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], + ), + pytest.param( + "subtract", + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + [0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0], + ), ], ) -@pytest.mark.parametrize("device", - valid_devices, - ids=[device.filter_string for device in valid_devices]) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) def test_out(func, data1, data2, device): x1_orig = numpy.array(data1) x2_orig = numpy.array(data2) @@ -280,9 +274,11 @@ def test_out(func, data1, data2, device): assert result_queue.sycl_device == expected_queue.sycl_device -@pytest.mark.parametrize("device", - valid_devices, - ids=[device.filter_string for device in valid_devices]) +@pytest.mark.parametrize( + "device", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) def test_modf(device): data = [0, 3.5] @@ -306,14 +302,18 @@ def test_modf(device): assert result2_queue.sycl_device == expected_queue.sycl_device -@pytest.mark.parametrize("device_from", - valid_devices, - ids=[device.filter_string for device in valid_devices]) 
-@pytest.mark.parametrize("device_to", - valid_devices, - ids=[device.filter_string for device in valid_devices]) +@pytest.mark.parametrize( + "device_from", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) +@pytest.mark.parametrize( + "device_to", + valid_devices, + ids=[device.filter_string for device in valid_devices], +) def test_to_device(device_from, device_to): - data = [1., 1., 1., 1., 1.] + data = [1.0, 1.0, 1.0, 1.0, 1.0] x = dpnp.array(data, device=device_from) y = x.to_device(device_to) diff --git a/tests/test_umath.py b/tests/test_umath.py index 52941ba94cb5..54b524a68b01 100644 --- a/tests/test_umath.py +++ b/tests/test_umath.py @@ -1,25 +1,39 @@ +import numpy import pytest -import numpy import dpnp # full list of umaths umaths = [i for i in dir(numpy) if isinstance(getattr(numpy, i), numpy.ufunc)] # print(umaths) -umaths = ['equal'] +umaths = ["equal"] # trigonometric -umaths.extend(['arccos', 'arcsin', 'arctan', 'cos', 'deg2rad', 'degrees', - 'rad2deg', 'radians', 'sin', 'tan', 'arctan2', 'hypot']) +umaths.extend( + [ + "arccos", + "arcsin", + "arctan", + "cos", + "deg2rad", + "degrees", + "rad2deg", + "radians", + "sin", + "tan", + "arctan2", + "hypot", + ] +) # 'unwrap' types = { - 'd': numpy.float64, - 'f': numpy.float32, - 'l': numpy.int64, - 'i': numpy.int32, + "d": numpy.float64, + "f": numpy.float32, + "l": numpy.int64, + "i": numpy.int32, } -supported_types = 'dfli' +supported_types = "dfli" def check_types(args_str): @@ -49,7 +63,7 @@ def get_args(args_str, xp=numpy): np_umath = getattr(numpy, umath) _types = np_umath.types for type in _types: - args_str = type[:type.find('->')] + args_str = type[: type.find("->")] if check_types(args_str): test_cases.append((umath, args_str)) @@ -58,7 +72,7 @@ def get_id(val): return val.__str__() -@pytest.mark.parametrize('test_cases', test_cases, ids=get_id) +@pytest.mark.parametrize("test_cases", test_cases, ids=get_id) def test_umaths(test_cases): umath, args_str = test_cases args = get_args(args_str, xp=numpy) @@ -74,7 +88,6 @@ def test_umaths(test_cases): class TestSin: - def test_sin_ordinary(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -90,9 +103,11 @@ def test_sin_ordinary(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -101,9 +116,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.sin(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -114,7 +129,6 @@ def test_invalid_shape(self, shape): class TestCos: - def test_cos(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -130,9 +144,11 @@ def test_cos(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + 
ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -141,9 +157,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.cos(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -154,7 +170,6 @@ def test_invalid_shape(self, shape): class TestsLog: - def test_log(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -170,9 +185,11 @@ def test_log(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -181,9 +198,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.log(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -194,7 +211,6 @@ def test_invalid_shape(self, shape): class TestExp: - def test_exp(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -210,9 +226,11 @@ def test_exp(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -221,9 +239,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.exp(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -234,7 +252,6 @@ def test_invalid_shape(self, shape): class TestArcsin: - def test_arcsin(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -250,9 +267,11 @@ def test_arcsin(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -261,9 +280,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.arcsin(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def 
test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -274,7 +293,6 @@ def test_invalid_shape(self, shape): class TestArctan: - def test_arctan(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -290,9 +308,11 @@ def test_arctan(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -301,9 +321,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.arctan(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -314,7 +334,6 @@ def test_invalid_shape(self, shape): class TestTan: - def test_tan(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -330,9 +349,11 @@ def test_tan(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -341,9 +362,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.tan(dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -354,7 +375,6 @@ def test_invalid_shape(self, shape): class TestArctan2: - def test_arctan2(self): array_data = numpy.arange(10) out = numpy.empty(10, dtype=numpy.float64) @@ -370,9 +390,11 @@ def test_arctan2(self): numpy.testing.assert_array_equal(expected, result) - @pytest.mark.parametrize("dtype", - [numpy.float32, numpy.int64, numpy.int32], - ids=['numpy.float32', 'numpy.int64', 'numpy.int32']) + @pytest.mark.parametrize( + "dtype", + [numpy.float32, numpy.int64, numpy.int32], + ids=["numpy.float32", "numpy.int64", "numpy.int32"], + ) def test_invalid_dtype(self, dtype): dp_array = dpnp.arange(10, dtype=dpnp.float64) @@ -381,9 +403,9 @@ def test_invalid_dtype(self, dtype): with pytest.raises(ValueError): dpnp.arctan2(dp_array, dp_array, out=dp_out) - @pytest.mark.parametrize("shape", - [(0,), (15, ), (2, 2)], - ids=['(0,)', '(15, )', '(2,2)']) + @pytest.mark.parametrize( + "shape", [(0,), (15,), (2, 2)], ids=["(0,)", "(15, )", "(2,2)"] + ) def test_invalid_shape(self, shape): dp_array = dpnp.arange(10, dtype=dpnp.float64) diff --git a/tests/testing/__init__.py b/tests/testing/__init__.py index 795fc9a85529..62cdd2ddd3a6 100644 --- a/tests/testing/__init__.py +++ b/tests/testing/__init__.py @@ -1,3 +1,5 @@ -from tests.testing.array import assert_allclose -from tests.testing.array import assert_array_equal -from tests.testing.array import assert_equal +from tests.testing.array import ( + 
assert_allclose, + assert_array_equal, + assert_equal, +) diff --git a/tests/testing/array.py b/tests/testing/array.py index 247811a019d1..c65b5a1bb1a8 100644 --- a/tests/testing/array.py +++ b/tests/testing/array.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -25,8 +25,8 @@ # ***************************************************************************** import numpy -from dpnp.dpnp_utils import convert_item +from dpnp.dpnp_utils import convert_item assert_allclose_orig = numpy.testing.assert_allclose assert_array_equal_orig = numpy.testing.assert_array_equal diff --git a/tests/tests_perf/data_generator.py b/tests/tests_perf/data_generator.py index bf568d7f6107..dc1b471f46a1 100644 --- a/tests/tests_perf/data_generator.py +++ b/tests/tests_perf/data_generator.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -24,16 +24,12 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** -import dpnp -from dpnp.dparray import dparray - import numpy +import dpnp +from dpnp.dparray import dparray -__all__ = [ - "gen_array_1d", - "gen_array_2d" -] +__all__ = ["gen_array_1d", "gen_array_2d"] def gen_ndarray(size, dtype=numpy.float64, low=None, high=None, seed=None): @@ -70,7 +66,9 @@ def gen_ndarray(size, dtype=numpy.float64, low=None, high=None, seed=None): return numpy.random.randint(low, high, size=size, dtype=dtype) - raise NotImplementedError(f"Generator of ndarray of type {dtype.__name__} not found.") + raise NotImplementedError( + f"Generator of ndarray of type {dtype.__name__} not found." + ) def gen_dparray(size, dtype=numpy.float64, low=None, high=None, seed=None): @@ -105,7 +103,9 @@ def gen_dparray(size, dtype=numpy.float64, low=None, high=None, seed=None): return dparr -def gen_array_1d(lib, size, dtype=numpy.float64, low=None, high=None, seed=None): +def gen_array_1d( + lib, size, dtype=numpy.float64, low=None, high=None, seed=None +): """ Generate array of random numbers bases on library. 
@@ -137,5 +137,9 @@ def gen_array_1d(lib, size, dtype=numpy.float64, low=None, high=None, seed=None) raise NotImplementedError(f"{lib.__name__} array generator not found.") -def gen_array_2d(lib, size_x, size_y, dtype=numpy.float64, low=None, high=None, seed=None): - return gen_array_1d(lib, size_x * size_y, dtype=dtype, low=low, high=high, seed=seed).reshape((size_x, size_y)) +def gen_array_2d( + lib, size_x, size_y, dtype=numpy.float64, low=None, high=None, seed=None +): + return gen_array_1d( + lib, size_x * size_y, dtype=dtype, low=low, high=high, seed=seed + ).reshape((size_x, size_y)) diff --git a/tests/tests_perf/examples/sinAB_test.py b/tests/tests_perf/examples/sinAB_test.py index 52e2b2ada6f0..37abbc70e9a0 100644 --- a/tests/tests_perf/examples/sinAB_test.py +++ b/tests/tests_perf/examples/sinAB_test.py @@ -2,7 +2,7 @@ def cos_2_args(executor, size, test_type): - """ sin(A + B) = sin A cos B + cos A sin B """ + """sin(A + B) = sin A cos B + cos A sin B""" start_time = time.perf_counter() input_A = executor.arange(size, dtype=test_type) @@ -25,16 +25,20 @@ def cos_2_args(executor, size, test_type): end_time = time.perf_counter() calculation_time = end_time - start_time - print(f"memalloc_time={memalloc_time}, calculation_time={calculation_time}, executor={executor}") + print( + f"memalloc_time={memalloc_time}, calculation_time={calculation_time}, executor={executor}" + ) return result -if __name__ == '__main__': +if __name__ == "__main__": size = 33554432 # 16777216 import dpnp + cos_2_args(dpnp, size, dpnp.float64) import numpy + cos_2_args(numpy, size, numpy.float64) diff --git a/tests/tests_perf/math_tests/test_black_scholes.py b/tests/tests_perf/math_tests/test_black_scholes.py index fdfcccc15803..88d4da7edfc5 100644 --- a/tests/tests_perf/math_tests/test_black_scholes.py +++ b/tests/tests_perf/math_tests/test_black_scholes.py @@ -1,11 +1,11 @@ import math + import numpy import pytest from tests.tests_perf.data_generator import * from tests.tests_perf.test_perf_base import DPNPTestPerfBase - SEED = 7777777 SL, SH = 10.0, 50.0 KL, KH = 10.0, 50.0 @@ -31,7 +31,9 @@ def gen_data(lib, low, high, size): def black_scholes_put(lib, S, K, T, r, sigma): - d1 = (lib.log(S / K) + (r + sigma * sigma / 2.) 
* T) / (sigma * lib.sqrt(T)) + d1 = (lib.log(S / K) + (r + sigma * sigma / 2.0) * T) / ( + sigma * lib.sqrt(T) + ) d2 = d1 - sigma * lib.sqrt(T) cdf_d1 = (1 + lib.erf(d1 / lib.sqrt(2))) / 2 @@ -43,7 +45,6 @@ def black_scholes_put(lib, S, K, T, r, sigma): class TestBlackScholes(DPNPTestPerfBase): - @pytest.mark.parametrize("dtype", [numpy.float64]) @pytest.mark.parametrize("size", [1024, 2048, 4096, 8192]) def test_bs_put(self, lib, dtype, size): @@ -52,6 +53,16 @@ def test_bs_put(self, lib, dtype, size): K = gen_data(lib, KL, KH, size) T = gen_data(lib, TL, TH, size) - self.dpnp_benchmark("bs_put", lib, dtype, size, - lib, S, K, T, RISK_FREE, VOLATILITY, - custom_fptr=black_scholes_put) + self.dpnp_benchmark( + "bs_put", + lib, + dtype, + size, + lib, + S, + K, + T, + RISK_FREE, + VOLATILITY, + custom_fptr=black_scholes_put, + ) diff --git a/tests/tests_perf/math_tests/test_dpnp.py b/tests/tests_perf/math_tests/test_dpnp.py index 4c291d4aec25..558f21acc417 100644 --- a/tests/tests_perf/math_tests/test_dpnp.py +++ b/tests/tests_perf/math_tests/test_dpnp.py @@ -1,15 +1,18 @@ -import dpnp import numpy import pytest +import dpnp from tests.tests_perf.data_generator import * from tests.tests_perf.test_perf_base import DPNPTestPerfBase class TestDPNP(DPNPTestPerfBase): - - @pytest.mark.parametrize("dtype", [numpy.float64, numpy.float32, numpy.int64, numpy.int32]) - @pytest.mark.parametrize("size", [32, 64, 128, 256]) # , 512, 1024, 2048, 4096]) + @pytest.mark.parametrize( + "dtype", [numpy.float64, numpy.float32, numpy.int64, numpy.int32] + ) + @pytest.mark.parametrize( + "size", [32, 64, 128, 256] + ) # , 512, 1024, 2048, 4096]) def test_matmul(self, lib, dtype, size): input1 = gen_array_2d(lib, size, size, dtype=dtype, seed=self.seed) input2 = gen_array_2d(lib, size, size, dtype=dtype, seed=self.seed) diff --git a/tests/tests_perf/math_tests/test_mathematical.py b/tests/tests_perf/math_tests/test_mathematical.py index 0f5382cff519..5af69d8d8bc2 100644 --- a/tests/tests_perf/math_tests/test_mathematical.py +++ b/tests/tests_perf/math_tests/test_mathematical.py @@ -6,10 +6,15 @@ class TestDPNPMathematical(DPNPTestPerfBase): - - @pytest.mark.parametrize("func_name", ["add", "divide", "multiply", "subtract"]) - @pytest.mark.parametrize("dtype", [numpy.float64, numpy.float32, numpy.int64, numpy.int32]) - @pytest.mark.parametrize("size", [512, 1024, 2048, 4096, 8192, 16384, 32768]) + @pytest.mark.parametrize( + "func_name", ["add", "divide", "multiply", "subtract"] + ) + @pytest.mark.parametrize( + "dtype", [numpy.float64, numpy.float32, numpy.int64, numpy.int32] + ) + @pytest.mark.parametrize( + "size", [512, 1024, 2048, 4096, 8192, 16384, 32768] + ) def test_math_2args(self, func_name, lib, dtype, size): input1 = gen_array_1d(lib, size, dtype=dtype, seed=self.seed) input2 = gen_array_1d(lib, size, dtype=dtype, seed=self.seed) diff --git a/tests/tests_perf/math_tests/test_trigonometric.py b/tests/tests_perf/math_tests/test_trigonometric.py index db1d90aa1b6e..f96e10a4c009 100644 --- a/tests/tests_perf/math_tests/test_trigonometric.py +++ b/tests/tests_perf/math_tests/test_trigonometric.py @@ -1,13 +1,13 @@ -import dpnp import numpy import pytest +import dpnp from tests.tests_perf.data_generator import * from tests.tests_perf.test_perf_base import DPNPTestPerfBase def cos_2_args(input_A, input_B, lib): - """ sin(A + B) = sin A cos B + cos A sin B """ + """sin(A + B) = sin A cos B + cos A sin B""" sin_A = lib.sin(input_A) cos_B = lib.cos(input_B) sincosA = sin_A * cos_B @@ -20,13 +20,44 @@ def 
cos_2_args(input_A, input_B, lib): class TestDPNPTrigonometric(DPNPTestPerfBase): - - @pytest.mark.parametrize("func_name", ["arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctanh", - "cbrt", "cos", "cosh", "deg2rad", "degrees", "exp", "exp2", - "expm1", "log", "log10", "log1p", "log2", "rad2deg", "radians", - "reciprocal", "sin", "sinh", "sqrt", "square", "tan", "tanh"]) - @pytest.mark.parametrize("dtype", [numpy.float64, numpy.float32, numpy.int64, numpy.int32]) - @pytest.mark.parametrize("size", [512, 1024, 2048, 4096, 8192, 16384, 32768]) + @pytest.mark.parametrize( + "func_name", + [ + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctanh", + "cbrt", + "cos", + "cosh", + "deg2rad", + "degrees", + "exp", + "exp2", + "expm1", + "log", + "log10", + "log1p", + "log2", + "rad2deg", + "radians", + "reciprocal", + "sin", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + ], + ) + @pytest.mark.parametrize( + "dtype", [numpy.float64, numpy.float32, numpy.int64, numpy.int32] + ) + @pytest.mark.parametrize( + "size", [512, 1024, 2048, 4096, 8192, 16384, 32768] + ) def test_trig1(self, func_name, lib, dtype, size): input1 = gen_array_1d(lib, size, dtype=dtype, seed=self.seed) @@ -45,4 +76,13 @@ def test_app1(self, lib, dtype, size): input1 = gen_array_1d(lib, size, dtype=dtype, seed=self.seed) input2 = gen_array_1d(lib, size, dtype=dtype, seed=self.seed) - self.dpnp_benchmark("cos_2_args", lib, dtype, input1.size, input1, input2, lib, custom_fptr=cos_2_args) + self.dpnp_benchmark( + "cos_2_args", + lib, + dtype, + input1.size, + input1, + input2, + lib, + custom_fptr=cos_2_args, + ) diff --git a/tests/tests_perf/test_perf_base.py b/tests/tests_perf/test_perf_base.py index 3443b2fd89e0..99a9d019d265 100644 --- a/tests/tests_perf/test_perf_base.py +++ b/tests/tests_perf/test_perf_base.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -30,10 +30,11 @@ import time import warnings -import dpnp import numpy import pytest +import dpnp + # def pytest_generate_tests(metafunc): # metafunc.parametrize("lib", [numpy, dpnp], ids=["base", "DPNP"]) @@ -70,7 +71,9 @@ def add(self, name, lib, dtype, size, result): self.results_data[name][dtype][lib][size] = result - def dpnp_benchmark(self, name, lib, dtype, size, *args, custom_fptr=None, **kwargs): + def dpnp_benchmark( + self, name, lib, dtype, size, *args, custom_fptr=None, **kwargs + ): """ Test performance of specified function. 
@@ -89,15 +92,15 @@ def dpnp_benchmark(self, name, lib, dtype, size, *args, custom_fptr=None, **kwar kwargs : dict key word parameters of the function """ - if (custom_fptr is None): + if custom_fptr is None: examine_function = getattr(lib, name) else: examine_function = custom_fptr exec_times = [] - for iteration in range(self.repeat): + for _ in range(self.repeat): start_time = time.perf_counter() - result = examine_function(*args, **kwargs) + examine_function(*args, **kwargs) end_time = time.perf_counter() exec_times.append(end_time - start_time) @@ -108,13 +111,13 @@ def print_head(self): print() pw = self.print_width pwn = self.print_num_width - print(f"Function".center(pw[0]), end=self.sep) - print(f"type".center(pw[1]), end=self.sep) - print(f"lib".center(pw[2]), end=self.sep) - print(f"size".center(pw[3]), end=self.sep) - print(f"median".center(pwn), end=self.sep) - print(f"min".center(pwn), end=self.sep) - print(f"max".center(pwn), end=self.sep) + print("Function".center(pw[0]), end=self.sep) + print("type".center(pw[1]), end=self.sep) + print("lib".center(pw[2]), end=self.sep) + print("size".center(pw[3]), end=self.sep) + print("median".center(pwn), end=self.sep) + print("min".center(pwn), end=self.sep) + print("max".center(pwn), end=self.sep) print() def print_csv(self): @@ -151,7 +154,13 @@ def print_csv(self): graph_data[lib_id_prn]["x"].append(size) graph_data[lib_id_prn]["y"].append(val_median) - self.plot_graph_2lines(self, graph_data, func_name=func_name, lib=lib_id_prn, type=dtype_id_prn) + self.plot_graph_2lines( + self, + graph_data, + func_name=func_name, + lib=lib_id_prn, + type=dtype_id_prn, + ) self.plot_graph_ratio(self, func_name, func_results) def plot_graph_2lines(self, graph_data, func_name, lib, type): @@ -165,7 +174,7 @@ def plot_graph_2lines(self, graph_data, func_name, lib, type): plt.grid(True) for lib_id, axis in graph_data.items(): - plt.plot(axis["x"], axis["y"], label=lib_id, marker='.') + plt.plot(axis["x"], axis["y"], label=lib_id, marker=".") plt.legend() plt.tight_layout() @@ -181,14 +190,18 @@ def plot_graph_ratio(self, func_name, func_results): plt.suptitle(f"Ratio for '{func_name}' time in (s)") plt.xlabel("number of elements") plt.ylabel("ratio") - ax.spines['bottom'].set_position(('data', 1)) + ax.spines["bottom"].set_position(("data", 1)) ax.grid(True) for dtype_id, dtype_results in func_results.items(): dtype_id_prn = dtype_id.__name__ - if (len(dtype_results.keys()) != 2): - warnings.warn(UserWarning("DPNP Performance test: expected two libraries only for this type of graph")) + if len(dtype_results.keys()) != 2: + warnings.warn( + UserWarning( + "DPNP Performance test: expected two libraries only for this type of graph" + ) + ) plt.close() return @@ -207,7 +220,7 @@ def plot_graph_ratio(self, func_name, func_results): val_ratio_x.append(size) val_ratio_y.append(lib_results_0_median / lib_results_1_median) - plt.plot(val_ratio_x, val_ratio_y, label=dtype_id_prn, marker='.') + plt.plot(val_ratio_x, val_ratio_y, label=dtype_id_prn, marker=".") ax.legend() plt.tight_layout() diff --git a/tests/third_party/cupy/binary_tests/test_elementwise.py b/tests/third_party/cupy/binary_tests/test_elementwise.py index b2212e043f23..3d69999b0b9a 100644 --- a/tests/third_party/cupy/binary_tests/test_elementwise.py +++ b/tests/third_party/cupy/binary_tests/test_elementwise.py @@ -5,7 +5,6 @@ @testing.gpu class TestElementwise(unittest.TestCase): - @testing.for_int_dtypes() @testing.numpy_cupy_array_equal() def check_unary_int(self, name, xp, dtype): @@ -20,19 
+19,19 @@ def check_binary_int(self, name, xp, dtype): return getattr(xp, name)(a, b) def test_bitwise_and(self): - self.check_binary_int('bitwise_and') + self.check_binary_int("bitwise_and") def test_bitwise_or(self): - self.check_binary_int('bitwise_or') + self.check_binary_int("bitwise_or") def test_bitwise_xor(self): - self.check_binary_int('bitwise_xor') + self.check_binary_int("bitwise_xor") def test_invert(self): - self.check_unary_int('invert') + self.check_unary_int("invert") def test_left_shift(self): - self.check_binary_int('left_shift') + self.check_binary_int("left_shift") def test_right_shift(self): - self.check_binary_int('right_shift') + self.check_binary_int("right_shift") diff --git a/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py b/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py index 395af99edc8b..13662d3ebea1 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py +++ b/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py @@ -9,7 +9,6 @@ @testing.gpu class TestConj(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_almost_equal() def test_conj(self, xp, dtype): @@ -41,7 +40,6 @@ def test_conjugate_pass(self, xp, dtype): @testing.gpu class TestAngle(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_almost_equal() def test_angle(self, xp, dtype): @@ -51,7 +49,6 @@ def test_angle(self, xp, dtype): @testing.gpu class TestRealImag(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_almost_equal(accept_error=False) def test_real(self, xp, dtype): @@ -102,28 +99,28 @@ def test_real_setter_zero_dim(self, xp, dtype): x.real = 2 return x - @testing.for_dtypes('FD') + @testing.for_dtypes("FD") @testing.numpy_cupy_array_almost_equal(accept_error=False) def test_real_setter_non_contiguous(self, xp, dtype): x = testing.shaped_arange((2, 3, 2), xp, dtype).transpose(0, 2, 1) x.real = testing.shaped_reverse_arange((2, 2, 3), xp, dtype).real return x - @testing.for_dtypes('FD') + @testing.for_dtypes("FD") @testing.numpy_cupy_array_almost_equal(accept_error=False) def test_imag_setter(self, xp, dtype): x = testing.shaped_arange((2, 3), xp, dtype) x.imag = testing.shaped_reverse_arange((2, 3), xp, dtype).real return x - @testing.for_dtypes('FD') + @testing.for_dtypes("FD") @testing.numpy_cupy_array_almost_equal(accept_error=False) def test_imag_setter_zero_dim(self, xp, dtype): x = xp.array(1, dtype=dtype) x.imag = 2 return x - @testing.for_dtypes('FD') + @testing.for_dtypes("FD") @testing.numpy_cupy_array_almost_equal(accept_error=False) def test_imag_setter_non_contiguous(self, xp, dtype): x = testing.shaped_arange((2, 3, 2), xp, dtype).transpose(0, 2, 1) @@ -153,16 +150,16 @@ def test_imag_inplace(self, dtype): x.imag[:] = 1 expected = cupy.zeros((2, 3), dtype=dtype) + ( - 1j if x.dtype.kind == 'c' else 0) + 1j if x.dtype.kind == "c" else 0 + ) assert cupy.all(x == expected) @testing.gpu class TestScalarConversion(unittest.TestCase): - @testing.for_all_dtypes() def test_scalar_conversion(self, dtype): - scalar = 1 + 1j if numpy.dtype(dtype).kind == 'c' else 1 + scalar = 1 + 1j if numpy.dtype(dtype).kind == "c" else 1 x_1d = cupy.array([scalar]).astype(dtype) self.assertEqual(complex(x_1d), scalar) diff --git a/tests/third_party/cupy/core_tests/test_ndarray_conversion.py b/tests/third_party/cupy/core_tests/test_ndarray_conversion.py index 9dcf1bd0d781..d9b73196cafc 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_conversion.py +++ 
b/tests/third_party/cupy/core_tests/test_ndarray_conversion.py @@ -8,12 +8,11 @@ @testing.parameterize( - {'shape': ()}, - {'shape': (1,)}, - {'shape': (1, 1, 1)}, + {"shape": ()}, + {"shape": (1,)}, + {"shape": (1, 1, 1)}, ) class TestNdarrayItem(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_item(self, xp, dtype): @@ -22,12 +21,11 @@ def test_item(self, xp, dtype): @testing.parameterize( - {'shape': (0,)}, - {'shape': (2, 3)}, - {'shape': (1, 0, 1)}, + {"shape": (0,)}, + {"shape": (2, 3)}, + {"shape": (1, 0, 1)}, ) class TestNdarrayItemRaise(unittest.TestCase): - def test_item(self): for xp in (numpy, cupy): a = testing.shaped_arange(self.shape, xp, xp.float32) @@ -36,19 +34,18 @@ def test_item(self): @testing.parameterize( - {'shape': ()}, - {'shape': (1,)}, - {'shape': (2, 3)}, - {'shape': (2, 3), 'order': 'C'}, - {'shape': (2, 3), 'order': 'F'}, + {"shape": ()}, + {"shape": (1,)}, + {"shape": (2, 3)}, + {"shape": (2, 3), "order": "C"}, + {"shape": (2, 3), "order": "F"}, ) class TestNdarrayToBytes(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_item(self, xp, dtype): a = testing.shaped_arange(self.shape, xp, dtype) - if hasattr(self, 'order'): + if hasattr(self, "order"): return a.tobytes(self.order) else: return a.tobytes() diff --git a/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py b/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py index 1a5a87fbae40..5fdd3d120449 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py +++ b/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py @@ -5,12 +5,13 @@ import dpnp as cupy from tests.third_party.cupy import testing + # from cupy import util def astype_without_warning(x, dtype, *args, **kwargs): dtype = numpy.dtype(dtype) - if x.dtype.kind == 'c' and dtype.kind not in ['b', 'c']: + if x.dtype.kind == "c" and dtype.kind not in ["b", "c"]: with testing.assert_warns(numpy.ComplexWarning): return x.astype(dtype, *args, **kwargs) else: @@ -19,7 +20,6 @@ def astype_without_warning(x, dtype, *args, **kwargs): @testing.gpu class TestArrayCopyAndView(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_view(self, xp): a = testing.shaped_arange((4,), xp, dtype=numpy.float32) @@ -99,15 +99,15 @@ def test_transposed_fill(self, xp, dtype): b.fill(1) return b - @testing.for_orders(['C', 'F', 'A', 'K', None]) - @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) + @testing.for_orders(["C", "F", "A", "K", None]) + @testing.for_all_dtypes_combination(("src_dtype", "dst_dtype")) @testing.numpy_cupy_array_equal() def test_astype(self, xp, src_dtype, dst_dtype, order): a = testing.shaped_arange((2, 3, 4), xp, src_dtype) return astype_without_warning(a, dst_dtype, order=order) - @testing.for_orders('CFAK') - @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) + @testing.for_orders("CFAK") + @testing.for_all_dtypes_combination(("src_dtype", "dst_dtype")) def test_astype_type(self, src_dtype, dst_dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, src_dtype) b = astype_without_warning(a, dst_dtype, order=order) @@ -115,14 +115,14 @@ def test_astype_type(self, src_dtype, dst_dtype, order): b_cpu = astype_without_warning(a_cpu, dst_dtype, order=order) self.assertEqual(b.dtype.type, b_cpu.dtype.type) - @testing.for_orders('CAK') + @testing.for_orders("CAK") @testing.for_all_dtypes() def test_astype_type_c_contiguous_no_copy(self, dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, 
dtype) b = a.astype(dtype, order=order, copy=False) self.assertTrue(b is a) - @testing.for_orders('FAK') + @testing.for_orders("FAK") @testing.for_all_dtypes() def test_astype_type_f_contiguous_no_copy(self, dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, dtype) @@ -130,34 +130,40 @@ def test_astype_type_f_contiguous_no_copy(self, dtype, order): b = a.astype(dtype, order=order, copy=False) self.assertTrue(b is a) - @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) + @testing.for_all_dtypes_combination(("src_dtype", "dst_dtype")) @testing.numpy_cupy_array_equal() def test_astype_strides(self, xp, src_dtype, dst_dtype): src = xp.empty((1, 2, 3), dtype=src_dtype) return numpy.array( - astype_without_warning(src, dst_dtype, order='K').strides) + astype_without_warning(src, dst_dtype, order="K").strides + ) - @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) + @testing.for_all_dtypes_combination(("src_dtype", "dst_dtype")) @testing.numpy_cupy_array_equal() def test_astype_strides_negative(self, xp, src_dtype, dst_dtype): src = xp.empty((2, 3), dtype=src_dtype)[::-1, :] return numpy.array( - astype_without_warning(src, dst_dtype, order='K').strides) + astype_without_warning(src, dst_dtype, order="K").strides + ) - @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) + @testing.for_all_dtypes_combination(("src_dtype", "dst_dtype")) @testing.numpy_cupy_array_equal() def test_astype_strides_swapped(self, xp, src_dtype, dst_dtype): src = xp.swapaxes(xp.empty((2, 3, 4), dtype=src_dtype), 1, 0) return numpy.array( - astype_without_warning(src, dst_dtype, order='K').strides) + astype_without_warning(src, dst_dtype, order="K").strides + ) - @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) + @testing.for_all_dtypes_combination(("src_dtype", "dst_dtype")) @testing.numpy_cupy_array_equal() def test_astype_strides_broadcast(self, xp, src_dtype, dst_dtype): - src, _ = xp.broadcast_arrays(xp.empty((2,), dtype=src_dtype), - xp.empty((2, 3, 2), dtype=src_dtype)) + src, _ = xp.broadcast_arrays( + xp.empty((2,), dtype=src_dtype), + xp.empty((2, 3, 2), dtype=src_dtype), + ) return numpy.array( - astype_without_warning(src, dst_dtype, order='K').strides) + astype_without_warning(src, dst_dtype, order="K").strides + ) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -172,9 +178,10 @@ def test_diagonal2(self, xp, dtype): return a.diagonal(-1, 2, 0) # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') - @testing.for_orders('CF') - @testing.for_dtypes([numpy.int16, numpy.int64, - numpy.float16, numpy.float64]) + @testing.for_orders("CF") + @testing.for_dtypes( + [numpy.int16, numpy.int64, numpy.float16, numpy.float64] + ) @testing.numpy_cupy_array_equal() def test_isinstance_numpy_copy(self, xp, dtype, order): a = numpy.arange(100, dtype=dtype).reshape(10, 10, order=order) @@ -184,7 +191,7 @@ def test_isinstance_numpy_copy(self, xp, dtype, order): # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') def test_isinstance_numpy_copy_wrong_dtype(self): - for xp in (numpy, cupy): + for _ in (numpy, cupy): a = numpy.arange(100, dtype=numpy.float64).reshape(10, 10) b = cupy.empty(a.shape, dtype=numpy.int32) with pytest.raises(ValueError): @@ -192,7 +199,7 @@ def test_isinstance_numpy_copy_wrong_dtype(self): # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') def test_isinstance_numpy_copy_wrong_shape(self): - for xp in (numpy, cupy): + for _ in (numpy, cupy): a = numpy.arange(100, 
dtype=numpy.float64).reshape(10, 10) b = cupy.empty(100, dtype=a.dtype) with pytest.raises(ValueError): @@ -207,19 +214,19 @@ def test_isinstance_numpy_copy_not_slice(self, xp): @testing.parameterize( - {'src_order': 'C'}, - {'src_order': 'F'}, + {"src_order": "C"}, + {"src_order": "F"}, ) @testing.gpu class TestNumPyArrayCopyView(unittest.TestCase): # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') - @testing.for_orders('CF') - @testing.for_dtypes([numpy.int16, numpy.int64, - numpy.float16, numpy.float64]) + @testing.for_orders("CF") + @testing.for_dtypes( + [numpy.int16, numpy.int64, numpy.float16, numpy.float64] + ) @testing.numpy_cupy_array_equal() def test_isinstance_numpy_view_copy_f(self, xp, dtype, order): - a = numpy.arange(100, dtype=dtype).reshape( - 10, 10, order=self.src_order) + a = numpy.arange(100, dtype=dtype).reshape(10, 10, order=self.src_order) a = a[2:5, 1:8] b = xp.empty(a.shape, dtype=dtype, order=order) b[:] = a diff --git a/tests/third_party/cupy/core_tests/test_ndarray_math.py b/tests/third_party/cupy/core_tests/test_ndarray_math.py index 36bd6979c6ec..19d814edc2dc 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_math.py +++ b/tests/third_party/cupy/core_tests/test_ndarray_math.py @@ -6,9 +6,13 @@ from tests.third_party.cupy import testing -@testing.parameterize(*testing.product({ - 'decimals': [-2, -1, 0, 1, 2], -})) +@testing.parameterize( + *testing.product( + { + "decimals": [-2, -1, 0, 1, 2], + } + ) +) class TestRound(unittest.TestCase): shape = (20,) @@ -29,18 +33,22 @@ def test_round(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_round_out(self, xp): - a = testing.shaped_random(self.shape, xp, scale=100, dtype='d') + a = testing.shaped_random(self.shape, xp, scale=100, dtype="d") out = xp.empty_like(a) a.round(self.decimals, out) return out -@testing.parameterize(*testing.product({ - # limit to: - # * <=0: values like 0.35 and 0.035 cannot be expressed exactly in IEEE 754 - # * >-4: to avoid float16 overflow - 'decimals': [-3, -2, -1, 0], -})) +@testing.parameterize( + *testing.product( + { + # limit to: + # * <=0: values like 0.35 and 0.035 cannot be expressed exactly in IEEE 754 + # * >-4: to avoid float16 overflow + "decimals": [-3, -2, -1, 0], + } + ) +) class TestRoundHalfway(unittest.TestCase): shape = (20,) @@ -52,7 +60,7 @@ def test_round_halfway_float(self, xp, dtype): a = testing.shaped_arange(self.shape, xp, dtype=dtype) a *= 2 a -= a.size + 1 - scale = 10**abs(self.decimals) + scale = 10 ** abs(self.decimals) if self.decimals < 0: a *= scale else: @@ -68,7 +76,7 @@ def test_round_halfway_int(self, xp, dtype): a = testing.shaped_arange(self.shape, xp, dtype=dtype) a *= 2 a -= a.size + 1 - scale = 10**abs(self.decimals) + scale = 10 ** abs(self.decimals) if self.decimals < 0: a *= xp.array(scale, dtype=dtype) a >>= 1 @@ -82,7 +90,7 @@ def test_round_halfway_uint(self, xp, dtype): a = testing.shaped_arange(self.shape, xp, dtype=dtype) a *= 2 a -= 1 - scale = 10**abs(self.decimals) + scale = 10 ** abs(self.decimals) if self.decimals < 0: a *= xp.array(scale, dtype=dtype) a >>= 1 @@ -90,24 +98,21 @@ def test_round_halfway_uint(self, xp, dtype): return a.round(self.decimals) -@testing.parameterize(*testing.product({ - 'decimals': [-5, -4, -3, -2, -1, 0] -})) +@testing.parameterize(*testing.product({"decimals": [-5, -4, -3, -2, -1, 0]})) class TestRoundMinMax(unittest.TestCase): - - @unittest.skip('Known incompatibility: see core.pyx') + @unittest.skip("Known incompatibility: see core.pyx") 
@testing.numpy_cupy_array_equal() def _test_round_int64(self, xp): - a = xp.array([-2**62, 2**62], dtype=xp.int64) + a = xp.array([-(2**62), 2**62], dtype=xp.int64) return a.round(self.decimals) - @unittest.skip('Known incompatibility: see core.pyx') + @unittest.skip("Known incompatibility: see core.pyx") @testing.numpy_cupy_array_equal() def test_round_uint64(self, xp): a = xp.array([2**63], dtype=xp.uint64) return a.round(self.decimals) - @unittest.skip('Known incompatibility: see core.pyx') + @unittest.skip("Known incompatibility: see core.pyx") @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_round_minmax(self, xp, dtype): diff --git a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py index 6f5f466062d0..0af722751c35 100644 --- a/tests/third_party/cupy/core_tests/test_ndarray_reduction.py +++ b/tests/third_party/cupy/core_tests/test_ndarray_reduction.py @@ -3,13 +3,13 @@ import numpy import dpnp as cupy + # from cupy.core import _accelerator from tests.third_party.cupy import testing @testing.gpu class TestArrayReduction(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_max_all(self, xp, dtype): @@ -61,19 +61,19 @@ def test_max_multiple_axes_keepdims(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_max_nan(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) return a.max() @testing.for_complex_dtypes() @testing.numpy_cupy_allclose() def test_max_nan_real(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) return a.max() @testing.for_complex_dtypes() @testing.numpy_cupy_allclose() def test_max_nan_imag(self, xp, dtype): - a = xp.array([float('nan') * 1.j, 1.j, -1.j], dtype) + a = xp.array([float("nan") * 1.0j, 1.0j, -1.0j], dtype) return a.max() @testing.for_all_dtypes() @@ -127,19 +127,19 @@ def test_min_multiple_axes_keepdims(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_min_nan(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) return a.min() @testing.for_complex_dtypes() @testing.numpy_cupy_allclose() def test_min_nan_real(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) return a.min() @testing.for_complex_dtypes() @testing.numpy_cupy_allclose() def test_min_nan_imag(self, xp, dtype): - a = xp.array([float('nan') * 1.j, 1.j, -1.j], dtype) + a = xp.array([float("nan") * 1.0j, 1.0j, -1.0j], dtype) return a.min() # skip bool: numpy's ptp raises a TypeError on bool inputs @@ -149,7 +149,7 @@ def test_ptp_all(self, xp, dtype): a = testing.shaped_random((2, 3), xp, dtype) return a.ptp() - @testing.with_requires('numpy>=1.15') + @testing.with_requires("numpy>=1.15") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_ptp_all_keepdims(self, xp, dtype): @@ -180,14 +180,14 @@ def test_ptp_axis2(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return a.ptp(axis=2) - @testing.with_requires('numpy>=1.15') + @testing.with_requires("numpy>=1.15") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_ptp_multiple_axes(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return a.ptp(axis=(1, 2)) - @testing.with_requires('numpy>=1.15') + @testing.with_requires("numpy>=1.15") 
@testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_ptp_multiple_axes_keepdims(self, xp, dtype): @@ -197,27 +197,31 @@ def test_ptp_multiple_axes_keepdims(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_ptp_nan(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) return a.ptp() @testing.for_complex_dtypes() @testing.numpy_cupy_allclose() def test_ptp_nan_real(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) return a.ptp() @testing.for_complex_dtypes() @testing.numpy_cupy_allclose() def test_ptp_nan_imag(self, xp, dtype): - a = xp.array([float('nan') * 1.j, 1.j, -1.j], dtype) + a = xp.array([float("nan") * 1.0j, 1.0j, -1.0j], dtype) return a.ptp() # This class compares CUB results against NumPy's -@testing.parameterize(*testing.product({ - 'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)], - 'order': ('C', 'F'), -})) +@testing.parameterize( + *testing.product( + { + "shape": [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)], + "order": ("C", "F"), + } + ) +) @testing.gpu # @unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled') class TestCubReduction(unittest.TestCase): @@ -231,12 +235,12 @@ class TestCubReduction(unittest.TestCase): # @testing.for_contiguous_axes() @testing.for_all_dtypes(no_bool=True, no_float16=True) - @testing.numpy_cupy_allclose(rtol=1E-5) + @testing.numpy_cupy_allclose(rtol=1e-5) def test_cub_min(self, xp, dtype, axis): a = testing.shaped_random(self.shape, xp, dtype) - if self.order in ('c', 'C'): + if self.order in ("c", "C"): a = xp.ascontiguousarray(a) - elif self.order in ('f', 'F'): + elif self.order in ("f", "F"): a = xp.asfortranarray(a) if xp is numpy: @@ -245,9 +249,9 @@ def test_cub_min(self, xp, dtype, axis): # xp is cupy, first ensure we really use CUB ret = cupy.empty(()) # Cython checks return type, need to fool it if len(axis) == len(self.shape): - func = 'cupy.core._routines_statistics.cub.device_reduce' + func = "cupy.core._routines_statistics.cub.device_reduce" else: - func = 'cupy.core._routines_statistics.cub.device_segmented_reduce' + func = "cupy.core._routines_statistics.cub.device_segmented_reduce" with testing.AssertFunctionIsCalled(func, return_value=ret): a.min(axis=axis) # ...then perform the actual computation @@ -255,12 +259,12 @@ def test_cub_min(self, xp, dtype, axis): # @testing.for_contiguous_axes() @testing.for_all_dtypes(no_bool=True, no_float16=True) - @testing.numpy_cupy_allclose(rtol=1E-5) + @testing.numpy_cupy_allclose(rtol=1e-5) def test_cub_max(self, xp, dtype, axis): a = testing.shaped_random(self.shape, xp, dtype) - if self.order in ('c', 'C'): + if self.order in ("c", "C"): a = xp.ascontiguousarray(a) - elif self.order in ('f', 'F'): + elif self.order in ("f", "F"): a = xp.asfortranarray(a) if xp is numpy: @@ -269,9 +273,9 @@ def test_cub_max(self, xp, dtype, axis): # xp is cupy, first ensure we really use CUB ret = cupy.empty(()) # Cython checks return type, need to fool it if len(axis) == len(self.shape): - func = 'cupy.core._routines_statistics.cub.device_reduce' + func = "cupy.core._routines_statistics.cub.device_reduce" else: - func = 'cupy.core._routines_statistics.cub.device_segmented_reduce' + func = "cupy.core._routines_statistics.cub.device_segmented_reduce" with testing.AssertFunctionIsCalled(func, return_value=ret): a.max(axis=axis) # ...then perform the actual computation diff --git 
a/tests/third_party/cupy/creation_tests/test_basic.py b/tests/third_party/cupy/creation_tests/test_basic.py index c9df1a92f355..0adf656843c3 100644 --- a/tests/third_party/cupy/creation_tests/test_basic.py +++ b/tests/third_party/cupy/creation_tests/test_basic.py @@ -19,7 +19,7 @@ def test_empty(self, xp, dtype, order): @testing.slow def test_empty_huge_size(self): - a = cupy.empty((1024, 2048, 1024), dtype='b') + a = cupy.empty((1024, 2048, 1024), dtype="b") a.fill(123) self.assertTrue((a == 123).all()) # Free huge memory for slow test @@ -28,7 +28,7 @@ def test_empty_huge_size(self): @testing.slow def test_empty_huge_size_fill0(self): - a = cupy.empty((1024, 2048, 1024), dtype='b') + a = cupy.empty((1024, 2048, 1024), dtype="b") a.fill(0) self.assertTrue((a == 0).all()) # Free huge memory for slow test @@ -53,7 +53,7 @@ def test_empty_int(self, xp, dtype, order): @testing.slow def test_empty_int_huge_size(self): - a = cupy.empty(2 ** 31, dtype='b') + a = cupy.empty(2**31, dtype="b") a.fill(123) self.assertTrue((a == 123).all()) # Free huge memory for slow test @@ -62,14 +62,14 @@ def test_empty_int_huge_size(self): @testing.slow def test_empty_int_huge_size_fill0(self): - a = cupy.empty(2 ** 31, dtype='b') + a = cupy.empty(2**31, dtype="b") a.fill(0) self.assertTrue((a == 0).all()) # Free huge memory for slow test del a cupy.get_default_memory_pool().free_all_blocks() - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like(self, xp, dtype, order): @@ -78,20 +78,20 @@ def test_empty_like(self, xp, dtype, order): b.fill(0) return b - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like_contiguity(self, xp, dtype, order): a = testing.shaped_arange((2, 3, 4), xp, dtype) b = xp.empty_like(a, order=order) b.fill(0) - if order in ['f', 'F']: + if order in ["f", "F"]: self.assertTrue(b.flags.f_contiguous) else: self.assertTrue(b.flags.c_contiguous) return b - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like_contiguity2(self, xp, dtype, order): @@ -99,13 +99,13 @@ def test_empty_like_contiguity2(self, xp, dtype, order): a = xp.asfortranarray(a) b = xp.empty_like(a, order=order) b.fill(0) - if order in ['c', 'C']: + if order in ["c", "C"]: self.assertTrue(b.flags.c_contiguous) else: self.assertTrue(b.flags.f_contiguous) return b - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like_contiguity3(self, xp, dtype, order): @@ -114,10 +114,10 @@ def test_empty_like_contiguity3(self, xp, dtype, order): a = a[:, ::2, :].swapaxes(0, 1) b = xp.empty_like(a, order=order) b.fill(0) - if order in ['k', 'K', None]: + if order in ["k", "K", None]: self.assertFalse(b.flags.c_contiguous) self.assertFalse(b.flags.f_contiguous) - elif order in ['f', 'F']: + elif order in ["f", "F"]: self.assertFalse(b.flags.c_contiguous) self.assertTrue(b.flags.f_contiguous) else: @@ -130,13 +130,13 @@ def test_empty_like_K_strides(self, dtype): # test strides that are both non-contiguous and non-descending a = testing.shaped_arange((2, 3, 4), numpy, dtype) a = a[:, ::2, :].swapaxes(0, 1) - b = numpy.empty_like(a, order='K') + b = numpy.empty_like(a, order="K") b.fill(0) # GPU case ag = testing.shaped_arange((2, 3, 4), cupy, dtype) ag = ag[:, ::2, :].swapaxes(0, 1) - bg = cupy.empty_like(ag, order='K') + bg = 
cupy.empty_like(ag, order="K") bg.fill(0) # make sure NumPy and CuPy strides agree @@ -148,7 +148,7 @@ def test_empty_like_invalid_order(self, dtype): for xp in (numpy, cupy): a = testing.shaped_arange((2, 3, 4), xp, dtype) with pytest.raises(TypeError): - xp.empty_like(a, order='Q') + xp.empty_like(a, order="Q") def test_empty_like_subok(self): a = testing.shaped_arange((2, 3, 4), cupy) @@ -157,8 +157,8 @@ def test_empty_like_subok(self): @testing.for_CF_orders() def test_empty_zero_sized_array_strides(self, order): - a = numpy.empty((1, 0, 2), dtype='d', order=order) - b = cupy.empty((1, 0, 2), dtype='d', order=order) + a = numpy.empty((1, 0, 2), dtype="d", order=order) + b = cupy.empty((1, 0, 2), dtype="d", order=order) self.assertEqual(b.strides, a.strides) @testing.for_all_dtypes() @@ -191,11 +191,11 @@ def test_zeros_int(self, xp, dtype, order): @testing.for_CF_orders() def test_zeros_strides(self, order): - a = numpy.zeros((2, 3), dtype='d', order=order) - b = cupy.zeros((2, 3), dtype='d', order=order) + a = numpy.zeros((2, 3), dtype="d", order=order) + b = cupy.zeros((2, 3), dtype="d", order=order) self.assertEqual(b.strides, a.strides) - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_zeros_like(self, xp, dtype, order): @@ -212,7 +212,7 @@ def test_zeros_like_subok(self): def test_ones(self, xp, dtype): return xp.ones((2, 3, 4), dtype=dtype) - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_ones_like(self, xp, dtype, order): @@ -239,7 +239,7 @@ def test_full_default_dtype(self, xp, dtype): def test_full_default_dtype_cpu_input(self, xp, dtype): return xp.full((2, 3, 4), numpy.array(1, dtype=dtype)) - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_full_like(self, xp, dtype, order): @@ -253,15 +253,16 @@ def test_full_like_subok(self): @testing.parameterize( - *testing.product({ - 'shape': [4, (4, ), (4, 2), (4, 2, 3), (5, 4, 2, 3)], - }) + *testing.product( + { + "shape": [4, (4,), (4, 2), (4, 2, 3), (5, 4, 2, 3)], + } + ) ) @testing.gpu class TestBasicReshape(unittest.TestCase): - - @testing.with_requires('numpy>=1.17.0') - @testing.for_orders('C') + @testing.with_requires("numpy>=1.17.0") + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like_reshape(self, xp, dtype, order): @@ -281,21 +282,21 @@ def test_empty_like_reshape_cupy_only(self, dtype, order): testing.assert_array_equal(b, c) - @testing.with_requires('numpy>=1.17.0') - @testing.for_orders('C') + @testing.with_requires("numpy>=1.17.0") + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like_reshape_contiguity(self, xp, dtype, order): a = testing.shaped_arange((2, 3, 4), xp, dtype) b = xp.empty_like(a, order=order, shape=self.shape) b.fill(0) - if order in ['f', 'F']: + if order in ["f", "F"]: self.assertTrue(b.flags.f_contiguous) else: self.assertTrue(b.flags.c_contiguous) return b - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() def test_empty_like_reshape_contiguity_cupy_only(self, dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, dtype) @@ -303,14 +304,14 @@ def test_empty_like_reshape_contiguity_cupy_only(self, dtype, order): b.fill(0) c = cupy.empty(self.shape) c.fill(0) - if order in ['f', 'F']: + if order in ["f", "F"]: self.assertTrue(b.flags.f_contiguous) 
else: self.assertTrue(b.flags.c_contiguous) testing.assert_array_equal(b, c) - @testing.with_requires('numpy>=1.17.0') - @testing.for_orders('C') + @testing.with_requires("numpy>=1.17.0") + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like_reshape_contiguity2(self, xp, dtype, order): @@ -319,14 +320,15 @@ def test_empty_like_reshape_contiguity2(self, xp, dtype, order): b = xp.empty_like(a, order=order, shape=self.shape) b.fill(0) shape = self.shape if not numpy.isscalar(self.shape) else (self.shape,) - if (order in ['c', 'C'] or - (order in ['k', 'K', None] and len(shape) != a.ndim)): + if order in ["c", "C"] or ( + order in ["k", "K", None] and len(shape) != a.ndim + ): self.assertTrue(b.flags.c_contiguous) else: self.assertTrue(b.flags.f_contiguous) return b - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() def test_empty_like_reshape_contiguity2_cupy_only(self, dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, dtype) @@ -336,15 +338,16 @@ def test_empty_like_reshape_contiguity2_cupy_only(self, dtype, order): c = cupy.empty(self.shape) c.fill(0) shape = self.shape if not numpy.isscalar(self.shape) else (self.shape,) - if (order in ['c', 'C'] or - (order in ['k', 'K', None] and len(shape) != a.ndim)): + if order in ["c", "C"] or ( + order in ["k", "K", None] and len(shape) != a.ndim + ): self.assertTrue(b.flags.c_contiguous) else: self.assertTrue(b.flags.f_contiguous) testing.assert_array_equal(b, c) - @testing.with_requires('numpy>=1.17.0') - @testing.for_orders('C') + @testing.with_requires("numpy>=1.17.0") + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_empty_like_reshape_contiguity3(self, xp, dtype, order): @@ -357,10 +360,10 @@ def test_empty_like_reshape_contiguity3(self, xp, dtype, order): if len(shape) == 1: self.assertTrue(b.flags.c_contiguous) self.assertTrue(b.flags.f_contiguous) - elif order in ['k', 'K', None] and len(shape) == a.ndim: + elif order in ["k", "K", None] and len(shape) == a.ndim: self.assertFalse(b.flags.c_contiguous) self.assertFalse(b.flags.f_contiguous) - elif order in ['f', 'F']: + elif order in ["f", "F"]: self.assertFalse(b.flags.c_contiguous) self.assertTrue(b.flags.f_contiguous) else: @@ -368,7 +371,7 @@ def test_empty_like_reshape_contiguity3(self, xp, dtype, order): self.assertFalse(b.flags.f_contiguous) return b - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() def test_empty_like_reshape_contiguity3_cupy_only(self, dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, dtype) @@ -380,10 +383,10 @@ def test_empty_like_reshape_contiguity3_cupy_only(self, dtype, order): if len(shape) == 1: self.assertTrue(b.flags.c_contiguous) self.assertTrue(b.flags.f_contiguous) - elif order in ['k', 'K', None] and len(shape) == a.ndim: + elif order in ["k", "K", None] and len(shape) == a.ndim: self.assertFalse(b.flags.c_contiguous) self.assertFalse(b.flags.f_contiguous) - elif order in ['f', 'F']: + elif order in ["f", "F"]: self.assertFalse(b.flags.c_contiguous) self.assertTrue(b.flags.f_contiguous) else: @@ -394,27 +397,27 @@ def test_empty_like_reshape_contiguity3_cupy_only(self, dtype, order): c.fill(0) testing.assert_array_equal(b, c) - @testing.with_requires('numpy>=1.17.0') + @testing.with_requires("numpy>=1.17.0") @testing.for_all_dtypes() def test_empty_like_K_strides_reshape(self, dtype): # test strides that are both non-contiguous and non-descending a = testing.shaped_arange((2, 3, 4), 
numpy, dtype) a = a[:, ::2, :].swapaxes(0, 1) - b = numpy.empty_like(a, order='K', shape=self.shape) + b = numpy.empty_like(a, order="K", shape=self.shape) b.fill(0) # GPU case ag = testing.shaped_arange((2, 3, 4), cupy, dtype) ag = ag[:, ::2, :].swapaxes(0, 1) - bg = cupy.empty_like(ag, order='K', shape=self.shape) + bg = cupy.empty_like(ag, order="K", shape=self.shape) bg.fill(0) # make sure NumPy and CuPy strides agree self.assertEqual(b.strides, bg.strides) return - @testing.with_requires('numpy>=1.17.0') - @testing.for_orders('C') + @testing.with_requires("numpy>=1.17.0") + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_zeros_like_reshape(self, xp, dtype, order): @@ -430,8 +433,8 @@ def test_zeros_like_reshape_cupy_only(self, dtype, order): testing.assert_array_equal(b, c) - @testing.with_requires('numpy>=1.17.0') - @testing.for_orders('C') + @testing.with_requires("numpy>=1.17.0") + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_ones_like_reshape(self, xp, dtype, order): @@ -446,8 +449,8 @@ def test_ones_like_reshape_cupy_only(self, dtype): testing.assert_array_equal(b, c) - @testing.with_requires('numpy>=1.17.0') - @testing.for_orders('C') + @testing.with_requires("numpy>=1.17.0") + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_full_like_reshape(self, xp, dtype, order): diff --git a/tests/third_party/cupy/creation_tests/test_from_data.py b/tests/third_party/cupy/creation_tests/test_from_data.py index a2fecb8d6418..c96ccc581805 100644 --- a/tests/third_party/cupy/creation_tests/test_from_data.py +++ b/tests/third_party/cupy/creation_tests/test_from_data.py @@ -1,39 +1,39 @@ import tempfile import unittest +import numpy import pytest import dpnp as cupy from tests.third_party.cupy import testing -import numpy @testing.gpu class TestFromData(unittest.TestCase): # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array(self, xp, dtype, order): return xp.array([[1, 2, 3], [2, 3, 4]], dtype=dtype, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_from_empty_list(self, xp, dtype, order): return xp.array([], dtype=dtype, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_from_nested_empty_list(self, xp, dtype, order): return xp.array([[], []], dtype=dtype, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_from_numpy(self, xp, dtype, order): @@ -41,7 +41,7 @@ def test_array_from_numpy(self, xp, dtype, order): return xp.array(a, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_from_numpy_scalar(self, xp, dtype, order): @@ -49,7 +49,7 @@ def test_array_from_numpy_scalar(self, xp, dtype, order): return xp.array(a, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_from_numpy_broad_cast(self, xp, dtype, order): @@ -57,8 +57,8 @@ def 
test_array_from_numpy_broad_cast(self, xp, dtype, order): a = numpy.broadcast_to(a, (2, 3, 4)) return xp.array(a, order=order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal(strides_check=True) def test_array_from_list_of_numpy(self, xp, dtype, src_order, dst_order): @@ -66,26 +66,30 @@ def test_array_from_list_of_numpy(self, xp, dtype, src_order, dst_order): # cupy.array() a = [ testing.shaped_arange((3, 4), numpy, dtype, src_order) + (12 * i) - for i in range(2)] + for i in range(2) + ] return xp.array(a, order=dst_order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal(strides_check=True) - def test_array_from_list_of_numpy_view(self, xp, dtype, src_order, - dst_order): + def test_array_from_list_of_numpy_view( + self, xp, dtype, src_order, dst_order + ): # compares numpy.array() with # cupy.array() # create a list of view of ndarrays a = [ - (testing.shaped_arange((3, 8), numpy, - dtype, src_order) + (24 * i))[:, ::2] - for i in range(2)] + (testing.shaped_arange((3, 8), numpy, dtype, src_order) + (24 * i))[ + :, ::2 + ] + for i in range(2) + ] return xp.array(a, order=dst_order) - @testing.for_orders('CFAK') + @testing.for_orders("CFAK") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal(strides_check=True) def test_array_from_list_of_numpy_scalar(self, xp, dtype, order): @@ -94,26 +98,28 @@ def test_array_from_list_of_numpy_scalar(self, xp, dtype, order): a = [numpy.array(i, dtype=dtype) for i in range(2)] return xp.array(a, order=order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal(strides_check=True) - def test_array_from_nested_list_of_numpy(self, xp, dtype, src_order, - dst_order): + def test_array_from_nested_list_of_numpy( + self, xp, dtype, src_order, dst_order + ): # compares numpy.array() with # cupy.array() a = [ - [testing.shaped_arange( - (3, 4), numpy, dtype, src_order) + (12 * i)] - for i in range(2)] + [testing.shaped_arange((3, 4), numpy, dtype, src_order) + (12 * i)] + for i in range(2) + ] return xp.array(a, order=dst_order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') - @testing.for_all_dtypes_combination(names=('dtype1', 'dtype2')) + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") + @testing.for_all_dtypes_combination(names=("dtype1", "dtype2")) @testing.numpy_cupy_array_equal(strides_check=True) def test_array_from_list_of_cupy( - self, xp, dtype1, dtype2, src_order, dst_order): + self, xp, dtype1, dtype2, src_order, dst_order + ): # compares numpy.array() with # cupy.array() a = [ @@ -122,36 +128,41 @@ def test_array_from_list_of_cupy( ] return xp.array(a, order=dst_order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal(strides_check=True) - def 
test_array_from_list_of_cupy_view(self, xp, dtype, src_order, - dst_order): + def test_array_from_list_of_cupy_view( + self, xp, dtype, src_order, dst_order + ): # compares numpy.array() with # cupy.array() # create a list of view of ndarrays a = [ - (testing.shaped_arange((3, 8), xp, - dtype, src_order) + (24 * i))[:, ::2] - for i in range(2)] + (testing.shaped_arange((3, 8), xp, dtype, src_order) + (24 * i))[ + :, ::2 + ] + for i in range(2) + ] return xp.array(a, order=dst_order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal(strides_check=True) - def test_array_from_nested_list_of_cupy(self, xp, dtype, src_order, - dst_order): + def test_array_from_nested_list_of_cupy( + self, xp, dtype, src_order, dst_order + ): # compares numpy.array() with # cupy.array() a = [ [testing.shaped_arange((3, 4), xp, dtype, src_order) + (12 * i)] - for i in range(2)] + for i in range(2) + ] return xp.array(a, order=dst_order) - @testing.for_orders('CFAK') + @testing.for_orders("CFAK") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal(strides_check=True) def test_array_from_list_of_cupy_scalar(self, xp, dtype, order): @@ -161,7 +172,7 @@ def test_array_from_list_of_cupy_scalar(self, xp, dtype, order): return xp.array(a, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_copy(self, xp, dtype, order): @@ -169,7 +180,7 @@ def test_array_copy(self, xp, dtype, order): return xp.array(a, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_copy_is_copied(self, xp, dtype, order): @@ -179,9 +190,9 @@ def test_array_copy_is_copied(self, xp, dtype, order): return b # @testing.for_orders('CFAK') - @testing.for_orders('C') - @testing.for_all_dtypes(name='dtype1', no_complex=True) - @testing.for_all_dtypes(name='dtype2') + @testing.for_orders("C") + @testing.for_all_dtypes(name="dtype1", no_complex=True) + @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal() def test_array_copy_with_dtype(self, xp, dtype1, dtype2, order): # complex to real makes no sense @@ -189,9 +200,9 @@ def test_array_copy_with_dtype(self, xp, dtype1, dtype2, order): return xp.array(a, dtype=dtype2, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') - @testing.for_all_dtypes(name='dtype1', no_complex=True) - @testing.for_all_dtypes(name='dtype2') + @testing.for_orders("C") + @testing.for_all_dtypes(name="dtype1", no_complex=True) + @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal() def test_array_copy_with_dtype_char(self, xp, dtype1, dtype2, order): # complex to real makes no sense @@ -199,70 +210,77 @@ def test_array_copy_with_dtype_char(self, xp, dtype1, dtype2, order): return xp.array(a, dtype=numpy.dtype(dtype2).char, order=order) # @testing.for_orders('CFAK') - @testing.for_orders('C') + @testing.for_orders("C") @testing.numpy_cupy_array_equal() def test_array_copy_with_dtype_being_none(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) return xp.array(a, dtype=None, order=order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') - @testing.for_all_dtypes(name='dtype1', no_complex=True) - 
@testing.for_all_dtypes(name='dtype2') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") + @testing.for_all_dtypes(name="dtype1", no_complex=True) + @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal(strides_check=True) - def test_array_copy_list_of_numpy_with_dtype(self, xp, dtype1, dtype2, - src_order, dst_order): + def test_array_copy_list_of_numpy_with_dtype( + self, xp, dtype1, dtype2, src_order, dst_order + ): # compares numpy.array() with # cupy.array() a = [ testing.shaped_arange((3, 4), numpy, dtype1, src_order) + (12 * i) - for i in range(2)] + for i in range(2) + ] return xp.array(a, dtype=dtype2, order=dst_order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') - @testing.for_all_dtypes(name='dtype1', no_complex=True) - @testing.for_all_dtypes(name='dtype2') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") + @testing.for_all_dtypes(name="dtype1", no_complex=True) + @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal(strides_check=True) - def test_array_copy_list_of_numpy_with_dtype_char(self, xp, dtype1, - dtype2, src_order, - dst_order): + def test_array_copy_list_of_numpy_with_dtype_char( + self, xp, dtype1, dtype2, src_order, dst_order + ): # compares numpy.array() with # cupy.array() a = [ testing.shaped_arange((3, 4), numpy, dtype1, src_order) + (12 * i) - for i in range(2)] + for i in range(2) + ] return xp.array(a, dtype=numpy.dtype(dtype2).char, order=dst_order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') - @testing.for_all_dtypes(name='dtype1', no_complex=True) - @testing.for_all_dtypes(name='dtype2') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") + @testing.for_all_dtypes(name="dtype1", no_complex=True) + @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal(strides_check=True) - def test_array_copy_list_of_cupy_with_dtype(self, xp, dtype1, dtype2, - src_order, dst_order): + def test_array_copy_list_of_cupy_with_dtype( + self, xp, dtype1, dtype2, src_order, dst_order + ): # compares numpy.array() with # cupy.array() a = [ testing.shaped_arange((3, 4), xp, dtype1, src_order) + (12 * i) - for i in range(2)] + for i in range(2) + ] return xp.array(a, dtype=dtype2, order=dst_order) - @testing.for_orders('CFAK', name='src_order') - @testing.for_orders('CFAK', name='dst_order') - @testing.for_all_dtypes(name='dtype1', no_complex=True) - @testing.for_all_dtypes(name='dtype2') + @testing.for_orders("CFAK", name="src_order") + @testing.for_orders("CFAK", name="dst_order") + @testing.for_all_dtypes(name="dtype1", no_complex=True) + @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal(strides_check=True) - def test_array_copy_list_of_cupy_with_dtype_char(self, xp, dtype1, dtype2, - src_order, dst_order): + def test_array_copy_list_of_cupy_with_dtype_char( + self, xp, dtype1, dtype2, src_order, dst_order + ): # compares numpy.array() with # cupy.array() a = [ testing.shaped_arange((3, 4), xp, dtype1, src_order) + (12 * i) - for i in range(2)] + for i in range(2) + ] return xp.array(a, dtype=numpy.dtype(dtype2).char, order=dst_order) - @testing.for_orders('CFAK') + @testing.for_orders("CFAK") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_no_copy(self, xp, dtype, order): @@ -271,11 +289,11 @@ def test_array_no_copy(self, xp, dtype, order): 
a.fill(0) return b - @testing.for_orders('CFAK') + @testing.for_orders("CFAK") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_array_f_contiguous_input(self, xp, dtype, order): - a = testing.shaped_arange((2, 3, 4), xp, dtype, order='F') + a = testing.shaped_arange((2, 3, 4), xp, dtype, order="F") b = xp.array(a, copy=False, order=order) return b @@ -283,33 +301,33 @@ def test_array_f_contiguous_input(self, xp, dtype, order): @testing.numpy_cupy_array_equal() def test_array_f_contiguous_output(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - b = xp.array(a, copy=False, order='F') + b = xp.array(a, copy=False, order="F") assert b.flags.f_contiguous return b # @testing.multi_gpu(2) # def test_array_multi_device(self): - # with cuda.Device(0): - # x = testing.shaped_arange((2, 3, 4), cupy, dtype='f') - # with cuda.Device(1): - # y = cupy.array(x) - # assert isinstance(y, cupy.ndarray) - # assert x is not y # Do copy - # assert int(x.device) == 0 - # assert int(y.device) == 1 - # testing.assert_array_equal(x, y) + # with cuda.Device(0): + # x = testing.shaped_arange((2, 3, 4), cupy, dtype='f') + # with cuda.Device(1): + # y = cupy.array(x) + # assert isinstance(y, cupy.ndarray) + # assert x is not y # Do copy + # assert int(x.device) == 0 + # assert int(y.device) == 1 + # testing.assert_array_equal(x, y) # @testing.multi_gpu(2) # def test_array_multi_device_zero_size(self): - # with cuda.Device(0): - # x = testing.shaped_arange((0,), cupy, dtype='f') - # with cuda.Device(1): - # y = cupy.array(x) - # assert isinstance(y, cupy.ndarray) - # assert x is not y # Do copy - # assert x.device.id == 0 - # assert y.device.id == 1 - # testing.assert_array_equal(x, y) + # with cuda.Device(0): + # x = testing.shaped_arange((0,), cupy, dtype='f') + # with cuda.Device(1): + # y = cupy.array(x) + # assert isinstance(y, cupy.ndarray) + # assert x is not y # Do copy + # assert x.device.id == 0 + # assert y.device.id == 1 + # testing.assert_array_equal(x, y) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -335,13 +353,13 @@ def test_asarray_is_not_copied(self, xp, dtype): return b # @testing.for_CF_orders() - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_asarray_with_order(self, xp, dtype, order): a = testing.shaped_arange((2, 3, 4), xp, dtype) b = xp.asarray(a, order=order) - if order in ['F', 'f']: + if order in ["F", "f"]: assert b.flags.f_contiguous else: assert b.flags.c_contiguous @@ -363,20 +381,20 @@ def test_asarray_preserves_numpy_array_order(self, xp, dtype, order): def test_asanyarray_with_order(self, xp, dtype, order): a = testing.shaped_arange((2, 3, 4), xp, dtype) b = xp.asanyarray(a, order=order) - if order in ['F', 'f']: + if order in ["F", "f"]: assert b.flags.f_contiguous else: assert b.flags.c_contiguous return b # @testing.for_CF_orders() - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_asarray_from_numpy(self, xp, dtype, order): a = testing.shaped_arange((2, 3, 4), numpy, dtype) b = xp.asarray(a, order=order) - if order in ['F', 'f']: + if order in ["F", "f"]: assert b.flags.f_contiguous else: assert b.flags.c_contiguous @@ -414,7 +432,7 @@ def test_asarray_cuda_array_zero_dim_dtype(self, xp): return xp.ascontiguousarray(a, dtype=numpy.int64) # @testing.for_CF_orders() - @testing.for_orders('C') + @testing.for_orders("C") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def 
test_copy(self, xp, dtype, order): @@ -427,11 +445,11 @@ def test_copy(self, xp, dtype, order): # @testing.for_CF_orders() # @testing.for_all_dtypes() # def test_copy_multigpu(self, dtype, order): - # with cuda.Device(0): - # src = cupy.random.uniform(-1, 1, (2, 3)).astype(dtype) - # with cuda.Device(1): - # dst = cupy.copy(src, order) - # testing.assert_allclose(src, dst, rtol=0, atol=0) + # with cuda.Device(0): + # src = cupy.random.uniform(-1, 1, (2, 3)).astype(dtype) + # with cuda.Device(1): + # dst = cupy.copy(src, order) + # testing.assert_allclose(src, dst, rtol=0, atol=0) @testing.for_CF_orders() @testing.numpy_cupy_equal() @@ -445,11 +463,11 @@ def test_asfortranarray_cuda_array_zero_dim(self, xp): a = xp.ones(()) return xp.asfortranarray(a) - @testing.for_all_dtypes_combination(['dtype_a', 'dtype_b'], - no_complex=True) + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"], no_complex=True) @testing.numpy_cupy_array_equal() def test_asfortranarray_cuda_array_zero_dim_dtype( - self, xp, dtype_a, dtype_b): + self, xp, dtype_a, dtype_b + ): a = xp.ones((), dtype=dtype_a) return xp.asfortranarray(a, dtype=dtype_b) @@ -471,136 +489,133 @@ def test_fromfile(self, xp): # @testing.gpu # @testing.parameterize(*testing.product({ - # 'ver': tuple(range(max_cuda_array_interface_version+1)), - # 'strides': (False, None, True), +# 'ver': tuple(range(max_cuda_array_interface_version+1)), +# 'strides': (False, None, True), # })) # class TestCudaArrayInterface(unittest.TestCase): - # @testing.for_all_dtypes() - # def test_base(self, dtype): - # a = testing.shaped_arange((2, 3, 4), cupy, dtype) - # b = cupy.asarray( - # DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) - # testing.assert_array_equal(a, b) - - # @testing.for_all_dtypes() - # def test_not_copied(self, dtype): - # a = testing.shaped_arange((2, 3, 4), cupy, dtype) - # b = cupy.asarray( - # DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) - # a.fill(0) - # testing.assert_array_equal(a, b) - - # @testing.for_all_dtypes() - # def test_order(self, dtype): - # a = testing.shaped_arange((2, 3, 4), cupy, dtype) - # b = cupy.asarray( - # DummyObjectWithCudaArrayInterface(a, self.ver, self.strides), - # order='F') - # assert b.flags.f_contiguous - # testing.assert_array_equal(a, b) - - # @testing.for_all_dtypes() - # def test_with_strides(self, dtype): - # a = testing.shaped_arange((2, 3, 4), cupy, dtype).T - # b = cupy.asarray( - # DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) - # assert a.strides == b.strides - # assert a.nbytes == b.data.mem.size - - # @testing.for_all_dtypes() - # def test_with_zero_size_array(self, dtype): - # a = testing.shaped_arange((0,), cupy, dtype) - # b = cupy.asarray( - # DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) - # assert a.strides == b.strides - # assert a.nbytes == b.data.mem.size - # assert a.data.ptr == 0 - # assert a.size == 0 - - # @testing.for_all_dtypes() - # def test_asnumpy(self, dtype): - # a = testing.shaped_arange((2, 3, 4), cupy, dtype) - # b = DummyObjectWithCudaArrayInterface(a, self.ver, self.strides) - # a_cpu = cupy.asnumpy(a) - # b_cpu = cupy.asnumpy(b) - # testing.assert_array_equal(a_cpu, b_cpu) +# @testing.for_all_dtypes() +# def test_base(self, dtype): +# a = testing.shaped_arange((2, 3, 4), cupy, dtype) +# b = cupy.asarray( +# DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) +# testing.assert_array_equal(a, b) + +# @testing.for_all_dtypes() +# def test_not_copied(self, dtype): +# a = 
testing.shaped_arange((2, 3, 4), cupy, dtype) +# b = cupy.asarray( +# DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) +# a.fill(0) +# testing.assert_array_equal(a, b) + +# @testing.for_all_dtypes() +# def test_order(self, dtype): +# a = testing.shaped_arange((2, 3, 4), cupy, dtype) +# b = cupy.asarray( +# DummyObjectWithCudaArrayInterface(a, self.ver, self.strides), +# order='F') +# assert b.flags.f_contiguous +# testing.assert_array_equal(a, b) + +# @testing.for_all_dtypes() +# def test_with_strides(self, dtype): +# a = testing.shaped_arange((2, 3, 4), cupy, dtype).T +# b = cupy.asarray( +# DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) +# assert a.strides == b.strides +# assert a.nbytes == b.data.mem.size + +# @testing.for_all_dtypes() +# def test_with_zero_size_array(self, dtype): +# a = testing.shaped_arange((0,), cupy, dtype) +# b = cupy.asarray( +# DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)) +# assert a.strides == b.strides +# assert a.nbytes == b.data.mem.size +# assert a.data.ptr == 0 +# assert a.size == 0 + +# @testing.for_all_dtypes() +# def test_asnumpy(self, dtype): +# a = testing.shaped_arange((2, 3, 4), cupy, dtype) +# b = DummyObjectWithCudaArrayInterface(a, self.ver, self.strides) +# a_cpu = cupy.asnumpy(a) +# b_cpu = cupy.asnumpy(b) +# testing.assert_array_equal(a_cpu, b_cpu) # @testing.gpu # @testing.parameterize(*testing.product({ - # 'ver': tuple(range(1, max_cuda_array_interface_version+1)), - # 'strides': (False, None, True), +# 'ver': tuple(range(1, max_cuda_array_interface_version+1)), +# 'strides': (False, None, True), # })) # class TestCudaArrayInterfaceMaskedArray(unittest.TestCase): - # # TODO(leofang): update this test when masked array is supported - # @testing.for_all_dtypes() - # def test_masked_array(self, dtype): - # a = testing.shaped_arange((2, 3, 4), cupy, dtype) - # mask = testing.shaped_arange((2, 3, 4), cupy, dtype) - # a = DummyObjectWithCudaArrayInterface(a, self.ver, self.strides, mask) - # with pytest.raises(ValueError) as ex: - # b = cupy.asarray(a) # noqa - # assert 'does not support' in str(ex.value) +# # TODO(leofang): update this test when masked array is supported +# @testing.for_all_dtypes() +# def test_masked_array(self, dtype): +# a = testing.shaped_arange((2, 3, 4), cupy, dtype) +# mask = testing.shaped_arange((2, 3, 4), cupy, dtype) +# a = DummyObjectWithCudaArrayInterface(a, self.ver, self.strides, mask) +# with pytest.raises(ValueError) as ex: +# b = cupy.asarray(a) # noqa +# assert 'does not support' in str(ex.value) # @testing.slow # @testing.gpu # class TestCudaArrayInterfaceBigArray(unittest.TestCase): - # def test_with_over_size_array(self): - # # real example from #3009 - # size = 5 * 10**8 - # try: - # a = testing.shaped_random((size,), cupy, cupy.float64) - # b = cupy.asarray(DummyObjectWithCudaArrayInterface(a, 2, None)) - # testing.assert_array_equal(a, b) - # except cupy.cuda.memory.OutOfMemoryError: - # pass - # else: - # del b, a - # finally: - # cupy.get_default_memory_pool().free_all_blocks() +# def test_with_over_size_array(self): +# # real example from #3009 +# size = 5 * 10**8 +# try: +# a = testing.shaped_random((size,), cupy, cupy.float64) +# b = cupy.asarray(DummyObjectWithCudaArrayInterface(a, 2, None)) +# testing.assert_array_equal(a, b) +# except cupy.cuda.memory.OutOfMemoryError: +# pass +# else: +# del b, a +# finally: +# cupy.get_default_memory_pool().free_all_blocks() # class DummyObjectWithCudaArrayInterface(object): - # def __init__(self, a, ver, 
include_strides=False, mask=None): - # assert ver in tuple(range(max_cuda_array_interface_version+1)) - # self.a = a - # self.ver = ver - # self.include_strides = include_strides - # self.mask = mask - - # @property - # def __cuda_array_interface__(self): - # desc = { - # 'shape': self.a.shape, - # 'typestr': self.a.dtype.str, - # 'descr': self.a.dtype.descr, - # 'data': (self.a.data.ptr, False), - # 'version': self.ver, - # } - # if self.a.flags.c_contiguous: - # if self.include_strides is True: - # desc['strides'] = self.a.strides - # elif self.include_strides is None: - # desc['strides'] = None - # else: # self.include_strides is False - # pass - # else: # F contiguous or neither - # desc['strides'] = self.a.strides - # if self.mask is not None: - # desc['mask'] = self.mask - # return desc +# def __init__(self, a, ver, include_strides=False, mask=None): +# assert ver in tuple(range(max_cuda_array_interface_version+1)) +# self.a = a +# self.ver = ver +# self.include_strides = include_strides +# self.mask = mask + +# @property +# def __cuda_array_interface__(self): +# desc = { +# 'shape': self.a.shape, +# 'typestr': self.a.dtype.str, +# 'descr': self.a.dtype.descr, +# 'data': (self.a.data.ptr, False), +# 'version': self.ver, +# } +# if self.a.flags.c_contiguous: +# if self.include_strides is True: +# desc['strides'] = self.a.strides +# elif self.include_strides is None: +# desc['strides'] = None +# else: # self.include_strides is False +# pass +# else: # F contiguous or neither +# desc['strides'] = self.a.strides +# if self.mask is not None: +# desc['mask'] = self.mask +# return desc @testing.parameterize( - *testing.product({ - 'ndmin': [0, 1, 2, 3], - 'copy': [True, False], - 'xp': [numpy, cupy] - }) + *testing.product( + {"ndmin": [0, 1, 2, 3], "copy": [True, False], "xp": [numpy, cupy]} + ) ) class TestArrayPreservationOfShape(unittest.TestCase): - @testing.for_all_dtypes() def test_cupy_array(self, dtype): shape = 2, 3 @@ -613,14 +628,11 @@ def test_cupy_array(self, dtype): @testing.parameterize( - *testing.product({ - 'ndmin': [0, 1, 2, 3], - 'copy': [True, False], - 'xp': [numpy, cupy] - }) + *testing.product( + {"ndmin": [0, 1, 2, 3], "copy": [True, False], "xp": [numpy, cupy]} + ) ) class TestArrayCopy(unittest.TestCase): - @testing.for_all_dtypes() def test_cupy_array(self, dtype): a = testing.shaped_arange((2, 3), self.xp, dtype) @@ -628,13 +640,15 @@ def test_cupy_array(self, dtype): should_copy = (self.xp is numpy) or self.copy # TODO(Kenta Oono): Better determination of copy. 
- is_copied = not ((actual is a) or (actual.base is a) or - (actual.base is a.base and a.base is not None)) + is_copied = not ( + (actual is a) + or (actual.base is a) + or (actual.base is a.base and a.base is not None) + ) assert should_copy == is_copied class TestArrayInvalidObject(unittest.TestCase): - def test_invalid_type(self): a = numpy.array([1, 2, 3], dtype=object) # with self.assertRaises(ValueError): diff --git a/tests/third_party/cupy/creation_tests/test_matrix.py b/tests/third_party/cupy/creation_tests/test_matrix.py index 5b2fb7cc51a6..c8028c0f14a1 100644 --- a/tests/third_party/cupy/creation_tests/test_matrix.py +++ b/tests/third_party/cupy/creation_tests/test_matrix.py @@ -9,7 +9,6 @@ @testing.gpu class TestMatrix(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_diag1(self, xp): a = testing.shaped_arange((3, 3), xp) @@ -104,13 +103,12 @@ def test_diagflat_from_scalar_with_k1(self, xp): @testing.parameterize( - {'shape': (2,)}, - {'shape': (3, 3)}, - {'shape': (4, 3)}, + {"shape": (2,)}, + {"shape": (3, 3)}, + {"shape": (4, 3)}, ) @testing.gpu class TestTri(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_tri(self, xp, dtype): @@ -128,14 +126,13 @@ def test_tri_posi(self, xp, dtype): @testing.parameterize( - {'shape': (2,)}, - {'shape': (3, 3)}, - {'shape': (4, 3)}, - {'shape': (2, 3, 4)}, + {"shape": (2,)}, + {"shape": (3, 3)}, + {"shape": (4, 3)}, + {"shape": (2, 3, 4)}, ) @testing.gpu class TestTriLowerAndUpper(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_tril(self, xp, dtype): diff --git a/tests/third_party/cupy/creation_tests/test_ranges.py b/tests/third_party/cupy/creation_tests/test_ranges.py index d765858023c2..51c34f5c6229 100644 --- a/tests/third_party/cupy/creation_tests/test_ranges.py +++ b/tests/third_party/cupy/creation_tests/test_ranges.py @@ -11,7 +11,6 @@ @testing.gpu class TestRanges(unittest.TestCase): - @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_arange(self, xp, dtype): @@ -87,18 +86,20 @@ def test_linspace_zero_num(self, xp, dtype): @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_linspace_zero_num_no_endopoint_with_retstep(self, xp, dtype): - x, step = xp.linspace(0, 10, 0, dtype=dtype, endpoint=False, - retstep=True) + x, step = xp.linspace( + 0, 10, 0, dtype=dtype, endpoint=False, retstep=True + ) self.assertTrue(math.isnan(step)) return x - @testing.with_requires('numpy>=1.18') + @testing.with_requires("numpy>=1.18") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_linspace_one_num_no_endopoint_with_retstep(self, xp, dtype): start, stop = 3, 7 - x, step = xp.linspace(start, stop, 1, dtype=dtype, endpoint=False, - retstep=True) + x, step = xp.linspace( + start, stop, 1, dtype=dtype, endpoint=False, retstep=True + ) self.assertEqual(step, stop - start) return x @@ -138,7 +139,7 @@ def test_linspace_neg_num(self): @testing.numpy_cupy_allclose() def test_linspace_float_overflow(self, xp): - return xp.linspace(0., sys.float_info.max / 5, 10, dtype=float) + return xp.linspace(0.0, sys.float_info.max / 5, 10, dtype=float) @testing.numpy_cupy_array_equal() def test_linspace_float_underflow(self, xp): @@ -146,51 +147,55 @@ def test_linspace_float_underflow(self, xp): x = sys.float_info.min while x / 2 > 0: x /= 2 - return xp.linspace(0., x, 10, dtype=float) + return xp.linspace(0.0, x, 10, dtype=float) - @testing.with_requires('numpy>=1.16') 
- @testing.for_all_dtypes_combination(names=('dtype_range', 'dtype_out'), - no_bool=True, no_complex=True) + @testing.with_requires("numpy>=1.16") + @testing.for_all_dtypes_combination( + names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True + ) @testing.numpy_cupy_array_equal() def test_linspace_array_start_stop(self, xp, dtype_range, dtype_out): start = xp.array([0, 120], dtype=dtype_range) stop = xp.array([100, 0], dtype=dtype_range) return xp.linspace(start, stop, num=50, dtype=dtype_out) - @testing.with_requires('numpy>=1.16') - @testing.for_all_dtypes_combination(names=('dtype_range', 'dtype_out'), - no_bool=True, no_complex=True) + @testing.with_requires("numpy>=1.16") + @testing.for_all_dtypes_combination( + names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True + ) @testing.numpy_cupy_array_equal() def test_linspace_mixed_start_stop(self, xp, dtype_range, dtype_out): start = 0.0 - if xp.dtype(dtype_range).kind in 'u': + if xp.dtype(dtype_range).kind in "u": stop = xp.array([100, 16], dtype=dtype_range) else: stop = xp.array([100, -100], dtype=dtype_range) return xp.linspace(start, stop, num=50, dtype=dtype_out) - @testing.with_requires('numpy>=1.16') - @testing.for_all_dtypes_combination(names=('dtype_range', 'dtype_out'), - no_bool=True, no_complex=True) + @testing.with_requires("numpy>=1.16") + @testing.for_all_dtypes_combination( + names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True + ) @testing.numpy_cupy_array_equal() def test_linspace_mixed_start_stop2(self, xp, dtype_range, dtype_out): - if xp.dtype(dtype_range).kind in 'u': + if xp.dtype(dtype_range).kind in "u": start = xp.array([160, 120], dtype=dtype_range) else: start = xp.array([-120, 120], dtype=dtype_range) stop = 0 return xp.linspace(start, stop, num=50, dtype=dtype_out) - @testing.with_requires('numpy>=1.16') - @testing.for_all_dtypes_combination(names=('dtype_range', 'dtype_out'), - no_bool=True, no_complex=True) + @testing.with_requires("numpy>=1.16") + @testing.for_all_dtypes_combination( + names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True + ) @testing.numpy_cupy_array_equal() def test_linspace_array_start_stop_axis1(self, xp, dtype_range, dtype_out): start = xp.array([0, 120], dtype=dtype_range) stop = xp.array([100, 0], dtype=dtype_range) return xp.linspace(start, stop, num=50, dtype=dtype_out, axis=1) - @testing.with_requires('numpy>=1.16') + @testing.with_requires("numpy>=1.16") @testing.for_complex_dtypes() @testing.numpy_cupy_array_equal() def test_linspace_complex_start_stop(self, xp, dtype): @@ -198,7 +203,7 @@ def test_linspace_complex_start_stop(self, xp, dtype): stop = xp.array([100, 0], dtype=dtype) return xp.linspace(start, stop, num=50, dtype=dtype) - @testing.with_requires('numpy>=1.16') + @testing.with_requires("numpy>=1.16") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_linspace_start_stop_list(self, xp, dtype): @@ -255,35 +260,39 @@ def test_logspace_base(self, xp, dtype): @testing.parameterize( - *testing.product({ - 'indexing': ['xy', 'ij'], - 'sparse': [False, True], - 'copy': [False, True], - }) + *testing.product( + { + "indexing": ["xy", "ij"], + "sparse": [False, True], + "copy": [False, True], + } + ) ) @testing.gpu class TestMeshgrid(unittest.TestCase): - @testing.for_all_dtypes() def test_meshgrid0(self, dtype): - out = cupy.meshgrid(indexing=self.indexing, sparse=self.sparse, - copy=self.copy) - assert(out == []) + out = cupy.meshgrid( + indexing=self.indexing, sparse=self.sparse, copy=self.copy 
+ ) + assert out == [] @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_meshgrid1(self, xp, dtype): x = xp.arange(2).astype(dtype) - return xp.meshgrid(x, indexing=self.indexing, sparse=self.sparse, - copy=self.copy) + return xp.meshgrid( + x, indexing=self.indexing, sparse=self.sparse, copy=self.copy + ) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_meshgrid2(self, xp, dtype): x = xp.arange(2).astype(dtype) y = xp.arange(3).astype(dtype) - return xp.meshgrid(x, y, indexing=self.indexing, sparse=self.sparse, - copy=self.copy) + return xp.meshgrid( + x, y, indexing=self.indexing, sparse=self.sparse, copy=self.copy + ) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -291,13 +300,13 @@ def test_meshgrid3(self, xp, dtype): x = xp.arange(2).astype(dtype) y = xp.arange(3).astype(dtype) z = xp.arange(4).astype(dtype) - return xp.meshgrid(x, y, z, indexing=self.indexing, sparse=self.sparse, - copy=self.copy) + return xp.meshgrid( + x, y, z, indexing=self.indexing, sparse=self.sparse, copy=self.copy + ) @testing.gpu class TestMgrid(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_mgrid0(self, xp): return xp.mgrid[0:] @@ -331,7 +340,6 @@ def test_mgrid5(self, xp): @testing.gpu class TestOgrid(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_ogrid0(self, xp): return xp.ogrid[0:] diff --git a/tests/third_party/cupy/fft_tests/test_fft.py b/tests/third_party/cupy/fft_tests/test_fft.py index cf23e4a5e902..0aefe81a4ed9 100644 --- a/tests/third_party/cupy/fft_tests/test_fft.py +++ b/tests/third_party/cupy/fft_tests/test_fft.py @@ -1,25 +1,29 @@ import functools -import pytest import string import unittest import numpy as np +import pytest import dpnp as cupy from tests.third_party.cupy import testing -@testing.parameterize(*testing.product({ - 'n': [None, 0, 5, 10, 15], - 'shape': [(0,), (10, 0), (10,), (10, 10)], - 'norm': [None, 'ortho', ''], -})) +@testing.parameterize( + *testing.product( + { + "n": [None, 0, 5, 10, 15], + "shape": [(0,), (10, 0), (10,), (10, 10)], + "norm": [None, "ortho", ""], + } + ) +) @testing.gpu class TestFft(unittest.TestCase): - @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError, - contiguous_check=False) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) def test_fft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.fft(a, n=self.n, norm=self.norm) @@ -27,8 +31,9 @@ def test_fft(self, xp, dtype): return out @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError, - contiguous_check=False) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) def test_ifft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.ifft(a, n=self.n, norm=self.norm) @@ -37,36 +42,36 @@ def test_ifft(self, xp, dtype): @testing.parameterize( - {'shape': (3, 4), 's': None, 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'}, - {'shape': (3, 4), 's': None, 'axes': (), 'norm': None}, - {'shape': (2, 3, 4), 
's': None, 'axes': None, 'norm': None}, - {'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None}, - {'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'}, - {'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None}, - {'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'}, - {'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None}, - {'shape': (0, 5), 's': None, 'axes': None, 'norm': None}, - {'shape': (2, 0, 5), 's': None, 'axes': None, 'norm': None}, - {'shape': (0, 0, 5), 's': None, 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': (0, 5), 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': (1, 0), 'axes': None, 'norm': None}, + {"shape": (3, 4), "s": None, "axes": None, "norm": None}, + {"shape": (3, 4), "s": (1, None), "axes": None, "norm": None}, + {"shape": (3, 4), "s": (1, 5), "axes": None, "norm": None}, + {"shape": (3, 4), "s": None, "axes": (-2, -1), "norm": None}, + {"shape": (3, 4), "s": None, "axes": (-1, -2), "norm": None}, + {"shape": (3, 4), "s": None, "axes": (0,), "norm": None}, + {"shape": (3, 4), "s": None, "axes": None, "norm": "ortho"}, + {"shape": (3, 4), "s": None, "axes": (), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": None, "norm": None}, + {"shape": (2, 3, 4), "s": (1, 4, None), "axes": None, "norm": None}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": None, "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (0, 1), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": None, "norm": "ortho"}, + {"shape": (2, 3, 4), "s": None, "axes": (), "norm": None}, + {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2), "norm": "ortho"}, + {"shape": (2, 3, 4, 5), "s": None, "axes": None, "norm": None}, + {"shape": (0, 5), "s": None, "axes": None, "norm": None}, + {"shape": (2, 0, 5), "s": None, "axes": None, "norm": None}, + {"shape": (0, 0, 5), "s": None, "axes": None, "norm": None}, + {"shape": (3, 4), "s": (0, 5), "axes": None, "norm": None}, + {"shape": (3, 4), "s": (1, 0), "axes": None, "norm": None}, ) @testing.gpu class TestFft2(unittest.TestCase): - @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError, - contiguous_check=False) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) def test_fft2(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.fft2(a, s=self.s, axes=self.axes, norm=self.norm) @@ -74,8 +79,9 @@ def test_fft2(self, xp, dtype): return out @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError, - contiguous_check=False) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) def test_ifft2(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.ifft2(a, s=self.s, axes=self.axes, norm=self.norm) @@ -84,37 +90,37 @@ def test_ifft2(self, xp, dtype): @testing.parameterize( - {'shape': (3, 4), 's': None, 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': (1, 
5), 'axes': None, 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': [-1, -2], 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': (), 'norm': None}, - {'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'}, - {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None}, - {'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None}, - {'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (-1, -3), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'}, - {'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': 'ortho'}, - {'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'}, - {'shape': (2, 3, 4), 's': (4, 3, 2), 'axes': (2, 0, 1), 'norm': 'ortho'}, - {'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None}, - {'shape': (0, 5), 's': None, 'axes': None, 'norm': None}, - {'shape': (2, 0, 5), 's': None, 'axes': None, 'norm': None}, - {'shape': (0, 0, 5), 's': None, 'axes': None, 'norm': None}, + {"shape": (3, 4), "s": None, "axes": None, "norm": None}, + {"shape": (3, 4), "s": (1, None), "axes": None, "norm": None}, + {"shape": (3, 4), "s": (1, 5), "axes": None, "norm": None}, + {"shape": (3, 4), "s": None, "axes": (-2, -1), "norm": None}, + {"shape": (3, 4), "s": None, "axes": (-1, -2), "norm": None}, + {"shape": (3, 4), "s": None, "axes": [-1, -2], "norm": None}, + {"shape": (3, 4), "s": None, "axes": (0,), "norm": None}, + {"shape": (3, 4), "s": None, "axes": (), "norm": None}, + {"shape": (3, 4), "s": None, "axes": None, "norm": "ortho"}, + {"shape": (2, 3, 4), "s": None, "axes": None, "norm": None}, + {"shape": (2, 3, 4), "s": (1, 4, None), "axes": None, "norm": None}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": None, "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (-1, -3), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (0, 1), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": None, "norm": "ortho"}, + {"shape": (2, 3, 4), "s": None, "axes": (), "norm": "ortho"}, + {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2), "norm": "ortho"}, + {"shape": (2, 3, 4), "s": (4, 3, 2), "axes": (2, 0, 1), "norm": "ortho"}, + {"shape": (2, 3, 4, 5), "s": None, "axes": None, "norm": None}, + {"shape": (0, 5), "s": None, "axes": None, "norm": None}, + {"shape": (2, 0, 5), "s": None, "axes": None, "norm": None}, + {"shape": (0, 0, 5), "s": None, "axes": None, "norm": None}, ) @testing.gpu class TestFftn(unittest.TestCase): - @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError, - contiguous_check=False) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) def test_fftn(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm) @@ -122,8 +128,9 @@ def test_fftn(self, xp, dtype): return out @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(rtol=1e-4, 
atol=1e-7, accept_error=ValueError, - contiguous_check=False) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) def test_ifftn(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm) @@ -131,14 +138,17 @@ def test_ifftn(self, xp, dtype): return out -@testing.parameterize(*testing.product({ - 'n': [None, 5, 10, 15], - 'shape': [(10,), (10, 10)], - 'norm': [None, 'ortho'], -})) +@testing.parameterize( + *testing.product( + { + "n": [None, 5, 10, 15], + "shape": [(10,), (10, 10)], + "norm": [None, "ortho"], + } + ) +) @testing.gpu class TestRfft(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) def test_rfft(self, xp, dtype): @@ -157,12 +167,11 @@ def test_irfft(self, xp, dtype): @testing.parameterize( - {'shape': (3, 4), 's': None, 'axes': (), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None}, + {"shape": (3, 4), "s": None, "axes": (), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (), "norm": None}, ) @testing.gpu class TestRfft2EmptyAxes(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) def test_rfft2(self, dtype): for xp in (np, cupy): @@ -179,12 +188,11 @@ def test_irfft2(self, dtype): @testing.parameterize( - {'shape': (3, 4), 's': None, 'axes': (), 'norm': None}, - {'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None}, + {"shape": (3, 4), "s": None, "axes": (), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (), "norm": None}, ) @testing.gpu class TestRfftnEmptyAxes(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) def test_rfftn(self, dtype): for xp in (np, cupy): @@ -200,14 +208,17 @@ def test_irfftn(self, dtype): xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm) -@testing.parameterize(*testing.product({ - 'n': [None, 5, 10, 15], - 'shape': [(10,), (10, 10)], - 'norm': [None, 'ortho'], -})) +@testing.parameterize( + *testing.product( + { + "n": [None, 5, 10, 15], + "shape": [(10,), (10, 10)], + "norm": [None, "ortho"], + } + ) +) @testing.gpu class TestHfft(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) def test_hfft(self, xp, dtype): @@ -226,13 +237,12 @@ def test_ihfft(self, xp, dtype): @testing.parameterize( - {'n': 1, 'd': 1}, - {'n': 10, 'd': 0.5}, - {'n': 100, 'd': 2}, + {"n": 1, "d": 1}, + {"n": 10, "d": 0.5}, + {"n": 100, "d": 2}, ) @testing.gpu class TestFftfreq(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) def test_fftfreq(self, xp, dtype): @@ -249,17 +259,16 @@ def test_rfftfreq(self, xp, dtype): @testing.parameterize( - {'shape': (5,), 'axes': None}, - {'shape': (5,), 'axes': 0}, - {'shape': (10,), 'axes': None}, - {'shape': (10,), 'axes': 0}, - {'shape': (10, 10), 'axes': None}, - {'shape': (10, 10), 'axes': 0}, - {'shape': (10, 10), 'axes': (0, 1)}, + {"shape": (5,), "axes": None}, + {"shape": (5,), "axes": 0}, + {"shape": (10,), "axes": None}, + {"shape": (10,), "axes": 0}, + {"shape": (10, 10), "axes": None}, + {"shape": (10, 10), "axes": 0}, + {"shape": (10, 10), "axes": (0, 1)}, ) @testing.gpu class TestFftshift(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) def test_fftshift(self, xp, dtype): diff --git 
a/tests/third_party/cupy/indexing_tests/test_generate.py b/tests/third_party/cupy/indexing_tests/test_generate.py index d9be22ed28aa..15cd78ff0230 100644 --- a/tests/third_party/cupy/indexing_tests/test_generate.py +++ b/tests/third_party/cupy/indexing_tests/test_generate.py @@ -9,7 +9,6 @@ @testing.gpu class TestIndices(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_indices_list0(self, xp, dtype): @@ -33,7 +32,6 @@ def test_indices_list3(self): @testing.gpu class TestIX_(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_ix_list(self, xp): return xp.ix_([0, 1], [2, 4]) @@ -54,7 +52,6 @@ def test_ix_bool_ndarray(self, xp): @testing.gpu class TestR_(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_r_1(self, xp, dtype): @@ -84,19 +81,19 @@ def test_r_3(self): def test_r_4(self, dtype): a = testing.shaped_arange((1, 3), cupy, dtype) with self.assertRaises(NotImplementedError): - cupy.r_['-1', a, a] + cupy.r_["-1", a, a] def test_r_5(self): with self.assertRaises(NotImplementedError): - cupy.r_['0,2', [1, 2, 3], [4, 5, 6]] + cupy.r_["0,2", [1, 2, 3], [4, 5, 6]] def test_r_6(self): with self.assertRaises(NotImplementedError): - cupy.r_['0,2,0', [1, 2, 3], [4, 5, 6]] + cupy.r_["0,2,0", [1, 2, 3], [4, 5, 6]] def test_r_7(self): with self.assertRaises(NotImplementedError): - cupy.r_['r', [1, 2, 3], [4, 5, 6]] + cupy.r_["r", [1, 2, 3], [4, 5, 6]] @testing.for_all_dtypes() def test_r_9(self, dtype): @@ -108,7 +105,6 @@ def test_r_9(self, dtype): @testing.gpu class TestC_(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_c_1(self, xp, dtype): @@ -134,7 +130,6 @@ def test_c_3(self, dtype): @testing.gpu class TestAxisConcatenator(unittest.TestCase): - def test_AxisConcatenator_init1(self): with self.assertRaises(TypeError): generate.AxisConcatenator.__init__() @@ -146,8 +141,7 @@ def test_len(self): @testing.gpu class TestUnravelIndex(unittest.TestCase): - - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes() @testing.numpy_cupy_array_equal(type_check=False) def test(self, xp, order, dtype): @@ -155,15 +149,15 @@ def test(self, xp, order, dtype): a = xp.minimum(a, 6 * 4 - 1) return xp.unravel_index(a, (6, 4), order=order) - @testing.with_requires('numpy>=1.19') + @testing.with_requires("numpy>=1.19") @testing.for_int_dtypes() def test_invalid_order(self, dtype): for xp in (numpy, cupy): a = testing.shaped_arange((4, 3, 2), xp, dtype) with pytest.raises(ValueError): - xp.unravel_index(a, (6, 4), order='V') + xp.unravel_index(a, (6, 4), order="V") - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes(no_bool=True) def test_invalid_index(self, order, dtype): for xp in (numpy, cupy): @@ -171,7 +165,7 @@ def test_invalid_index(self, order, dtype): with pytest.raises(ValueError): xp.unravel_index(a, (6, 4), order=order) - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_float_dtypes() def test_invalid_dtype(self, order, dtype): for xp in (numpy, cupy): @@ -182,8 +176,7 @@ def test_invalid_dtype(self, order, dtype): @testing.gpu class TestRavelMultiIndex(unittest.TestCase): - - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes() @testing.numpy_cupy_array_equal() def test_basic(self, xp, order, dtype): @@ -191,16 +184,17 @@ def test_basic(self, xp, order, dtype): a = [xp.ones(5, 
dtype=dtype)] * len(dims) return xp.ravel_multi_index(a, dims, order=order) - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_multi_index_broadcasting(self, xp, order, dtype): dims = (3, 5) - x, y = xp.meshgrid(*[xp.arange(s, dtype=dtype) for s in dims], - sparse=True) + x, y = xp.meshgrid( + *[xp.arange(s, dtype=dtype) for s in dims], sparse=True + ) return xp.ravel_multi_index((x, y), dims, order=order) - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes() @testing.numpy_cupy_array_equal() def test_basic_nd_coords(self, xp, order, dtype): @@ -208,37 +202,37 @@ def test_basic_nd_coords(self, xp, order, dtype): a = [xp.ones((3, 3, 3), dtype=dtype)] * len(dims) return xp.ravel_multi_index(a, dims, order=order) - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_basic_clip(self, xp, order, dtype): dims = (8, 4, 2) a = [xp.arange(max(dims), dtype=dtype)] * len(dims) - return xp.ravel_multi_index(a, dims, order=order, mode='clip') + return xp.ravel_multi_index(a, dims, order=order, mode="clip") - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_basic_wrap(self, xp, order, dtype): dims = (8, 4, 2) a = [xp.arange(max(dims), dtype=dtype)] * len(dims) - return xp.ravel_multi_index(a, dims, order=order, mode='wrap') + return xp.ravel_multi_index(a, dims, order=order, mode="wrap") - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes(no_bool=True) def test_basic_raise(self, order, dtype): for xp in (numpy, cupy): dims = (8, 4, 2) a = [xp.arange(max(dims), dtype=dtype)] * len(dims) with pytest.raises(ValueError): - return xp.ravel_multi_index(a, dims, order=order, mode='raise') + return xp.ravel_multi_index(a, dims, order=order, mode="raise") @testing.for_int_dtypes() def test_invalid_float_dims(self, dtype): for xp in (numpy, cupy): a = xp.ones((3, 5), dtype=dtype) with pytest.raises(TypeError): - xp.ravel_multi_index(a, (2., 4, 8.)) + xp.ravel_multi_index(a, (2.0, 4, 8.0)) @testing.for_float_dtypes() def test_invalid_multi_index_dtype(self, dtype): @@ -247,7 +241,7 @@ def test_invalid_multi_index_dtype(self, dtype): with pytest.raises(TypeError): xp.ravel_multi_index(a, (2, 4, 8)) - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes(no_bool=True) def test_invalid_multi_index_shape(self, order, dtype): for xp in (numpy, cupy): @@ -257,16 +251,16 @@ def test_invalid_multi_index_shape(self, order, dtype): with pytest.raises(ValueError): xp.ravel_multi_index(a, dims, order=order) - @testing.with_requires('numpy>=1.19') + @testing.with_requires("numpy>=1.19") @testing.for_int_dtypes(no_bool=True) def test_invalid_order(self, dtype): for xp in (numpy, cupy): dims = (8, 4) a = tuple([xp.arange(min(dims), dtype=dtype) for d in dims]) with pytest.raises(ValueError): - xp.ravel_multi_index(a, dims, order='V') + xp.ravel_multi_index(a, dims, order="V") - @testing.for_orders(['C', 'F', None]) + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes(no_bool=True) def test_dims_overflow(self, order, dtype): for xp in (numpy, cupy): @@ -274,13 +268,14 @@ def test_dims_overflow(self, order, dtype): a = tuple([xp.arange(min(dims), 
dtype=dtype) for d in dims]) with pytest.raises(ValueError): xp.ravel_multi_index( - a, (xp.iinfo(xp.int64).max, 4), order=order) + a, (xp.iinfo(xp.int64).max, 4), order=order + ) - @testing.with_requires('numpy>=1.19') + @testing.with_requires("numpy>=1.19") @testing.for_int_dtypes(no_bool=True) def test_invalid_mode(self, dtype): for xp in (numpy, cupy): dims = (8, 4) a = tuple([xp.arange(min(dims), dtype=dtype) for d in dims]) with pytest.raises(ValueError): - xp.ravel_multi_index(a, dims, mode='invalid') + xp.ravel_multi_index(a, dims, mode="invalid") diff --git a/tests/third_party/cupy/indexing_tests/test_indexing.py b/tests/third_party/cupy/indexing_tests/test_indexing.py index e3b6b18162b0..be65eb2da70d 100644 --- a/tests/third_party/cupy/indexing_tests/test_indexing.py +++ b/tests/third_party/cupy/indexing_tests/test_indexing.py @@ -9,7 +9,6 @@ @testing.gpu class TestIndexing(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_take_by_scalar(self, xp): a = testing.shaped_arange((2, 4, 3), xp) @@ -50,14 +49,14 @@ def test_take_index_range_overflow(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_take_along_axis(self, xp): - a = testing.shaped_random((2, 4, 3), xp, dtype='float32') - b = testing.shaped_random((2, 6, 3), xp, dtype='int64', scale=4) + a = testing.shaped_random((2, 4, 3), xp, dtype="float32") + b = testing.shaped_random((2, 6, 3), xp, dtype="int64", scale=4) return xp.take_along_axis(a, b, axis=-2) @testing.numpy_cupy_array_equal() def test_take_along_axis_none_axis(self, xp): - a = testing.shaped_random((2, 4, 3), xp, dtype='float32') - b = testing.shaped_random((30,), xp, dtype='int64', scale=24) + a = testing.shaped_random((2, 4, 3), xp, dtype="float32") + b = testing.shaped_random((30,), xp, dtype="int64", scale=24) return xp.take_along_axis(a, b, axis=None) @testing.numpy_cupy_array_equal() @@ -148,9 +147,9 @@ def test_diagonal_invalid2(self): @testing.numpy_cupy_array_equal() def test_extract(self, xp): a = testing.shaped_arange((3, 3), xp) - b = xp.array([[True, False, True], - [False, True, False], - [True, False, True]]) + b = xp.array( + [[True, False, True], [False, True, False], [True, False, True]] + ) return xp.extract(b, a) @testing.for_all_dtypes() @@ -163,23 +162,19 @@ def test_extract_no_bool(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_extract_shape_mismatch(self, xp): a = testing.shaped_arange((2, 3), xp) - b = xp.array([[True, False], - [True, False], - [True, False]]) + b = xp.array([[True, False], [True, False], [True, False]]) return xp.extract(b, a) @testing.numpy_cupy_array_equal() def test_extract_size_mismatch(self, xp): a = testing.shaped_arange((3, 3), xp) - b = xp.array([[True, False, True], - [False, True, False]]) + b = xp.array([[True, False, True], [False, True, False]]) return xp.extract(b, a) @testing.numpy_cupy_array_equal() def test_extract_size_mismatch2(self, xp): a = testing.shaped_arange((3, 3), xp) - b = xp.array([[True, False, True, False], - [False, True, False, True]]) + b = xp.array([[True, False, True, False], [False, True, False, True]]) return xp.extract(b, a) @testing.numpy_cupy_array_equal() @@ -191,7 +186,6 @@ def test_extract_empty_1dim(self, xp): @testing.gpu class TestChoose(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_choose(self, xp, dtype): @@ -218,22 +212,22 @@ def test_choose_broadcast2(self, xp, dtype): def test_choose_wrap(self, xp, dtype): a = xp.array([0, 3, -1, 5]) c = testing.shaped_arange((3, 4), xp, dtype) - return 
a.choose(c, mode='wrap') + return a.choose(c, mode="wrap") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_choose_clip(self, xp, dtype): a = xp.array([0, 3, -1, 5]) c = testing.shaped_arange((3, 4), xp, dtype) - return a.choose(c, mode='clip') + return a.choose(c, mode="clip") - @testing.with_requires('numpy>=1.19') + @testing.with_requires("numpy>=1.19") def test_unknown_clip(self): for xp in (numpy, cupy): a = xp.array([0, 3, -1, 5]) c = testing.shaped_arange((3, 4), xp, numpy.float32) with pytest.raises(ValueError): - a.choose(c, mode='unknow') + a.choose(c, mode="unknow") def test_raise(self): a = cupy.array([2]) @@ -252,7 +246,6 @@ def test_choose_broadcast_fail(self, dtype): @testing.gpu class TestSelect(unittest.TestCase): - @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_select(self, xp, dtype): diff --git a/tests/third_party/cupy/indexing_tests/test_insert.py b/tests/third_party/cupy/indexing_tests/test_insert.py index 722e3ee2a1ff..87e20ac1f938 100644 --- a/tests/third_party/cupy/indexing_tests/test_insert.py +++ b/tests/third_party/cupy/indexing_tests/test_insert.py @@ -7,10 +7,14 @@ from tests.third_party.cupy import testing -@testing.parameterize(*testing.product({ - 'shape': [(7,), (2, 3), (4, 3, 2)], - 'n_vals': [0, 1, 3, 15], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(7,), (2, 3), (4, 3, 2)], + "n_vals": [0, 1, 3, 15], + } + ) +) @testing.gpu class TestPlace(unittest.TestCase): @@ -29,15 +33,19 @@ def test_place(self, xp, dtype): return a -@testing.parameterize(*testing.product({ - 'shape': [(7,), (2, 3), (4, 3, 2)], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(7,), (2, 3), (4, 3, 2)], + } + ) +) @testing.gpu class TestPlaceRaises(unittest.TestCase): # NumPy 1.9 performs illegal memory access. # https://github.com/numpy/numpy/pull/5821 - @testing.with_requires('numpy>=1.10') + @testing.with_requires("numpy>=1.10") @testing.for_all_dtypes() def test_place_empty_value_error(self, dtype): for xp in (numpy, cupy): @@ -59,20 +67,23 @@ def test_place_shape_unmatch_error(self, dtype): xp.place(a, mask, vals) -@testing.parameterize(*testing.product({ - 'shape': [(7,), (2, 3), (4, 3, 2)], - 'mode': ['raise', 'wrap', 'clip'], - 'n_vals': [0, 1, 3, 4, 5], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(7,), (2, 3), (4, 3, 2)], + "mode": ["raise", "wrap", "clip"], + "n_vals": [0, 1, 3, 4, 5], + } + ) +) @testing.gpu class TestPut(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_put(self, xp, dtype): a = testing.shaped_arange(self.shape, xp, dtype) # Take care so that actual indices don't overlap. 
- if self.mode == 'raise': + if self.mode == "raise": inds = xp.array([2, -1, 3, 0]) else: inds = xp.array([2, -8, 3, 7]) @@ -81,12 +92,15 @@ def test_put(self, xp, dtype): return a -@testing.parameterize(*testing.product({ - 'shape': [(7,), (2, 3), (4, 3, 2)], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(7,), (2, 3), (4, 3, 2)], + } + ) +) @testing.gpu class TestPutScalars(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_put_index_scalar(self, xp): dtype = cupy.float32 @@ -107,12 +121,15 @@ def test_put_values_scalar(self, xp): return a -@testing.parameterize(*testing.product({ - 'shape': [(7,), (2, 3)], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(7,), (2, 3)], + } + ) +) @testing.gpu class TestPutRaises(unittest.TestCase): - @testing.for_all_dtypes() def test_put_inds_underflow_error(self, dtype): for xp in (numpy, cupy): @@ -120,7 +137,7 @@ def test_put_inds_underflow_error(self, dtype): inds = xp.array([2, -8, 3, 0]) vals = testing.shaped_random((4,), xp, dtype) with pytest.raises(IndexError): - xp.put(a, inds, vals, mode='raise') + xp.put(a, inds, vals, mode="raise") @testing.for_all_dtypes() def test_put_inds_overflow_error(self, dtype): @@ -129,9 +146,9 @@ def test_put_inds_overflow_error(self, dtype): inds = xp.array([2, -1, 3, 7]) vals = testing.shaped_random((4,), xp, dtype) with pytest.raises(IndexError): - xp.put(a, inds, vals, mode='raise') + xp.put(a, inds, vals, mode="raise") - @testing.with_requires('numpy>=1.19') + @testing.with_requires("numpy>=1.19") @testing.for_all_dtypes() def test_put_mode_error(self, dtype): for xp in (numpy, cupy): @@ -139,15 +156,14 @@ def test_put_mode_error(self, dtype): inds = xp.array([2, -1, 3, 0]) vals = testing.shaped_random((4,), xp, dtype) with pytest.raises(ValueError): - xp.put(a, inds, vals, mode='unknown') + xp.put(a, inds, vals, mode="unknown") @testing.parameterize( - *testing.product( - {'shape': [(0,), (1,), (2, 3), (2, 3, 4)]})) + *testing.product({"shape": [(0,), (1,), (2, 3), (2, 3, 4)]}) +) @testing.gpu class TestPutmaskSameShape(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_putmask(self, xp, dtype): @@ -161,18 +177,22 @@ def test_putmask(self, xp, dtype): @testing.parameterize( *testing.product( - {'shape': [(0,), (1,), (2, 3), (2, 3, 4)], - 'values_shape': [(2,), (3, 1), (5,)]})) + { + "shape": [(0,), (1,), (2, 3), (2, 3, 4)], + "values_shape": [(2,), (3, 1), (5,)], + } + ) +) @testing.gpu class TestPutmaskDifferentShapes(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_putmask(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype=dtype, seed=3) mask = testing.shaped_random(self.shape, xp, dtype=numpy.bool_, seed=4) - values = testing.shaped_random(self.values_shape, - xp, dtype=dtype, seed=5) + values = testing.shaped_random( + self.values_shape, xp, dtype=dtype, seed=5 + ) ret = xp.putmask(a, mask, values) assert ret is None return a @@ -180,7 +200,6 @@ def test_putmask(self, xp, dtype): @testing.gpu class TestPutmask(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_putmask_scalar_values(self, xp): shape = (2, 3) @@ -204,8 +223,7 @@ def test_putmask_int_mask_scalar_values(self, xp): class TestPutmaskDifferentDtypes(unittest.TestCase): - - @testing.for_all_dtypes_combination(names=['a_dtype', 'val_dtype']) + @testing.for_all_dtypes_combination(names=["a_dtype", "val_dtype"]) def test_putmask_differnt_dtypes_raises(self, a_dtype, val_dtype): shape = (2, 
3) for xp in (numpy, cupy): @@ -227,14 +245,17 @@ def test_putmask_differnt_dtypes_mask(self, xp, dtype): return a -@testing.parameterize(*testing.product({ - 'shape': [(3, 3), (2, 2, 2), (3, 5), (5, 3)], - 'val': [1, 0, (2,), (2, 2)], - 'wrap': [True, False], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(3, 3), (2, 2, 2), (3, 5), (5, 3)], + "val": [1, 0, (2,), (2, 2)], + "wrap": [True, False], + } + ) +) @testing.gpu class TestFillDiagonal(unittest.TestCase): - def _compute_val(self, xp): if type(self.val) is int: return self.val @@ -254,7 +275,8 @@ def test_fill_diagonal(self, xp, dtype): def test_columnar_slice(self, xp, dtype): # see cupy#2970 if self.shape == (2, 2, 2): pytest.skip( - 'The length of each dimension must be the same after slicing') + "The length of each dimension must be the same after slicing" + ) a = testing.shaped_arange(self.shape, xp, dtype) val = self._compute_val(xp) xp.fill_diagonal(a[:, 1:], val=val, wrap=self.wrap) @@ -269,48 +291,60 @@ def test_1darray(self, dtype): xp.fill_diagonal(a, val=val, wrap=self.wrap) -@testing.parameterize(*testing.product({ - 'n': [2, 4, -3, 0], - 'ndim': [2, 3, 1, 0, -2], -})) +@testing.parameterize( + *testing.product( + { + "n": [2, 4, -3, 0], + "ndim": [2, 3, 1, 0, -2], + } + ) +) @testing.gpu class TestDiagIndices(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_diag_indices(self, xp): return xp.diag_indices(self.n, self.ndim) -@testing.parameterize(*testing.product({ - 'n': [-3, 0], - 'ndim': [1, 0, -2], -})) +@testing.parameterize( + *testing.product( + { + "n": [-3, 0], + "ndim": [1, 0, -2], + } + ) +) @testing.gpu class TestDiagIndicesInvalidValues(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_diag_indices(self, xp): return xp.diag_indices(self.n, self.ndim) -@testing.parameterize(*testing.product({ - 'shape': [(3, 3), (0, 0), (2, 2, 2)], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(3, 3), (0, 0), (2, 2, 2)], + } + ) +) @testing.gpu class TestDiagIndicesFrom(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_diag_indices_from(self, xp): arr = testing.shaped_arange(self.shape, xp) return xp.diag_indices_from(arr) -@testing.parameterize(*testing.product({ - 'shape': [(3, 5), (3, 3, 4), (5,), (0,), (-1,)], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(3, 5), (3, 3, 4), (5,), (0,), (-1,)], + } + ) +) @testing.gpu class TestDiagIndicesFromRaises(unittest.TestCase): - def test_non_equal_dims(self): for xp in (numpy, cupy): arr = testing.shaped_arange(self.shape, xp) diff --git a/tests/third_party/cupy/indexing_tests/test_iterate.py b/tests/third_party/cupy/indexing_tests/test_iterate.py index ce89ac284ae6..603581b53aef 100644 --- a/tests/third_party/cupy/indexing_tests/test_iterate.py +++ b/tests/third_party/cupy/indexing_tests/test_iterate.py @@ -10,7 +10,6 @@ @testing.gpu class TestFlatiter(unittest.TestCase): - def test_base(self): for xp in (numpy, cupy): a = xp.zeros((2, 3, 4)) @@ -50,21 +49,20 @@ def test_copy_next(self, xp): @testing.parameterize( - {'shape': (2, 3, 4), 'index': Ellipsis}, - {'shape': (2, 3, 4), 'index': 0}, - {'shape': (2, 3, 4), 'index': 10}, - {'shape': (2, 3, 4), 'index': slice(None)}, - {'shape': (2, 3, 4), 'index': slice(None, 10)}, - {'shape': (2, 3, 4), 'index': slice(None, None, 2)}, - {'shape': (2, 3, 4), 'index': slice(None, None, -1)}, - {'shape': (2, 3, 4), 'index': slice(10, None, -1)}, - {'shape': (2, 3, 4), 'index': slice(10, None, -2)}, - {'shape': (), 'index': slice(None)}, - 
{'shape': (10,), 'index': slice(None)}, + {"shape": (2, 3, 4), "index": Ellipsis}, + {"shape": (2, 3, 4), "index": 0}, + {"shape": (2, 3, 4), "index": 10}, + {"shape": (2, 3, 4), "index": slice(None)}, + {"shape": (2, 3, 4), "index": slice(None, 10)}, + {"shape": (2, 3, 4), "index": slice(None, None, 2)}, + {"shape": (2, 3, 4), "index": slice(None, None, -1)}, + {"shape": (2, 3, 4), "index": slice(10, None, -1)}, + {"shape": (2, 3, 4), "index": slice(10, None, -2)}, + {"shape": (), "index": slice(None)}, + {"shape": (10,), "index": slice(None)}, ) @testing.gpu class TestFlatiterSubscript(unittest.TestCase): - @testing.for_CF_orders() @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -103,31 +101,29 @@ def test_setitem_ndarray_nd(self, xp, dtype, order): return a @testing.for_CF_orders() - @testing.for_all_dtypes_combination(('a_dtype', 'v_dtype')) + @testing.for_all_dtypes_combination(("a_dtype", "v_dtype")) @testing.numpy_cupy_array_equal() - def test_setitem_ndarray_different_types( - self, xp, a_dtype, v_dtype, order): + def test_setitem_ndarray_different_types(self, xp, a_dtype, v_dtype, order): if numpy.isscalar(self.index): pytest.skip() a = xp.zeros(self.shape, dtype=a_dtype, order=order) v = testing.shaped_arange((3,), xp, v_dtype, order) with warnings.catch_warnings(): - warnings.simplefilter('ignore', numpy.ComplexWarning) + warnings.simplefilter("ignore", numpy.ComplexWarning) a.flat[self.index] = v return a @testing.parameterize( - {'shape': (2, 3, 4), 'index': None}, - {'shape': (2, 3, 4), 'index': (0,)}, - {'shape': (2, 3, 4), 'index': True}, + {"shape": (2, 3, 4), "index": None}, + {"shape": (2, 3, 4), "index": (0,)}, + {"shape": (2, 3, 4), "index": True}, # printing behaviour of dparray ('index') makes imposibble to skip this test # {'shape': (2, 3, 4), 'index': cupy.array([0])}, - {'shape': (2, 3, 4), 'index': [0]}, + {"shape": (2, 3, 4), "index": [0]}, ) @testing.gpu class TestFlatiterSubscriptIndexError(unittest.TestCase): - @testing.for_all_dtypes() def test_getitem(self, dtype): a = testing.shaped_arange(self.shape, cupy, dtype) diff --git a/tests/third_party/cupy/linalg_tests/test_einsum.py b/tests/third_party/cupy/linalg_tests/test_einsum.py index f4e2f623e908..cfb697f63ea1 100644 --- a/tests/third_party/cupy/linalg_tests/test_einsum.py +++ b/tests/third_party/cupy/linalg_tests/test_einsum.py @@ -14,26 +14,25 @@ def _dec_shape(shape, dec): class TestEinSumError(unittest.TestCase): - def test_irregular_ellipsis1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('..', xp.zeros((2, 2, 2))) + xp.einsum("..", xp.zeros((2, 2, 2))) def test_irregular_ellipsis2(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('...i...', xp.zeros((2, 2, 2))) + xp.einsum("...i...", xp.zeros((2, 2, 2))) def test_irregular_ellipsis3(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i...->...i...', xp.zeros((2, 2, 2))) + xp.einsum("i...->...i...", xp.zeros((2, 2, 2))) def test_irregular_ellipsis4(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('...->', xp.zeros((2, 2, 2))) + xp.einsum("...->", xp.zeros((2, 2, 2))) def test_no_arguments(self): for xp in (numpy, cupy): @@ -43,7 +42,7 @@ def test_no_arguments(self): def test_one_argument(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('') + xp.einsum("") def test_not_string_subject(self): for xp in (numpy, cupy): @@ -53,142 +52,139 @@ def test_not_string_subject(self): def test_bad_argument(self): for 
xp in (numpy, cupy): with pytest.raises(TypeError): - xp.einsum('', 0, bad_arg=0) + xp.einsum("", 0, bad_arg=0) def test_too_many_operands1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('', 0, 0) + xp.einsum("", 0, 0) def test_too_many_operands2(self): for xp in (numpy, cupy): with pytest.raises(ValueError): xp.einsum( - 'i,j', - xp.array([0, 0]), - xp.array([0, 0]), - xp.array([0, 0])) + "i,j", xp.array([0, 0]), xp.array([0, 0]), xp.array([0, 0]) + ) def test_too_few_operands1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum(',', 0) + xp.einsum(",", 0) def test_too_many_dimension1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i', 0) + xp.einsum("i", 0) def test_too_many_dimension2(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('ij', xp.array([0, 0])) + xp.einsum("ij", xp.array([0, 0])) def test_too_many_dimension3(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('ijk...->...', xp.arange(6).reshape(2, 3)) + xp.einsum("ijk...->...", xp.arange(6).reshape(2, 3)) def test_too_few_dimension(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i->i', xp.arange(6).reshape(2, 3)) + xp.einsum("i->i", xp.arange(6).reshape(2, 3)) def test_invalid_char1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i%', xp.array([0, 0])) + xp.einsum("i%", xp.array([0, 0])) def test_invalid_char2(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('j$', xp.array([0, 0])) + xp.einsum("j$", xp.array([0, 0])) def test_invalid_char3(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i->&', xp.array([0, 0])) + xp.einsum("i->&", xp.array([0, 0])) # output subscripts must appear in inumpy.t def test_invalid_output_subscripts1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i->ij', xp.array([0, 0])) + xp.einsum("i->ij", xp.array([0, 0])) # output subscripts may only be specified once def test_invalid_output_subscripts2(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('ij->jij', xp.array([[0, 0], [0, 0]])) + xp.einsum("ij->jij", xp.array([[0, 0], [0, 0]])) # output subscripts must not incrudes comma def test_invalid_output_subscripts3(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('ij->i,j', xp.array([[0, 0], [0, 0]])) + xp.einsum("ij->i,j", xp.array([[0, 0], [0, 0]])) # dimensions much match when being collapsed def test_invalid_diagonal1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('ii', xp.arange(6).reshape(2, 3)) + xp.einsum("ii", xp.arange(6).reshape(2, 3)) def test_invalid_diagonal2(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('ii->', xp.arange(6).reshape(2, 3)) + xp.einsum("ii->", xp.arange(6).reshape(2, 3)) def test_invalid_diagonal3(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('ii', xp.arange(3).reshape(1, 3)) + xp.einsum("ii", xp.arange(3).reshape(1, 3)) def test_dim_mismatch_char1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i,i', xp.arange(2), xp.arange(3)) + xp.einsum("i,i", xp.arange(2), xp.arange(3)) def test_dim_mismatch_ellipsis1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('...,...', xp.arange(2), xp.arange(3)) + xp.einsum("...,...", xp.arange(2), xp.arange(3)) def test_dim_mismatch_ellipsis2(self): for xp in (numpy, cupy): a = 
xp.arange(12).reshape(2, 3, 2) with pytest.raises(ValueError): - xp.einsum('i...,...i', a, a) + xp.einsum("i...,...i", a, a) def test_dim_mismatch_ellipsis3(self): for xp in (numpy, cupy): a = xp.arange(12).reshape(2, 3, 2) with pytest.raises(ValueError): - xp.einsum('...,...', a, a[:, :2]) + xp.einsum("...,...", a, a[:, :2]) # invalid -> operator def test_invalid_arrow1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i-i', xp.array([0, 0])) + xp.einsum("i-i", xp.array([0, 0])) def test_invalid_arrow2(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i>i', xp.array([0, 0])) + xp.einsum("i>i", xp.array([0, 0])) def test_invalid_arrow3(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i->->i', xp.array([0, 0])) + xp.einsum("i->->i", xp.array([0, 0])) def test_invalid_arrow4(self): for xp in (numpy, cupy): with pytest.raises(ValueError): - xp.einsum('i-', xp.array([0, 0])) + xp.einsum("i-", xp.array([0, 0])) class TestListArgEinSumError(unittest.TestCase): - def test_invalid_sub1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): @@ -239,12 +235,12 @@ class TestEinSumUnaryOperationWithScalar(unittest.TestCase): @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_scalar_int(self, xp, dtype): - return xp.asarray(xp.einsum('->', 2, dtype=dtype)) + return xp.asarray(xp.einsum("->", 2, dtype=dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_scalar_float(self, xp, dtype): - return xp.asarray(xp.einsum('', 2.0, dtype=dtype)) + return xp.asarray(xp.einsum("", 2.0, dtype=dtype)) class TestEinSumBinaryOperationWithScalar(unittest.TestCase): @@ -253,36 +249,44 @@ class TestEinSumBinaryOperationWithScalar(unittest.TestCase): def test_scalar_1(self, xp, dtype): shape_a = (2,) a = testing.shaped_arange(shape_a, xp, dtype) - return xp.asarray(xp.einsum(',i->', 3, a)) + return xp.asarray(xp.einsum(",i->", 3, a)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_scalar_2(self, xp, dtype): shape_a = (2,) a = testing.shaped_arange(shape_a, xp, dtype) - return xp.asarray(xp.einsum('i,->', a, 4)) - - -@testing.parameterize(*([ - # memory constraint - {'subscript': 'a,b,c->abc', 'opt': ('greedy', 0)}, - {'subscript': 'acdf,jbje,gihb,hfac', 'opt': ('greedy', 0)}, -] + testing.product({'subscript': [ - # long paths - 'acdf,jbje,gihb,hfac,gfac,gifabc,hfac', - 'chd,bde,agbc,hiad,bdi,cgh,agdb', - # edge cases - 'eb,cb,fb->cef', - 'dd,fb,be,cdb->cef', - 'bca,cdb,dbf,afc->', - 'dcc,fce,ea,dbf->ab', - 'a,ac,ab,ad,cd,bd,bc->', -], 'opt': ['greedy', 'optimal'], -}))) + return xp.asarray(xp.einsum("i,->", a, 4)) + + +@testing.parameterize( + *( + [ + # memory constraint + {"subscript": "a,b,c->abc", "opt": ("greedy", 0)}, + {"subscript": "acdf,jbje,gihb,hfac", "opt": ("greedy", 0)}, + ] + + testing.product( + { + "subscript": [ + # long paths + "acdf,jbje,gihb,hfac,gfac,gifabc,hfac", + "chd,bde,agbc,hiad,bdi,cgh,agdb", + # edge cases + "eb,cb,fb->cef", + "dd,fb,be,cdb->cef", + "bca,cdb,dbf,afc->", + "dcc,fce,ea,dbf->ab", + "a,ac,ab,ad,cd,bd,bc->", + ], + "opt": ["greedy", "optimal"], + } + ) + ) +) class TestEinSumLarge(unittest.TestCase): - def setUp(self): - chars = 'abcdefghij' + chars = "abcdefghij" sizes = numpy.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) size_dict = {} for size, char in zip(sizes, chars): @@ -291,7 +295,7 @@ def setUp(self): # Builds views based off initial operands string = self.subscript operands = [string] - terms = 
string.split('->')[0].split(',') + terms = string.split("->")[0].split(",") for term in terms: dims = [size_dict[x] for x in term] operands.append(numpy.ones(*dims)) @@ -304,10 +308,11 @@ def test_einsum(self, xp): with warnings.catch_warnings(record=True) as ws: # I hope there's no problem with np.einsum for these cases... out = xp.einsum(*self.operands, optimize=self.opt) - if xp is not numpy and \ - isinstance(self.opt, tuple): # with memory limit + if xp is not numpy and isinstance( + self.opt, tuple + ): # with memory limit for w in ws: - self.assertIn('memory', str(w.message)) + self.assertIn("memory", str(w.message)) else: self.assertEqual(len(ws), 0) return out diff --git a/tests/third_party/cupy/linalg_tests/test_product.py b/tests/third_party/cupy/linalg_tests/test_product.py index a4eff836e12c..9624d734649e 100644 --- a/tests/third_party/cupy/linalg_tests/test_product.py +++ b/tests/third_party/cupy/linalg_tests/test_product.py @@ -7,34 +7,37 @@ from tests.third_party.cupy import testing -@testing.parameterize(*testing.product({ - 'shape': [ - ((2, 3, 4), (3, 4, 2)), - ((1, 1), (1, 1)), - ((1, 1), (1, 2)), - ((1, 2), (2, 1)), - ((2, 1), (1, 1)), - ((1, 2), (2, 3)), - ((2, 1), (1, 3)), - ((2, 3), (3, 1)), - ((2, 3), (3, 4)), - ((0, 3), (3, 4)), - ((2, 3), (3, 0)), - ((0, 3), (3, 0)), - ((3, 0), (0, 4)), - ((2, 3, 0), (3, 0, 2)), - ((0, 0), (0, 0)), - ((3,), (3,)), - ((2,), (2, 4)), - ((4, 2), (2,)), - ], - 'trans_a': [True, False], - 'trans_b': [True, False], -})) +@testing.parameterize( + *testing.product( + { + "shape": [ + ((2, 3, 4), (3, 4, 2)), + ((1, 1), (1, 1)), + ((1, 1), (1, 2)), + ((1, 2), (2, 1)), + ((2, 1), (1, 1)), + ((1, 2), (2, 3)), + ((2, 1), (1, 3)), + ((2, 3), (3, 1)), + ((2, 3), (3, 4)), + ((0, 3), (3, 4)), + ((2, 3), (3, 0)), + ((0, 3), (3, 0)), + ((3, 0), (0, 4)), + ((2, 3, 0), (3, 0, 2)), + ((0, 0), (0, 0)), + ((3,), (3,)), + ((2,), (2, 4)), + ((4, 2), (2,)), + ], + "trans_a": [True, False], + "trans_b": [True, False], + } + ) +) @testing.gpu class TestDot(unittest.TestCase): - - @testing.for_all_dtypes_combination(['dtype_a', 'dtype_b']) + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) @testing.numpy_cupy_allclose() def test_dot(self, xp, dtype_a, dtype_b): shape_a, shape_b = self.shape @@ -48,9 +51,9 @@ def test_dot(self, xp, dtype_a, dtype_b): b = testing.shaped_arange(shape_b, xp, dtype_b) return xp.dot(a, b) - @testing.for_float_dtypes(name='dtype_a') - @testing.for_float_dtypes(name='dtype_b') - @testing.for_float_dtypes(name='dtype_c') + @testing.for_float_dtypes(name="dtype_a") + @testing.for_float_dtypes(name="dtype_b") + @testing.for_float_dtypes(name="dtype_c") @testing.numpy_cupy_allclose(accept_error=ValueError) def test_dot_with_out(self, xp, dtype_a, dtype_b, dtype_c): shape_a, shape_b = self.shape @@ -72,27 +75,30 @@ def test_dot_with_out(self, xp, dtype_a, dtype_b, dtype_c): return c -@testing.parameterize(*testing.product({ - 'params': [ - # Test for 0 dimension - ((3, ), (3, ), -1, -1, -1), - # Test for basic cases - ((1, 2), (1, 2), -1, -1, 1), - ((1, 3), (1, 3), 1, -1, -1), - ((1, 2), (1, 3), -1, -1, 1), - ((2, 2), (1, 3), -1, -1, 0), - ((3, 3), (1, 2), 0, -1, -1), - ((0, 3), (0, 3), -1, -1, -1), - # Test for higher dimensions - ((2, 0, 3), (2, 0, 3), 0, 0, 0), - ((2, 4, 5, 3), (2, 4, 5, 3), -1, -1, 0), - ((2, 4, 5, 2), (2, 4, 5, 2), 0, 0, -1), - ], -})) +@testing.parameterize( + *testing.product( + { + "params": [ + # Test for 0 dimension + ((3,), (3,), -1, -1, -1), + # Test for basic cases + ((1, 2), (1, 2), -1, -1, 1), + 
((1, 3), (1, 3), 1, -1, -1), + ((1, 2), (1, 3), -1, -1, 1), + ((2, 2), (1, 3), -1, -1, 0), + ((3, 3), (1, 2), 0, -1, -1), + ((0, 3), (0, 3), -1, -1, -1), + # Test for higher dimensions + ((2, 0, 3), (2, 0, 3), 0, 0, 0), + ((2, 4, 5, 3), (2, 4, 5, 3), -1, -1, 0), + ((2, 4, 5, 2), (2, 4, 5, 2), 0, 0, -1), + ], + } + ) +) @testing.gpu class TestCrossProduct(unittest.TestCase): - - @testing.for_all_dtypes_combination(['dtype_a', 'dtype_b']) + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) @testing.numpy_cupy_allclose() def test_cross(self, xp, dtype_a, dtype_b): if dtype_a == dtype_b == numpy.bool_: @@ -104,19 +110,22 @@ def test_cross(self, xp, dtype_a, dtype_b): return xp.cross(a, b, axisa, axisb, axisc) -@testing.parameterize(*testing.product({ - 'shape': [ - ((), ()), - ((), (2, 4)), - ((4, 2), ()), - ], - 'trans_a': [True, False], - 'trans_b': [True, False], -})) +@testing.parameterize( + *testing.product( + { + "shape": [ + ((), ()), + ((), (2, 4)), + ((4, 2), ()), + ], + "trans_a": [True, False], + "trans_b": [True, False], + } + ) +) @testing.gpu class TestDotFor0Dim(unittest.TestCase): - - @testing.for_all_dtypes_combination(['dtype_a', 'dtype_b']) + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) @testing.numpy_cupy_allclose(contiguous_check=False) def test_dot(self, xp, dtype_a, dtype_b): shape_a, shape_b = self.shape @@ -133,7 +142,6 @@ def test_dot(self, xp, dtype_a, dtype_b): @testing.gpu class TestProduct(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_dot_vec1(self, xp, dtype): @@ -176,7 +184,7 @@ def test_transposed_dot_with_out_f_contiguous(self, dtype): for xp in (numpy, cupy): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(1, 0, 2) b = testing.shaped_arange((4, 2, 3), xp, dtype).transpose(2, 0, 1) - c = xp.ndarray((3, 2, 3, 2), dtype=dtype, order='F') + c = xp.ndarray((3, 2, 3, 2), dtype=dtype, order="F") with pytest.raises(ValueError): # Only C-contiguous array is acceptable xp.dot(a, b, out=c) @@ -220,8 +228,7 @@ def test_multidim_vdot(self, xp, dtype): @testing.numpy_cupy_allclose() def test_transposed_multidim_vdot(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1) - b = testing.shaped_arange( - (2, 2, 2, 3), xp, dtype).transpose(1, 3, 0, 2) + b = testing.shaped_arange((2, 2, 2, 3), xp, dtype).transpose(1, 3, 0, 2) return xp.vdot(a, b) @testing.for_all_dtypes() @@ -304,16 +311,16 @@ def test_tensordot_with_int_axes(self, xp, dtype): def test_transposed_tensordot_with_int_axes(self, xp, dtype): if dtype in (numpy.uint8, numpy.int8, numpy.uint16, numpy.int16): # Avoid overflow - a = testing.shaped_arange( - (1, 2, 3), xp, dtype).transpose(2, 0, 1) - b = testing.shaped_arange( - (3, 2, 1), xp, dtype).transpose(2, 1, 0) + a = testing.shaped_arange((1, 2, 3), xp, dtype).transpose(2, 0, 1) + b = testing.shaped_arange((3, 2, 1), xp, dtype).transpose(2, 1, 0) return xp.tensordot(a, b, axes=2) else: - a = testing.shaped_arange( - (2, 3, 4, 5), xp, dtype).transpose(2, 0, 3, 1) - b = testing.shaped_arange( - (5, 4, 3, 2), xp, dtype).transpose(3, 0, 2, 1) + a = testing.shaped_arange((2, 3, 4, 5), xp, dtype).transpose( + 2, 0, 3, 1 + ) + b = testing.shaped_arange((5, 4, 3, 2), xp, dtype).transpose( + 3, 0, 2, 1 + ) return xp.tensordot(a, b, axes=3) @testing.for_all_dtypes() @@ -334,16 +341,16 @@ def test_tensordot_with_list_axes(self, xp, dtype): def test_transposed_tensordot_with_list_axes(self, xp, dtype): if dtype in (numpy.uint8, numpy.int8, numpy.uint16, numpy.int16): # 
Avoid overflow - a = testing.shaped_arange( - (1, 2, 3), xp, dtype).transpose(2, 0, 1) - b = testing.shaped_arange( - (2, 3, 1), xp, dtype).transpose(0, 2, 1) + a = testing.shaped_arange((1, 2, 3), xp, dtype).transpose(2, 0, 1) + b = testing.shaped_arange((2, 3, 1), xp, dtype).transpose(0, 2, 1) return xp.tensordot(a, b, axes=([2, 0], [0, 2])) else: - a = testing.shaped_arange( - (2, 3, 4, 5), xp, dtype).transpose(2, 0, 3, 1) - b = testing.shaped_arange( - (3, 5, 4, 2), xp, dtype).transpose(3, 0, 2, 1) + a = testing.shaped_arange((2, 3, 4, 5), xp, dtype).transpose( + 2, 0, 3, 1 + ) + b = testing.shaped_arange((3, 5, 4, 2), xp, dtype).transpose( + 3, 0, 2, 1 + ) return xp.tensordot(a, b, axes=([2, 0, 3], [3, 2, 1])) @testing.for_all_dtypes() @@ -382,19 +389,22 @@ def test_zerodim_kron(self, xp, dtype): return xp.kron(a, b) -@testing.parameterize(*testing.product({ - 'params': [ - ((0, 0), 2), - ((0, 0), (1, 0)), - ((0, 0, 0), 2), - ((0, 0, 0), 3), - ((0, 0, 0), ([2, 1], [0, 2])), - ((0, 0, 0), ([0, 2, 1], [1, 2, 0])), - ], -})) +@testing.parameterize( + *testing.product( + { + "params": [ + ((0, 0), 2), + ((0, 0), (1, 0)), + ((0, 0, 0), 2), + ((0, 0, 0), 3), + ((0, 0, 0), ([2, 1], [0, 2])), + ((0, 0, 0), ([0, 2, 1], [1, 2, 0])), + ], + } + ) +) @testing.gpu class TestProductZeroLength(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_tensordot_zero_length(self, xp, dtype): diff --git a/tests/third_party/cupy/logic_tests/test_comparison.py b/tests/third_party/cupy/logic_tests/test_comparison.py index 32ebc76c9b61..6f928a0a2e1f 100644 --- a/tests/third_party/cupy/logic_tests/test_comparison.py +++ b/tests/third_party/cupy/logic_tests/test_comparison.py @@ -9,7 +9,6 @@ @testing.gpu class TestComparison(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(atol=1e-5) def check_binary(self, name, xp, dtype): @@ -18,31 +17,34 @@ def check_binary(self, name, xp, dtype): return getattr(xp, name)(a, b) def test_greater(self): - self.check_binary('greater') + self.check_binary("greater") def test_greater_equal(self): - self.check_binary('greater_equal') + self.check_binary("greater_equal") def test_less(self): - self.check_binary('less') + self.check_binary("less") def test_less_equal(self): - self.check_binary('less_equal') + self.check_binary("less_equal") def test_not_equal(self): - self.check_binary('not_equal') + self.check_binary("not_equal") def test_equal(self): - self.check_binary('equal') + self.check_binary("equal") @testing.gpu class TestComparisonOperator(unittest.TestCase): operators = [ - operator.lt, operator.le, - operator.eq, operator.ne, - operator.gt, operator.ge, + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, ] @testing.for_all_dtypes(no_complex=True) @@ -75,7 +77,6 @@ def test_binary_array_pyscalar(self, xp, dtype): class TestArrayEqual(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_array_equal_not_equal(self, xp, dtype): @@ -117,7 +118,6 @@ def test_array_equal_broadcast_not_allowed(self, xp): class TestAllclose(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_allclose_finite(self, xp, dtype): @@ -129,15 +129,15 @@ def test_allclose_finite(self, xp, dtype): @testing.numpy_cupy_equal() def test_allclose_min_int(self, xp, dtype): a = xp.array([0], dtype=dtype) - b = xp.array([numpy.iinfo('i').min], dtype=dtype) + b = xp.array([numpy.iinfo("i").min], dtype=dtype) return xp.allclose(a, b) 
@testing.for_float_dtypes() @testing.numpy_cupy_equal() def test_allclose_infinite(self, xp, dtype): - nan = float('nan') - inf = float('inf') - ninf = float('-inf') + nan = float("nan") + inf = float("inf") + ninf = float("-inf") a = xp.array([0, nan, nan, 0, inf, ninf], dtype=dtype) b = xp.array([0, nan, 0, nan, inf, ninf], dtype=dtype) return xp.allclose(a, b) @@ -145,9 +145,9 @@ def test_allclose_infinite(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_equal() def test_allclose_infinite_equal_nan(self, xp, dtype): - nan = float('nan') - inf = float('inf') - ninf = float('-inf') + nan = float("nan") + inf = float("inf") + ninf = float("-inf") a = xp.array([0, nan, inf, ninf], dtype=dtype) b = xp.array([0, nan, inf, ninf], dtype=dtype) return xp.allclose(a, b, equal_nan=True) @@ -161,7 +161,6 @@ def test_allclose_array_scalar(self, xp, dtype): class TestIsclose(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_is_close_finite(self, xp, dtype): @@ -175,15 +174,15 @@ def test_is_close_finite(self, xp, dtype): def test_is_close_min_int(self, xp, dtype): # In numpy<1.10 this test fails when dtype is bool a = xp.array([0], dtype=dtype) - b = xp.array([numpy.iinfo('i').min], dtype=dtype) + b = xp.array([numpy.iinfo("i").min], dtype=dtype) return xp.isclose(a, b) @testing.for_float_dtypes() @testing.numpy_cupy_array_equal() def test_is_close_infinite(self, xp, dtype): - nan = float('nan') - inf = float('inf') - ninf = float('-inf') + nan = float("nan") + inf = float("inf") + ninf = float("-inf") a = xp.array([0, nan, nan, 0, inf, ninf], dtype=dtype) b = xp.array([0, nan, 0, nan, inf, ninf], dtype=dtype) return xp.isclose(a, b) @@ -191,9 +190,9 @@ def test_is_close_infinite(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_array_equal() def test_is_close_infinite_equal_nan(self, xp, dtype): - nan = float('nan') - inf = float('inf') - ninf = float('-inf') + nan = float("nan") + inf = float("inf") + ninf = float("-inf") a = xp.array([0, nan, inf, ninf], dtype=dtype) b = xp.array([0, nan, inf, ninf], dtype=dtype) return xp.isclose(a, b, equal_nan=True) diff --git a/tests/third_party/cupy/logic_tests/test_content.py b/tests/third_party/cupy/logic_tests/test_content.py index 32d4f95d1ef6..8393e437f2f6 100644 --- a/tests/third_party/cupy/logic_tests/test_content.py +++ b/tests/third_party/cupy/logic_tests/test_content.py @@ -7,27 +7,25 @@ @testing.gpu class TestContent(unittest.TestCase): - - @testing.for_dtypes('fd') + @testing.for_dtypes("fd") @testing.numpy_cupy_array_equal() def check_unary_inf(self, name, xp, dtype): - a = xp.array([-3, numpy.inf, -1, -numpy.inf, 0, 1, 2], - dtype=dtype) + a = xp.array([-3, numpy.inf, -1, -numpy.inf, 0, 1, 2], dtype=dtype) return getattr(xp, name)(a) - @testing.for_dtypes('fd') + @testing.for_dtypes("fd") @testing.numpy_cupy_array_equal() def check_unary_nan(self, name, xp, dtype): a = xp.array( - [-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, numpy.inf], - dtype=dtype) + [-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, numpy.inf], dtype=dtype + ) return getattr(xp, name)(a) def test_isfinite(self): - self.check_unary_inf('isfinite') + self.check_unary_inf("isfinite") def test_isinf(self): - self.check_unary_inf('isinf') + self.check_unary_inf("isinf") def test_isnan(self): - self.check_unary_nan('isnan') + self.check_unary_nan("isnan") diff --git a/tests/third_party/cupy/logic_tests/test_ops.py b/tests/third_party/cupy/logic_tests/test_ops.py index 2948602b8a51..1b1858131a5c 
100644 --- a/tests/third_party/cupy/logic_tests/test_ops.py +++ b/tests/third_party/cupy/logic_tests/test_ops.py @@ -5,7 +5,6 @@ @testing.gpu class TestOps(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype): @@ -20,13 +19,13 @@ def check_binary(self, name, xp, dtype): return getattr(xp, name)(a, b) def test_logical_and(self): - self.check_binary('logical_and') + self.check_binary("logical_and") def test_logical_or(self): - self.check_binary('logical_or') + self.check_binary("logical_or") def test_logical_xor(self): - self.check_binary('logical_xor') + self.check_binary("logical_xor") def test_logical_not(self): - self.check_unary('logical_not') + self.check_unary("logical_not") diff --git a/tests/third_party/cupy/manipulation_tests/test_basic.py b/tests/third_party/cupy/manipulation_tests/test_basic.py index 936e94c7e19f..cee630066bfe 100644 --- a/tests/third_party/cupy/manipulation_tests/test_basic.py +++ b/tests/third_party/cupy/manipulation_tests/test_basic.py @@ -4,13 +4,13 @@ import numpy import dpnp as cupy + # from cupy import cuda from tests.third_party.cupy import testing @testing.gpu class TestBasic(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_copyto(self, xp, dtype): @@ -22,7 +22,7 @@ def test_copyto(self, xp, dtype): @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_copyto_dtype(self, xp, dtype): - a = testing.shaped_arange((2, 3, 4), xp, dtype='?') + a = testing.shaped_arange((2, 3, 4), xp, dtype="?") b = xp.empty((2, 3, 4), dtype=dtype) xp.copyto(b, a) return b @@ -40,7 +40,7 @@ def test_copyto_broadcast(self, xp, dtype): def test_copyto_where(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) b = testing.shaped_reverse_arange((2, 3, 4), xp, dtype) - c = testing.shaped_arange((2, 3, 4), xp, '?') + c = testing.shaped_arange((2, 3, 4), xp, "?") xp.copyto(a, b, where=c) return a @@ -105,11 +105,14 @@ def test_copyto_where(self, xp, dtype): @testing.parameterize( *testing.product( - {'src': [float(3.2), int(0), int(4), int(-4), True, False, 1 + 1j], - 'dst_shape': [(), (0,), (1,), (1, 1), (2, 2)]})) + { + "src": [float(3.2), int(0), int(4), int(-4), True, False, 1 + 1j], + "dst_shape": [(), (0,), (1,), (1, 1), (2, 2)], + } + ) +) @testing.gpu class TestCopytoFromScalar(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(accept_error=TypeError) def test_copyto(self, xp, dtype): @@ -121,7 +124,8 @@ def test_copyto(self, xp, dtype): @testing.numpy_cupy_allclose(accept_error=TypeError) def test_copyto_where(self, xp, dtype): dst = xp.ones(self.dst_shape, dtype=dtype) - mask = (testing.shaped_arange( - self.dst_shape, xp, dtype) % 2).astype(xp.bool_) + mask = (testing.shaped_arange(self.dst_shape, xp, dtype) % 2).astype( + xp.bool_ + ) xp.copyto(dst, self.src, where=mask) return dst diff --git a/tests/third_party/cupy/manipulation_tests/test_dims.py b/tests/third_party/cupy/manipulation_tests/test_dims.py index c6ef32b21484..8f7315c86e1b 100644 --- a/tests/third_party/cupy/manipulation_tests/test_dims.py +++ b/tests/third_party/cupy/manipulation_tests/test_dims.py @@ -9,7 +9,6 @@ @testing.gpu class TestDims(unittest.TestCase): - def check_atleast(self, func, xp): a = testing.shaped_arange((), xp) b = testing.shaped_arange((2,), xp) @@ -118,27 +117,30 @@ def test_expand_dims_negative1(self, xp): a = testing.shaped_arange((2, 3), xp) return xp.expand_dims(a, -2) - 
@testing.with_requires('numpy>=1.18') + @testing.with_requires("numpy>=1.18") def test_expand_dims_negative2(self): for xp in (numpy, cupy): a = testing.shaped_arange((2, 3), xp) with pytest.raises(numpy.AxisError): xp.expand_dims(a, -4) - @testing.with_requires('numpy>=1.18') + @testing.with_requires("numpy>=1.18") @testing.numpy_cupy_array_equal() def test_expand_dims_tuple_axis(self, xp): a = testing.shaped_arange((2, 2, 2), xp) - return [xp.expand_dims(a, axis) for axis in [ - (0, 1, 2), - (0, -1, -2), - (0, 3, 5), - (0, -3, -5), - (), - (1,), - ]] - - @testing.with_requires('numpy>=1.18') + return [ + xp.expand_dims(a, axis) + for axis in [ + (0, 1, 2), + (0, -1, -2), + (0, 3, 5), + (0, -3, -5), + (), + (1,), + ] + ] + + @testing.with_requires("numpy>=1.18") def test_expand_dims_out_of_range(self): for xp in (numpy, cupy): a = testing.shaped_arange((2, 2, 2), xp) @@ -146,7 +148,7 @@ def test_expand_dims_out_of_range(self): with pytest.raises(numpy.AxisError): xp.expand_dims(a, axis) - @testing.with_requires('numpy>=1.18') + @testing.with_requires("numpy>=1.18") def test_expand_dims_repeated_axis(self): for xp in (numpy, cupy): a = testing.shaped_arange((2, 2, 2), xp) @@ -252,7 +254,7 @@ def test_squeeze_scalar_failure3(self): a.squeeze(axis=-2) def test_squeeze_scalar_failure4(self): - for xp in (numpy, cupy): + for _ in (numpy, cupy): a = testing.shaped_arange((), cupy) with pytest.raises(numpy.AxisError): a.squeeze(axis=1) @@ -270,24 +272,22 @@ def test_external_squeeze(self, xp): @testing.parameterize( - {'shapes': [(), ()]}, - {'shapes': [(0,), (0,)]}, - {'shapes': [(1,), (1,)]}, - {'shapes': [(2,), (2,)]}, - {'shapes': [(0,), (1,)]}, - {'shapes': [(2, 3), (1, 3)]}, - {'shapes': [(2, 1, 3, 4), (3, 1, 4)]}, - {'shapes': [(4, 3, 2, 3), (2, 3)]}, - {'shapes': [(2, 0, 1, 1, 3), (2, 1, 0, 0, 3)]}, - {'shapes': [(0, 1, 1, 3), (2, 1, 0, 0, 3)]}, - {'shapes': [(0, 1, 1, 0, 3), (5, 2, 0, 1, 0, 0, 3), (2, 1, 0, 0, 0, 3)]}, + {"shapes": [(), ()]}, + {"shapes": [(0,), (0,)]}, + {"shapes": [(1,), (1,)]}, + {"shapes": [(2,), (2,)]}, + {"shapes": [(0,), (1,)]}, + {"shapes": [(2, 3), (1, 3)]}, + {"shapes": [(2, 1, 3, 4), (3, 1, 4)]}, + {"shapes": [(4, 3, 2, 3), (2, 3)]}, + {"shapes": [(2, 0, 1, 1, 3), (2, 1, 0, 0, 3)]}, + {"shapes": [(0, 1, 1, 3), (2, 1, 0, 0, 3)]}, + {"shapes": [(0, 1, 1, 0, 3), (5, 2, 0, 1, 0, 0, 3), (2, 1, 0, 0, 0, 3)]}, ) @testing.gpu class TestBroadcast(unittest.TestCase): - def _broadcast(self, xp, dtype, shapes): - arrays = [ - testing.shaped_arange(s, xp, dtype) for s in shapes] + arrays = [testing.shaped_arange(s, xp, dtype) for s in shapes] return xp.broadcast(*arrays) @testing.for_all_dtypes() @@ -301,20 +301,34 @@ def test_broadcast(self, dtype): @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_broadcast_arrays(self, xp, dtype): - arrays = [ - testing.shaped_arange(s, xp, dtype) for s in self.shapes] + arrays = [testing.shaped_arange(s, xp, dtype) for s in self.shapes] return xp.broadcast_arrays(*arrays) @testing.parameterize( - {'shapes': [(3,), (2,)]}, - {'shapes': [(3, 2), (2, 3,)]}, - {'shapes': [(3, 2), (3, 4,)]}, - {'shapes': [(0,), (2,)]}, + {"shapes": [(3,), (2,)]}, + { + "shapes": [ + (3, 2), + ( + 2, + 3, + ), + ] + }, + { + "shapes": [ + (3, 2), + ( + 3, + 4, + ), + ] + }, + {"shapes": [(0,), (2,)]}, ) @testing.gpu class TestInvalidBroadcast(unittest.TestCase): - @testing.for_all_dtypes() def test_invalid_broadcast(self, dtype): for xp in (numpy, cupy): diff --git a/tests/third_party/cupy/manipulation_tests/test_shape.py 
b/tests/third_party/cupy/manipulation_tests/test_shape.py index b80437dba892..a61e989e62b0 100644 --- a/tests/third_party/cupy/manipulation_tests/test_shape.py +++ b/tests/third_party/cupy/manipulation_tests/test_shape.py @@ -7,12 +7,15 @@ from tests.third_party.cupy import testing -@testing.parameterize(*testing.product({ - 'shape': [(2, 3), (), (4,)], -})) +@testing.parameterize( + *testing.product( + { + "shape": [(2, 3), (), (4,)], + } + ) +) @testing.gpu class TestShape(unittest.TestCase): - def test_shape(self): shape = self.shape for xp in (numpy, cupy): @@ -28,20 +31,21 @@ def test_shape_list(self): @testing.gpu class TestReshape(unittest.TestCase): - def test_reshape_strides(self): def func(xp): a = testing.shaped_arange((1, 1, 1, 2, 2), xp) return a.strides + self.assertEqual(func(numpy), func(cupy)) def test_reshape2(self): def func(xp): a = xp.zeros((8,), dtype=xp.float32) return a.reshape((1, 1, 1, 4, 1, 2)).strides + self.assertEqual(func(numpy), func(cupy)) - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_nocopy_reshape(self, xp, dtype, order): @@ -50,7 +54,7 @@ def test_nocopy_reshape(self, xp, dtype, order): b[1] = 1 return a - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_nocopy_reshape_with_order(self, xp, dtype, order): @@ -59,13 +63,13 @@ def test_nocopy_reshape_with_order(self, xp, dtype, order): b[1] = 1 return a - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_transposed_reshape2(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp).transpose(2, 0, 1) return a.reshape(2, 3, 4, order=order) - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_reshape_with_unknown_dimension(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) @@ -87,7 +91,7 @@ def test_reshape_invalid_order(self): for xp in (numpy, cupy): a = testing.shaped_arange((2, 3, 4), xp) with pytest.raises(ValueError): - a.reshape(2, 4, 4, order='K') + a.reshape(2, 4, 4, order="K") def test_reshape_zerosize_invalid(self): for xp in (numpy, cupy): @@ -100,7 +104,7 @@ def test_reshape_zerosize(self, xp): a = xp.zeros((0,)) return a.reshape((0,)) - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_external_reshape(self, xp, order): a = xp.zeros((8,), dtype=xp.float32) @@ -109,21 +113,20 @@ def test_external_reshape(self, xp, order): @testing.gpu class TestRavel(unittest.TestCase): - - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_ravel(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) a = a.transpose(2, 0, 1) return a.ravel(order) - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_ravel2(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) return a.ravel(order) - @testing.for_orders('CFA') + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_ravel3(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) @@ -137,16 +140,21 @@ def test_external_ravel(self, xp): return xp.ravel(a) -@testing.parameterize(*testing.product({ - 'order_init': ['C', 'F'], - 'order_reshape': ['C', 'F', 'A', 'c', 'f', 'a'], - 'shape_in_out': [((2, 3), (1, 6, 1)), # (shape_init, shape_final) - ((6,), (2, 3)), - ((3, 3, 3), (9, 3))], -})) +@testing.parameterize( + *testing.product( + { + 
"order_init": ["C", "F"], + "order_reshape": ["C", "F", "A", "c", "f", "a"], + "shape_in_out": [ + ((2, 3), (1, 6, 1)), # (shape_init, shape_final) + ((6,), (2, 3)), + ((3, 3, 3), (9, 3)), + ], + } + ) +) @testing.gpu class TestReshapeOrder(unittest.TestCase): - def test_reshape_contiguity(self): shape_init, shape_final = self.shape_in_out diff --git a/tests/third_party/cupy/manipulation_tests/test_tiling.py b/tests/third_party/cupy/manipulation_tests/test_tiling.py index b6d0d180bf2e..0fba8beb35ad 100644 --- a/tests/third_party/cupy/manipulation_tests/test_tiling.py +++ b/tests/third_party/cupy/manipulation_tests/test_tiling.py @@ -8,17 +8,16 @@ @testing.parameterize( - {'repeats': 0, 'axis': None}, - {'repeats': 2, 'axis': None}, - {'repeats': 2, 'axis': 1}, - {'repeats': 2, 'axis': -1}, - {'repeats': [0, 0, 0], 'axis': 1}, - {'repeats': [1, 2, 3], 'axis': 1}, - {'repeats': [1, 2, 3], 'axis': -2}, + {"repeats": 0, "axis": None}, + {"repeats": 2, "axis": None}, + {"repeats": 2, "axis": 1}, + {"repeats": 2, "axis": -1}, + {"repeats": [0, 0, 0], "axis": 1}, + {"repeats": [1, 2, 3], "axis": 1}, + {"repeats": [1, 2, 3], "axis": -2}, ) @testing.gpu class TestRepeat(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_array_repeat(self, xp): x = testing.shaped_arange((2, 3, 4), xp) @@ -26,28 +25,27 @@ def test_array_repeat(self, xp): class TestRepeatRepeatsNdarray(unittest.TestCase): - def test_func(self): a = testing.shaped_arange((2, 3, 4), cupy) repeats = cupy.array([2, 3], dtype=cupy.int32) - with pytest.raises(ValueError, match=r'repeats'): + with pytest.raises(ValueError, match=r"repeats"): cupy.repeat(a, repeats) def test_method(self): a = testing.shaped_arange((2, 3, 4), cupy) repeats = cupy.array([2, 3], dtype=cupy.int32) - with pytest.raises(ValueError, match=r'repeats'): + with pytest.raises(ValueError, match=r"repeats"): a.repeat(repeats) @testing.parameterize( - {'repeats': [2], 'axis': None}, - {'repeats': [2], 'axis': 1}, + {"repeats": [2], "axis": None}, + {"repeats": [2], "axis": 1}, ) @testing.gpu class TestRepeatListBroadcast(unittest.TestCase): - - """Test for `repeats` argument using single element list. + """ + Test for `repeats` argument using single element list. This feature is only supported in NumPy 1.10 or later. 
""" @@ -59,15 +57,14 @@ def test_array_repeat(self, xp): @testing.parameterize( - {'repeats': 0, 'axis': None}, - {'repeats': 2, 'axis': None}, - {'repeats': 2, 'axis': 0}, - {'repeats': [1, 2, 3, 4], 'axis': None}, - {'repeats': [1, 2, 3, 4], 'axis': 0}, + {"repeats": 0, "axis": None}, + {"repeats": 2, "axis": None}, + {"repeats": 2, "axis": 0}, + {"repeats": [1, 2, 3, 4], "axis": None}, + {"repeats": [1, 2, 3, 4], "axis": 0}, ) @testing.gpu class TestRepeat1D(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_array_repeat(self, xp): x = testing.shaped_arange((4,), xp) @@ -75,12 +72,11 @@ def test_array_repeat(self, xp): @testing.parameterize( - {'repeats': [2], 'axis': None}, - {'repeats': [2], 'axis': 0}, + {"repeats": [2], "axis": None}, + {"repeats": [2], "axis": 0}, ) @testing.gpu class TestRepeat1DListBroadcast(unittest.TestCase): - """See comment in TestRepeatListBroadcast class.""" @testing.numpy_cupy_array_equal() @@ -90,16 +86,15 @@ def test_array_repeat(self, xp): @testing.parameterize( - {'repeats': -3, 'axis': None}, - {'repeats': [-3, -3], 'axis': 0}, - {'repeats': [1, 2, 3], 'axis': None}, - {'repeats': [1, 2], 'axis': 1}, - {'repeats': 2, 'axis': -4}, - {'repeats': 2, 'axis': 3}, + {"repeats": -3, "axis": None}, + {"repeats": [-3, -3], "axis": 0}, + {"repeats": [1, 2, 3], "axis": None}, + {"repeats": [1, 2], "axis": 1}, + {"repeats": 2, "axis": -4}, + {"repeats": 2, "axis": 3}, ) @testing.gpu class TestRepeatFailure(unittest.TestCase): - def test_repeat_failure(self): for xp in (numpy, cupy): x = testing.shaped_arange((2, 3, 4), xp) @@ -108,16 +103,15 @@ def test_repeat_failure(self): @testing.parameterize( - {'reps': 0}, - {'reps': 1}, - {'reps': 2}, - {'reps': (0, 1)}, - {'reps': (2, 3)}, - {'reps': (2, 3, 4, 5)}, + {"reps": 0}, + {"reps": 1}, + {"reps": 2}, + {"reps": (0, 1)}, + {"reps": (2, 3)}, + {"reps": (2, 3, 4, 5)}, ) @testing.gpu class TestTile(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_array_tile(self, xp): x = testing.shaped_arange((2, 3, 4), xp) @@ -125,12 +119,11 @@ def test_array_tile(self, xp): @testing.parameterize( - {'reps': -1}, - {'reps': (-1, -2)}, + {"reps": -1}, + {"reps": (-1, -2)}, ) @testing.gpu class TestTileFailure(unittest.TestCase): - def test_tile_failure(self): for xp in (numpy, cupy): x = testing.shaped_arange((2, 3, 4), xp) diff --git a/tests/third_party/cupy/manipulation_tests/test_transpose.py b/tests/third_party/cupy/manipulation_tests/test_transpose.py index d12b268c2f4e..d758d15d3f77 100644 --- a/tests/third_party/cupy/manipulation_tests/test_transpose.py +++ b/tests/third_party/cupy/manipulation_tests/test_transpose.py @@ -9,7 +9,6 @@ @testing.gpu class TestTranspose(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_moveaxis1(self, xp): a = testing.shaped_arange((2, 3, 4), xp) diff --git a/tests/third_party/cupy/math_tests/test_arithmetic.py b/tests/third_party/cupy/math_tests/test_arithmetic.py index f4a92c8f25b7..36b4c3562eec 100644 --- a/tests/third_party/cupy/math_tests/test_arithmetic.py +++ b/tests/third_party/cupy/math_tests/test_arithmetic.py @@ -7,32 +7,46 @@ import dpnp as cupy from tests.third_party.cupy import testing - float_types = [numpy.float32, numpy.float64] complex_types = [] signed_int_types = [numpy.int32, numpy.int64] unsigned_int_types = [] int_types = signed_int_types + unsigned_int_types all_types = float_types + int_types + complex_types -negative_types = (float_types + signed_int_types + complex_types) +negative_types = float_types + signed_int_types + 
complex_types negative_no_complex_types = float_types + signed_int_types no_complex_types = float_types + int_types @testing.gpu -@testing.parameterize(*( - testing.product({ - 'nargs': [1], - 'name': ['reciprocal', 'angle'], - }) + testing.product({ - 'nargs': [2], - 'name': [ - 'add', 'multiply', 'divide', 'power', 'subtract', 'true_divide', - 'floor_divide', 'fmod', 'remainder', 'mod'], - }) -)) +@testing.parameterize( + *( + testing.product( + { + "nargs": [1], + "name": ["reciprocal", "angle"], + } + ) + + testing.product( + { + "nargs": [2], + "name": [ + "add", + "multiply", + "divide", + "power", + "subtract", + "true_divide", + "floor_divide", + "fmod", + "remainder", + "mod", + ], + } + ) + ) +) class TestArithmeticRaisesWithNumpyInput(unittest.TestCase): - def test_raises_with_numpy_input(self): nargs = self.nargs name = self.name @@ -49,16 +63,23 @@ def test_raises_with_numpy_input(self): @testing.gpu -@testing.parameterize(*( - testing.product({ - 'arg1': ([testing.shaped_arange((2, 3), numpy, dtype=d) + 1 - for d in all_types - ] + [2, 2.0]), - 'name': ['reciprocal'], - }) -)) +@testing.parameterize( + *( + testing.product( + { + "arg1": ( + [ + testing.shaped_arange((2, 3), numpy, dtype=d) + 1 + for d in all_types + ] + + [2, 2.0] + ), + "name": ["reciprocal"], + } + ) + ) +) class TestArithmeticUnary(unittest.TestCase): - @testing.numpy_cupy_allclose(atol=1e-5) def test_unary(self, xp): arg1 = self.arg1 @@ -66,7 +87,7 @@ def test_unary(self, xp): arg1 = xp.asarray(arg1) y = getattr(xp, self.name)(arg1) - if self.name in ('real', 'imag'): + if self.name in ("real", "imag"): # Some NumPy functions return Python scalars for Python scalar # inputs. # We need to convert them to arrays to compare with CuPy outputs. @@ -85,7 +106,6 @@ def test_unary(self, xp): class ArithmeticBinaryBase: - @testing.numpy_cupy_allclose(atol=1e-4) def check_binary(self, xp): arg1 = self.arg1 @@ -95,7 +115,7 @@ def check_binary(self, xp): dtype1 = np1.dtype dtype2 = np2.dtype - if self.name == 'power': + if self.name == "power": # TODO(niboshi): Fix this: power(0, 1j) # numpy => 1+0j # cupy => 0j @@ -122,15 +142,17 @@ def check_binary(self, xp): arg2 = xp.asarray(arg2) # Subtraction between booleans is not allowed. 
- if (self.name == 'subtract' - and dtype1 == numpy.bool_ - and dtype2 == numpy.bool_): + if ( + self.name == "subtract" + and dtype1 == numpy.bool_ + and dtype2 == numpy.bool_ + ): return xp.array(True) func = getattr(xp, self.name) - with testing.NumpyError(divide='ignore'): + with testing.NumpyError(divide="ignore"): with numpy.warnings.catch_warnings(): - numpy.warnings.filterwarnings('ignore') + numpy.warnings.filterwarnings("ignore") if self.use_dtype: y = func(arg1, arg2, dtype=self.dtype) else: @@ -157,86 +179,131 @@ def check_binary(self, xp): @testing.gpu -@testing.parameterize(*( - testing.product({ - # TODO(unno): boolean subtract causes DeprecationWarning in numpy>=1.13 - 'arg1': [testing.shaped_arange((2, 3), numpy, dtype=d) - for d in all_types - ] + [0, 0.0, 2, 2.0], - 'arg2': [testing.shaped_reverse_arange((2, 3), numpy, dtype=d) - for d in all_types - ] + [0, 0.0, 2, 2.0], - 'name': ['add', 'multiply', 'power', 'subtract'], - }) + testing.product({ - 'arg1': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in negative_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'arg2': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in negative_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'name': ['divide', 'true_divide', 'subtract'], - }) -)) +@testing.parameterize( + *( + testing.product( + { + # TODO(unno): boolean subtract causes DeprecationWarning in numpy>=1.13 + "arg1": [ + testing.shaped_arange((2, 3), numpy, dtype=d) + for d in all_types + ] + + [0, 0.0, 2, 2.0], + "arg2": [ + testing.shaped_reverse_arange((2, 3), numpy, dtype=d) + for d in all_types + ] + + [0, 0.0, 2, 2.0], + "name": ["add", "multiply", "power", "subtract"], + } + ) + + testing.product( + { + "arg1": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in negative_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "arg2": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in negative_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "name": ["divide", "true_divide", "subtract"], + } + ) + ) +) class TestArithmeticBinary(ArithmeticBinaryBase, unittest.TestCase): - def test_binary(self): self.use_dtype = False self.check_binary() @testing.gpu -@testing.parameterize(*( - testing.product({ - 'arg1': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in int_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'arg2': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in int_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'name': ['true_divide'], - 'dtype': [numpy.float64], - 'use_dtype': [True, False], - }) + testing.product({ - 'arg1': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in float_types] + [0.0, 2.0, -2.0], - 'arg2': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in float_types] + [0.0, 2.0, -2.0], - 'name': ['power', 'true_divide', 'subtract'], - 'dtype': [numpy.float64], - 'use_dtype': [True, False], - }) + testing.product({ - 'arg1': [testing.shaped_arange((2, 3), numpy, dtype=d) - for d in no_complex_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'arg2': [testing.shaped_reverse_arange((2, 3), numpy, dtype=d) - for d in no_complex_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'name': ['floor_divide', 'fmod', 'remainder', 'mod'], - 'dtype': [numpy.float64], - 'use_dtype': [True, False], - }) + testing.product({ - 'arg1': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in negative_no_complex_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'arg2': [numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) - for d in negative_no_complex_types - ] + [0, 0.0, 2, 2.0, -2, -2.0], - 'name': ['floor_divide', 'fmod', 'remainder', 'mod'], 
- 'dtype': [numpy.float64], - 'use_dtype': [True, False], - }) -)) +@testing.parameterize( + *( + testing.product( + { + "arg1": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in int_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "arg2": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in int_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "name": ["true_divide"], + "dtype": [numpy.float64], + "use_dtype": [True, False], + } + ) + + testing.product( + { + "arg1": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in float_types + ] + + [0.0, 2.0, -2.0], + "arg2": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in float_types + ] + + [0.0, 2.0, -2.0], + "name": ["power", "true_divide", "subtract"], + "dtype": [numpy.float64], + "use_dtype": [True, False], + } + ) + + testing.product( + { + "arg1": [ + testing.shaped_arange((2, 3), numpy, dtype=d) + for d in no_complex_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "arg2": [ + testing.shaped_reverse_arange((2, 3), numpy, dtype=d) + for d in no_complex_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "name": ["floor_divide", "fmod", "remainder", "mod"], + "dtype": [numpy.float64], + "use_dtype": [True, False], + } + ) + + testing.product( + { + "arg1": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in negative_no_complex_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "arg2": [ + numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) + for d in negative_no_complex_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0], + "name": ["floor_divide", "fmod", "remainder", "mod"], + "dtype": [numpy.float64], + "use_dtype": [True, False], + } + ) + ) +) class TestArithmeticBinary2(ArithmeticBinaryBase, unittest.TestCase): - def test_binary(self): - if (self.use_dtype and - numpy.lib.NumpyVersion(numpy.__version__) < '1.10.0'): - raise unittest.SkipTest('Test for numpy>=1.10') + if ( + self.use_dtype + and numpy.lib.NumpyVersion(numpy.__version__) < "1.10.0" + ): + raise unittest.SkipTest("Test for numpy>=1.10") self.check_binary() class TestArithmeticModf(unittest.TestCase): - @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_modf(self, xp, dtype): @@ -248,17 +315,15 @@ def test_modf(self, xp, dtype): return d -@testing.parameterize(*testing.product({ - 'xp': [numpy, cupy], - 'shape': [(3, 2), (), (3, 0, 2)] -})) +@testing.parameterize( + *testing.product({"xp": [numpy, cupy], "shape": [(3, 2), (), (3, 0, 2)]}) +) @testing.gpu class TestBoolSubtract(unittest.TestCase): - def test_bool_subtract(self): xp = self.xp - if xp is numpy and not testing.numpy_satisfies('>=1.14.0'): - raise unittest.SkipTest('NumPy<1.14.0') + if xp is numpy and not testing.numpy_satisfies(">=1.14.0"): + raise unittest.SkipTest("NumPy<1.14.0") shape = self.shape x = testing.shaped_random(shape, xp, dtype=numpy.bool_) y = testing.shaped_random(shape, xp, dtype=numpy.bool_) diff --git a/tests/third_party/cupy/math_tests/test_explog.py b/tests/third_party/cupy/math_tests/test_explog.py index ca48092a15a9..adc6c1a8735a 100644 --- a/tests/third_party/cupy/math_tests/test_explog.py +++ b/tests/third_party/cupy/math_tests/test_explog.py @@ -7,12 +7,11 @@ @testing.gpu class TestExplog(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype, no_complex=False): if no_complex: - if numpy.dtype(dtype).kind == 'c': + if numpy.dtype(dtype).kind == "c": return xp.array(True) a = testing.shaped_arange((2, 3), xp, dtype) return getattr(xp, name)(a) @@ -21,38 +20,38 @@ def check_unary(self, name, xp, dtype, 
no_complex=False): @testing.numpy_cupy_allclose(atol=1e-5) def check_binary(self, name, xp, dtype, no_complex=False): if no_complex: - if numpy.dtype(dtype).kind == 'c': + if numpy.dtype(dtype).kind == "c": return xp.array(True) a = testing.shaped_arange((2, 3), xp, dtype) b = testing.shaped_reverse_arange((2, 3), xp, dtype) return getattr(xp, name)(a, b) def test_exp(self): - self.check_unary('exp') + self.check_unary("exp") def test_expm1(self): - self.check_unary('expm1') + self.check_unary("expm1") def test_exp2(self): - self.check_unary('exp2') + self.check_unary("exp2") def test_log(self): - with testing.NumpyError(divide='ignore'): - self.check_unary('log') + with testing.NumpyError(divide="ignore"): + self.check_unary("log") def test_log10(self): - with testing.NumpyError(divide='ignore'): - self.check_unary('log10') + with testing.NumpyError(divide="ignore"): + self.check_unary("log10") def test_log2(self): - with testing.NumpyError(divide='ignore'): - self.check_unary('log2') + with testing.NumpyError(divide="ignore"): + self.check_unary("log2") def test_log1p(self): - self.check_unary('log1p') + self.check_unary("log1p") def test_logaddexp(self): - self.check_binary('logaddexp', no_complex=True) + self.check_binary("logaddexp", no_complex=True) def test_logaddexp2(self): - self.check_binary('logaddexp2', no_complex=True) + self.check_binary("logaddexp2", no_complex=True) diff --git a/tests/third_party/cupy/math_tests/test_floating.py b/tests/third_party/cupy/math_tests/test_floating.py index e526f89a5a72..2df027aee035 100644 --- a/tests/third_party/cupy/math_tests/test_floating.py +++ b/tests/third_party/cupy/math_tests/test_floating.py @@ -8,15 +8,13 @@ @testing.gpu class TestFloating(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_signbit(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.signbit(a) - @testing.for_all_dtypes_combination( - ('dtype_a', 'dtype_b'), no_complex=True) + @testing.for_all_dtypes_combination(("dtype_a", "dtype_b"), no_complex=True) @testing.numpy_cupy_array_equal() def test_copysign_combination(self, xp, dtype_a, dtype_b): a = testing.shaped_arange((2, 3), xp, dtype_a) @@ -30,8 +28,8 @@ def test_copysign_float(self, xp, dtype): b = xp.array([-xp.inf, -3, -0.0, 0, 3, xp.inf], dtype=dtype)[None, :] return xp.copysign(a, b) - @testing.for_float_dtypes(name='ftype') - @testing.for_dtypes(['i', 'l'], name='itype') + @testing.for_float_dtypes(name="ftype") + @testing.for_dtypes(["i", "l"], name="itype") @testing.numpy_cupy_array_equal() def test_ldexp(self, xp, ftype, itype): a = xp.array([-3, -2, -1, 0, 1, 2, 3], dtype=ftype) @@ -40,8 +38,9 @@ def test_ldexp(self, xp, ftype, itype): @testing.for_float_dtypes() def test_frexp(self, dtype): - numpy_a = numpy.array([-300, -20, -10, -1, 0, 1, 10, 20, 300], - dtype=dtype) + numpy_a = numpy.array( + [-300, -20, -10, -1, 0, 1, 10, 20, 300], dtype=dtype + ) numpy_b, numpy_c = numpy.frexp(numpy_a) cupy_a = cupy.array(numpy_a) @@ -50,8 +49,7 @@ def test_frexp(self, dtype): testing.assert_array_equal(cupy_b, numpy_b) testing.assert_array_equal(cupy_c, numpy_c) - @testing.for_all_dtypes_combination( - ('dtype_a', 'dtype_b'), no_complex=True) + @testing.for_all_dtypes_combination(("dtype_a", "dtype_b"), no_complex=True) @testing.numpy_cupy_array_equal() def test_nextafter_combination(self, xp, dtype_a, dtype_b): a = testing.shaped_arange((2, 3), xp, dtype_a) diff --git a/tests/third_party/cupy/math_tests/test_hyperbolic.py 
b/tests/third_party/cupy/math_tests/test_hyperbolic.py index c7e479c86c1d..6fa732c0200b 100644 --- a/tests/third_party/cupy/math_tests/test_hyperbolic.py +++ b/tests/third_party/cupy/math_tests/test_hyperbolic.py @@ -5,36 +5,35 @@ @testing.gpu class TestHyperbolic(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return getattr(xp, name)(a) - @testing.for_dtypes(['f', 'd']) + @testing.for_dtypes(["f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_unit(self, name, xp, dtype): a = xp.array([0.2, 0.4, 0.6, 0.8], dtype=dtype) return getattr(xp, name)(a) def test_sinh(self): - self.check_unary('sinh') + self.check_unary("sinh") def test_cosh(self): - self.check_unary('cosh') + self.check_unary("cosh") def test_tanh(self): - self.check_unary('tanh') + self.check_unary("tanh") def test_arcsinh(self): - self.check_unary('arcsinh') + self.check_unary("arcsinh") - @testing.for_dtypes(['f', 'd']) + @testing.for_dtypes(["f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def test_arccosh(self, xp, dtype): a = xp.array([1, 2, 3], dtype=dtype) return xp.arccosh(a) def test_arctanh(self): - self.check_unary_unit('arctanh') + self.check_unary_unit("arctanh") diff --git a/tests/third_party/cupy/math_tests/test_matmul.py b/tests/third_party/cupy/math_tests/test_matmul.py index 3d9d258fd9d8..a7dbd3a328a2 100644 --- a/tests/third_party/cupy/math_tests/test_matmul.py +++ b/tests/third_party/cupy/math_tests/test_matmul.py @@ -9,62 +9,64 @@ @testing.parameterize( - *testing.product({ - 'shape_pair': [ - # dot test - ((3, 2), (2, 4)), - ((3, 0), (0, 4)), - ((0, 2), (2, 4)), - ((3, 2), (2, 0)), - ((2,), (2, 4)), - ((0,), (0, 4)), - ((3, 2), (2,)), - ((3, 0), (0,)), - ((2,), (2,)), - ((0,), (0,)), - # matmul test - ((5, 3, 2), (5, 2, 4)), - # ((0, 3, 2), (0, 2, 4)), - # ((5, 3, 2), (2, 4)), - # ((0, 3, 2), (2, 4)), - # ((3, 2), (5, 2, 4)), - # ((3, 2), (0, 2, 4)), - # ((5, 3, 2), (1, 2, 4)), - # ((0, 3, 2), (1, 2, 4)), - # ((1, 3, 2), (5, 2, 4)), - # ((1, 3, 2), (0, 2, 4)), - # ((5, 3, 2), (2,)), - # ((5, 3, 0), (0,)), - # ((2,), (5, 2, 4)), - # ((0,), (5, 0, 4)), - # ((2, 2, 3, 2), (2, 2, 2, 4)), - # ((5, 0, 3, 2), (5, 0, 2, 4)), - # ((6, 5, 3, 2), (2, 4)), - # ((5, 0, 3, 2), (2, 4)), - # ((3, 2), (6, 5, 2, 4)), - # ((3, 2), (5, 0, 2, 4)), - # ((1, 5, 3, 2), (6, 1, 2, 4)), - # ((1, 0, 3, 2), (6, 1, 2, 4)), - # ((6, 1, 3, 2), (1, 5, 2, 4)), - # ((6, 1, 3, 2), (1, 0, 2, 4)), - # ((6, 5, 3, 2), (2,)), - # ((6, 5, 3, 0), (0,)), - # ((2,), (6, 5, 2, 4)), - # ((0,), (6, 5, 0, 4)), - ((1, 3, 3), (10, 1, 3, 1)), - ], - })) + *testing.product( + { + "shape_pair": [ + # dot test + ((3, 2), (2, 4)), + ((3, 0), (0, 4)), + ((0, 2), (2, 4)), + ((3, 2), (2, 0)), + ((2,), (2, 4)), + ((0,), (0, 4)), + ((3, 2), (2,)), + ((3, 0), (0,)), + ((2,), (2,)), + ((0,), (0,)), + # matmul test + ((5, 3, 2), (5, 2, 4)), + # ((0, 3, 2), (0, 2, 4)), + # ((5, 3, 2), (2, 4)), + # ((0, 3, 2), (2, 4)), + # ((3, 2), (5, 2, 4)), + # ((3, 2), (0, 2, 4)), + # ((5, 3, 2), (1, 2, 4)), + # ((0, 3, 2), (1, 2, 4)), + # ((1, 3, 2), (5, 2, 4)), + # ((1, 3, 2), (0, 2, 4)), + # ((5, 3, 2), (2,)), + # ((5, 3, 0), (0,)), + # ((2,), (5, 2, 4)), + # ((0,), (5, 0, 4)), + # ((2, 2, 3, 2), (2, 2, 2, 4)), + # ((5, 0, 3, 2), (5, 0, 2, 4)), + # ((6, 5, 3, 2), (2, 4)), + # ((5, 0, 3, 2), (2, 4)), + # ((3, 2), (6, 5, 2, 4)), + # ((3, 2), (5, 0, 2, 4)), + # ((1, 5, 3, 2), (6, 1, 2, 4)), + # ((1, 0, 3, 2), (6, 1, 2, 4)), + # ((6, 1, 3, 2), 
(1, 5, 2, 4)), + # ((6, 1, 3, 2), (1, 0, 2, 4)), + # ((6, 5, 3, 2), (2,)), + # ((6, 5, 3, 0), (0,)), + # ((2,), (6, 5, 2, 4)), + # ((0,), (6, 5, 0, 4)), + ((1, 3, 3), (10, 1, 3, 1)), + ], + } + ) +) @testing.gpu class TestMatmul(unittest.TestCase): - - @testing.for_all_dtypes(name='dtype1') + @testing.for_all_dtypes(name="dtype1") @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) # required for uint8 def test_operator_matmul(self, xp, dtype1): x1 = testing.shaped_arange(self.shape_pair[0], xp, dtype1) x2 = testing.shaped_arange(self.shape_pair[1], xp, dtype1) return operator.matmul(x1, x2) - @testing.for_all_dtypes(name='dtype1') + @testing.for_all_dtypes(name="dtype1") @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) # required for uint8 def test_cupy_matmul(self, xp, dtype1): x1 = testing.shaped_arange(self.shape_pair[0], xp, dtype1) @@ -73,21 +75,24 @@ def test_cupy_matmul(self, xp, dtype1): @testing.parameterize( - *testing.product({ - 'shape_pair': [ - ((6, 5, 3, 2), (6, 5, 2, 4)), - ((6, 5, 3, 2), (6, 1, 2, 4)), - ((6, 5, 3, 2), (1, 5, 2, 4)), - ((6, 5, 3, 2), (1, 1, 2, 4)), - ((6, 1, 3, 2), (6, 5, 2, 4)), - ((1, 5, 3, 2), (6, 5, 2, 4)), - ((1, 1, 3, 2), (6, 5, 2, 4)), - ((3, 2), (6, 5, 2, 4)), - ((6, 5, 3, 2), (2, 4)), - ((2,), (6, 5, 2, 4)), - ((6, 5, 3, 2), (2,)), - ], - })) + *testing.product( + { + "shape_pair": [ + ((6, 5, 3, 2), (6, 5, 2, 4)), + ((6, 5, 3, 2), (6, 1, 2, 4)), + ((6, 5, 3, 2), (1, 5, 2, 4)), + ((6, 5, 3, 2), (1, 1, 2, 4)), + ((6, 1, 3, 2), (6, 5, 2, 4)), + ((1, 5, 3, 2), (6, 5, 2, 4)), + ((1, 1, 3, 2), (6, 5, 2, 4)), + ((3, 2), (6, 5, 2, 4)), + ((6, 5, 3, 2), (2, 4)), + ((2,), (6, 5, 2, 4)), + ((6, 5, 3, 2), (2,)), + ], + } + ) +) @testing.gpu class TestMatmulLarge(unittest.TestCase): @@ -103,21 +108,25 @@ class TestMatmulLarge(unittest.TestCase): (numpy.uint16, numpy.uint16), } - @testing.for_all_dtypes(name='dtype1') + @testing.for_all_dtypes(name="dtype1") @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) # required for uint8 def test_operator_matmul(self, xp, dtype1): - if ((dtype1, dtype1) in self.skip_dtypes or - (dtype1, dtype1) in self.skip_dtypes): + if (dtype1, dtype1) in self.skip_dtypes or ( + dtype1, + dtype1, + ) in self.skip_dtypes: return xp.array([]) x1 = testing.shaped_random(self.shape_pair[0], xp, dtype1) x2 = testing.shaped_random(self.shape_pair[1], xp, dtype1) return operator.matmul(x1, x2) - @testing.for_all_dtypes(name='dtype1') + @testing.for_all_dtypes(name="dtype1") @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) # required for uint8 def test_cupy_matmul(self, xp, dtype1): - if ((dtype1, dtype1) in self.skip_dtypes or - (dtype1, dtype1) in self.skip_dtypes): + if (dtype1, dtype1) in self.skip_dtypes or ( + dtype1, + dtype1, + ) in self.skip_dtypes: return xp.array([]) shape1, shape2 = self.shape_pair x1 = testing.shaped_random(shape1, xp, dtype1) @@ -126,21 +135,23 @@ def test_cupy_matmul(self, xp, dtype1): @testing.parameterize( - *testing.product({ - 'shape_pair': [ - ((5, 3, 1), (3, 1, 4)), - ((3, 2, 3), (3, 2, 4)), - ((3, 2), ()), - ((), (3, 2)), - ((), ()), - ((3, 2), (1,)), - ((0, 2), (3, 0)), - ((0, 1, 1), (2, 1, 1)), - ], - })) + *testing.product( + { + "shape_pair": [ + ((5, 3, 1), (3, 1, 4)), + ((3, 2, 3), (3, 2, 4)), + ((3, 2), ()), + ((), (3, 2)), + ((), ()), + ((3, 2), (1,)), + ((0, 2), (3, 0)), + ((0, 1, 1), (2, 1, 1)), + ], + } + ) +) @testing.gpu class TestMatmulInvalidShape(unittest.TestCase): - def test_invalid_shape(self): for xp in (numpy, dpnp): shape1, shape2 = self.shape_pair diff --git 
a/tests/third_party/cupy/math_tests/test_misc.py b/tests/third_party/cupy/math_tests/test_misc.py index 729b121467e4..545712c7547b 100644 --- a/tests/third_party/cupy/math_tests/test_misc.py +++ b/tests/third_party/cupy/math_tests/test_misc.py @@ -10,11 +10,10 @@ @testing.gpu class TestMisc(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype, no_bool=False): - if no_bool and numpy.dtype(dtype).char == '?': + if no_bool and numpy.dtype(dtype).char == "?": return numpy.int_(0) a = testing.shaped_arange((2, 3), xp, dtype) return getattr(xp, name)(a) @@ -22,73 +21,86 @@ def check_unary(self, name, xp, dtype, no_bool=False): @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5) def check_binary(self, name, xp, dtype, no_bool=False): - if no_bool and numpy.dtype(dtype).char == '?': + if no_bool and numpy.dtype(dtype).char == "?": return numpy.int_(0) a = testing.shaped_arange((2, 3), xp, dtype) b = testing.shaped_reverse_arange((2, 3), xp, dtype) return getattr(xp, name)(a, b) - @testing.for_dtypes(['i', 'q', 'f', 'd']) + @testing.for_dtypes(["i", "q", "f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_negative(self, name, xp, dtype, no_bool=False): - if no_bool and numpy.dtype(dtype).char == '?': + if no_bool and numpy.dtype(dtype).char == "?": return numpy.int_(0) a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype) - if numpy.dtype(dtype).kind == 'c': + if numpy.dtype(dtype).kind == "c": a += (a * 1j).astype(dtype) return getattr(xp, name)(a) - @testing.for_dtypes(['f', 'd']) + @testing.for_dtypes(["f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_inf(self, name, xp, dtype): inf = numpy.inf - if numpy.dtype(dtype).kind != 'c': + if numpy.dtype(dtype).kind != "c": a = xp.array([0, -1, 1, -inf, inf], dtype=dtype) else: - a = xp.array([complex(x, y) - for x in [0, -1, 1, -inf, inf] - for y in [0, -1, 1, -inf, inf]], - dtype=dtype) + a = xp.array( + [ + complex(x, y) + for x in [0, -1, 1, -inf, inf] + for y in [0, -1, 1, -inf, inf] + ], + dtype=dtype, + ) return getattr(xp, name)(a) - @testing.for_dtypes(['f', 'd']) + @testing.for_dtypes(["f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_nan(self, name, xp, dtype): nan = numpy.nan - if numpy.dtype(dtype).kind != 'c': + if numpy.dtype(dtype).kind != "c": a = xp.array([0, -1, 1, -nan, nan], dtype=dtype) else: - a = xp.array([complex(x, y) - for x in [0, -1, 1, -nan, nan] - for y in [0, -1, 1, -nan, nan]], - dtype=dtype) + a = xp.array( + [ + complex(x, y) + for x in [0, -1, 1, -nan, nan] + for y in [0, -1, 1, -nan, nan] + ], + dtype=dtype, + ) return getattr(xp, name)(a) - @testing.for_dtypes(['f', 'd']) + @testing.for_dtypes(["f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_inf_nan(self, name, xp, dtype): inf = numpy.inf nan = numpy.nan - if numpy.dtype(dtype).kind != 'c': + if numpy.dtype(dtype).kind != "c": a = xp.array([0, -1, 1, -inf, inf, -nan, nan], dtype=dtype) else: - a = xp.array([complex(x, y) - for x in [0, -1, 1, -inf, inf, -nan, nan] - for y in [0, -1, 1, -inf, inf, -nan, nan]], - dtype=dtype) + a = xp.array( + [ + complex(x, y) + for x in [0, -1, 1, -inf, inf, -nan, nan] + for y in [0, -1, 1, -inf, inf, -nan, nan] + ], + dtype=dtype, + ) return getattr(xp, name)(a) - @testing.for_dtypes(['f', 'd']) + @testing.for_dtypes(["f", "d"]) @testing.numpy_cupy_array_equal() def check_binary_nan(self, name, xp, dtype): - a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2], - dtype=dtype) 
- b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2], - dtype=dtype) + a = xp.array( + [-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2], dtype=dtype + ) + b = xp.array( + [numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2], dtype=dtype + ) return getattr(xp, name)(a, b) - @unittest.skipIf( - sys.platform == 'win32', 'dtype problem on Windows') + @unittest.skipIf(sys.platform == "win32", "dtype problem on Windows") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_clip1(self, xp, dtype): @@ -120,8 +132,7 @@ def test_clip_min_max_none(self, dtype): with pytest.raises(ValueError): a.clip(None, None) - @unittest.skipIf( - sys.platform == 'win32', 'dtype problem on Windows') + @unittest.skipIf(sys.platform == "win32", "dtype problem on Windows") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_external_clip1(self, xp, dtype): @@ -143,7 +154,7 @@ def test_clip2(self, xp, dtype): return a.clip(a_min, a_max) def test_sqrt(self): - self.check_unary('sqrt') + self.check_unary("sqrt") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(atol=1e-5) @@ -152,64 +163,64 @@ def test_cbrt(self, xp, dtype): return xp.cbrt(a) def test_square(self): - self.check_unary('square') + self.check_unary("square") def test_absolute(self): - self.check_unary('absolute') + self.check_unary("absolute") def test_absolute_negative(self): - self.check_unary_negative('absolute') + self.check_unary_negative("absolute") def test_fabs(self): - self.check_unary('fabs') + self.check_unary("fabs") def test_sign(self): - self.check_unary('sign', no_bool=True) + self.check_unary("sign", no_bool=True) def test_sign_negative(self): - self.check_unary_negative('sign', no_bool=True) + self.check_unary_negative("sign", no_bool=True) def test_maximum(self): - self.check_binary('maximum') + self.check_binary("maximum") def test_maximum_nan(self): - self.check_binary_nan('maximum') + self.check_binary_nan("maximum") def test_minimum(self): - self.check_binary('minimum') + self.check_binary("minimum") def test_minimum_nan(self): - self.check_binary_nan('minimum') + self.check_binary_nan("minimum") def test_fmax(self): - self.check_binary('fmax') + self.check_binary("fmax") def test_fmax_nan(self): - self.check_binary_nan('fmax') + self.check_binary_nan("fmax") def test_fmin(self): - self.check_binary('fmin') + self.check_binary("fmin") def test_fmin_nan(self): - self.check_binary_nan('fmin') + self.check_binary_nan("fmin") def test_nan_to_num(self): - self.check_unary('nan_to_num') + self.check_unary("nan_to_num") def test_nan_to_num_negative(self): - self.check_unary_negative('nan_to_num') + self.check_unary_negative("nan_to_num") def test_nan_to_num_for_old_numpy(self): - self.check_unary('nan_to_num', no_bool=True) + self.check_unary("nan_to_num", no_bool=True) def test_nan_to_num_negative_for_old_numpy(self): - self.check_unary_negative('nan_to_num', no_bool=True) + self.check_unary_negative("nan_to_num", no_bool=True) def test_nan_to_num_inf(self): - self.check_unary_inf('nan_to_num') + self.check_unary_inf("nan_to_num") def test_nan_to_num_nan(self): - self.check_unary_nan('nan_to_num') + self.check_unary_nan("nan_to_num") def test_nan_to_num_inf_nan(self): - self.check_unary_inf_nan('nan_to_num') + self.check_unary_inf_nan("nan_to_num") diff --git a/tests/third_party/cupy/math_tests/test_rounding.py b/tests/third_party/cupy/math_tests/test_rounding.py index 034e36c9f14b..cf61c3a6212c 100644 --- 
a/tests/third_party/cupy/math_tests/test_rounding.py +++ b/tests/third_party/cupy/math_tests/test_rounding.py @@ -9,7 +9,6 @@ @testing.gpu class TestRounding(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype): @@ -29,7 +28,7 @@ def check_unary_complex_unsupported(self, name, dtype): with pytest.raises(TypeError): getattr(xp, name)(a) - @testing.for_dtypes(['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd']) + @testing.for_dtypes(["?", "b", "h", "i", "q", "e", "f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_negative(self, name, xp, dtype): a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype) @@ -38,45 +37,51 @@ def check_unary_negative(self, name, xp, dtype): @testing.for_complex_dtypes() @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_negative_complex(self, name, xp, dtype): - a = xp.array([-3 - 3j, -2 - 2j, -1 - 1j, 1 + 1j, 2 + 2j, 3 + 3j], dtype=dtype) + a = xp.array( + [-3 - 3j, -2 - 2j, -1 - 1j, 1 + 1j, 2 + 2j, 3 + 3j], dtype=dtype + ) return getattr(xp, name)(a) def test_rint(self): - self.check_unary('rint') - self.check_unary_complex('rint') + self.check_unary("rint") + self.check_unary_complex("rint") def test_rint_negative(self): - self.check_unary_negative('rint') - self.check_unary_negative_complex('rint') + self.check_unary_negative("rint") + self.check_unary_negative_complex("rint") def test_floor(self): - self.check_unary('floor') - self.check_unary_complex_unsupported('floor') + self.check_unary("floor") + self.check_unary_complex_unsupported("floor") def test_ceil(self): - self.check_unary('ceil') - self.check_unary_complex_unsupported('ceil') + self.check_unary("ceil") + self.check_unary_complex_unsupported("ceil") def test_trunc(self): - self.check_unary('trunc') - self.check_unary_complex_unsupported('trunc') + self.check_unary("trunc") + self.check_unary_complex_unsupported("trunc") def test_fix(self): - self.check_unary('fix') - self.check_unary_complex_unsupported('fix') + self.check_unary("fix") + self.check_unary_complex_unsupported("fix") def test_around(self): - self.check_unary('around') - self.check_unary_complex('around') + self.check_unary("around") + self.check_unary_complex("around") def test_round_(self): - self.check_unary('round_') - self.check_unary_complex('around') + self.check_unary("round_") + self.check_unary_complex("around") -@testing.parameterize(*testing.product({ - 'decimals': [-2, -1, 0, 1, 2], -})) +@testing.parameterize( + *testing.product( + { + "decimals": [-2, -1, 0, 1, 2], + } + ) +) class TestRound(unittest.TestCase): shape = (20,) @@ -97,15 +102,19 @@ def test_round(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_round_out(self, xp): - a = testing.shaped_random(self.shape, xp, scale=100, dtype='d') + a = testing.shaped_random(self.shape, xp, scale=100, dtype="d") out = xp.empty_like(a) xp.around(a, self.decimals, out) return out -@testing.parameterize(*testing.product({ - 'decimals': [-100, -99, -90, 0, 90, 99, 100], -})) +@testing.parameterize( + *testing.product( + { + "decimals": [-100, -99, -90, 0, 90, 99, 100], + } + ) +) class TestRoundExtreme(unittest.TestCase): shape = (20,) @@ -123,21 +132,24 @@ def test_round_small(self, xp, dtype): return xp.around(a, self.decimals) -@testing.parameterize(*testing.product({ - 'value': [ - (14, -1), - (15, -1), - (16, -1), - (14.0, -1), - (15.0, -1), - (16.0, -1), - (1.4, 0), - (1.5, 0), - (1.6, 0), - ] -})) +@testing.parameterize( + *testing.product( + { + "value": [ + (14, 
-1), + (15, -1), + (16, -1), + (14.0, -1), + (15.0, -1), + (16.0, -1), + (1.4, 0), + (1.5, 0), + (1.6, 0), + ] + } + ) +) class TestRoundBorder(unittest.TestCase): - @testing.numpy_cupy_allclose(atol=1e-5) def test_around_positive1(self, xp): a, decimals = self.value diff --git a/tests/third_party/cupy/math_tests/test_sumprod.py b/tests/third_party/cupy/math_tests/test_sumprod.py index 15066f03872e..97d3d14cabb5 100644 --- a/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/tests/third_party/cupy/math_tests/test_sumprod.py @@ -9,7 +9,6 @@ @testing.gpu class TestSumprod(unittest.TestCase): - def tearDown(self): # Free huge memory for slow test # cupy.get_default_memory_pool().free_all_blocks() @@ -61,7 +60,7 @@ def test_sum_axis(self, xp, dtype): @testing.slow @testing.numpy_cupy_allclose() def test_sum_axis_huge(self, xp): - a = testing.shaped_random((204, 102, 102), xp, 'd') + a = testing.shaped_random((204, 102, 102), xp, "d") return a.sum(axis=2) @testing.for_all_dtypes() @@ -114,13 +113,13 @@ def test_sum_axes4(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) return a.sum(axis=(0, 2, 3)) - @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype']) + @testing.for_all_dtypes_combination(names=["src_dtype", "dst_dtype"]) @testing.numpy_cupy_allclose() def test_sum_dtype(self, xp, src_dtype, dst_dtype): a = testing.shaped_arange((2, 3, 4), xp, src_dtype) return a.sum(dtype=dst_dtype) - @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype']) + @testing.for_all_dtypes_combination(names=["src_dtype", "dst_dtype"]) @testing.numpy_cupy_allclose() def test_sum_keepdims_and_dtype(self, xp, src_dtype, dst_dtype): a = testing.shaped_arange((2, 3, 4), xp, src_dtype) @@ -170,7 +169,7 @@ def test_external_prod_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.prod(a, axis=1) - @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype']) + @testing.for_all_dtypes_combination(names=["src_dtype", "dst_dtype"]) @testing.numpy_cupy_allclose() def test_prod_dtype(self, xp, src_dtype, dst_dtype): a = testing.shaped_arange((2, 3), xp, src_dtype) @@ -178,23 +177,26 @@ def test_prod_dtype(self, xp, src_dtype, dst_dtype): @testing.parameterize( - *testing.product({ - 'shape': [(2, 3, 4), (20, 30, 40)], - 'axis': [0, 1], - 'transpose_axes': [True, False], - 'keepdims': [True, False], - 'func': ['nansum', 'nanprod'] - }) + *testing.product( + { + "shape": [(2, 3, 4), (20, 30, 40)], + "axis": [0, 1], + "transpose_axes": [True, False], + "keepdims": [True, False], + "func": ["nansum", "nanprod"], + } + ) ) @testing.gpu class TestNansumNanprodLong(unittest.TestCase): - def _do_transposed_axis_test(self): return not self.transpose_axes and self.axis != 1 def _numpy_nanprod_implemented(self): - return (self.func == 'nanprod' and - numpy.__version__ >= numpy.lib.NumpyVersion('1.10.0')) + return ( + self.func == "nanprod" + and numpy.__version__ >= numpy.lib.NumpyVersion("1.10.0") + ) def _test(self, xp, dtype): a = testing.shaped_arange(self.shape, xp, dtype) @@ -208,28 +210,33 @@ def _test(self, xp, dtype): @testing.for_all_dtypes(no_bool=True, no_float16=True) @testing.numpy_cupy_allclose() def test_nansum_all(self, xp, dtype): - if (not self._numpy_nanprod_implemented() or - not self._do_transposed_axis_test()): + if ( + not self._numpy_nanprod_implemented() + or not self._do_transposed_axis_test() + ): return xp.array(()) return self._test(xp, dtype) @testing.for_all_dtypes(no_bool=True, no_float16=True) 
@testing.numpy_cupy_allclose(contiguous_check=False) def test_nansum_axis_transposed(self, xp, dtype): - if (not self._numpy_nanprod_implemented() or - not self._do_transposed_axis_test()): + if ( + not self._numpy_nanprod_implemented() + or not self._do_transposed_axis_test() + ): return xp.array(()) return self._test(xp, dtype) @testing.parameterize( - *testing.product({ - 'shape': [(2, 3, 4), (20, 30, 40)], - }) + *testing.product( + { + "shape": [(2, 3, 4), (20, 30, 40)], + } + ) ) @testing.gpu class TestNansumNanprodExtra(unittest.TestCase): - @testing.for_all_dtypes(no_bool=True, no_float16=True) @testing.numpy_cupy_allclose() def test_nansum_out(self, xp, dtype): @@ -249,10 +256,12 @@ def test_nansum_out_wrong_shape(self): @testing.parameterize( - *testing.product({ - 'shape': [(2, 3, 4, 5), (20, 30, 40, 50)], - 'axis': [(1, 3), (0, 2, 3)], - }) + *testing.product( + { + "shape": [(2, 3, 4, 5), (20, 30, 40, 50)], + "axis": [(1, 3), (0, 2, 3)], + } + ) ) @testing.gpu class TestNansumNanprodAxes(unittest.TestCase): @@ -268,10 +277,9 @@ def test_nansum_axes(self, xp, dtype): axes = [0, 1, 2] -@testing.parameterize(*testing.product({'axis': axes})) +@testing.parameterize(*testing.product({"axis": axes})) @testing.gpu class TestCumsum(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_cumsum(self, xp, dtype): @@ -323,7 +331,9 @@ def test_cumsum_axis_out_noncontiguous(self, xp, dtype): n = len(axes) shape = tuple(range(4, 4 + n)) a = testing.shaped_arange(shape, xp, dtype) - out = xp.zeros((8,) + shape[1:], dtype=dtype)[::2] # Non contiguous view + out = xp.zeros((8,) + shape[1:], dtype=dtype)[ + ::2 + ] # Non contiguous view xp.cumsum(a, axis=self.axis, out=out) return out @@ -380,7 +390,6 @@ def test_cumsum_numpy_array(self, dtype): @testing.gpu class TestCumprod(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_cumprod_1dim(self, xp, dtype): @@ -423,11 +432,11 @@ def test_ndarray_cumprod_2dim_with_axis(self, xp, dtype): @testing.slow def test_cumprod_huge_array(self): - size = 2 ** 32 + size = 2**32 # Free huge memory for slow test cupy.get_default_memory_pool().free_all_blocks() - a = cupy.ones(size, 'b') - result = cupy.cumprod(a, dtype='b') + a = cupy.ones(size, "b") + result = cupy.cumprod(a, dtype="b") del a self.assertTrue((result == 1).all()) # Free huge memory for slow test @@ -474,7 +483,6 @@ def test_cumprod_numpy_array(self, dtype): @testing.gpu class TestDiff(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_diff_1dim(self, xp, dtype): @@ -505,7 +513,7 @@ def test_diff_2dim_with_n_and_axis(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, dtype) return xp.diff(a, 2, 1) - @testing.with_requires('numpy>=1.16') + @testing.with_requires("numpy>=1.16") @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_diff_2dim_with_prepend(self, xp, dtype): @@ -513,7 +521,7 @@ def test_diff_2dim_with_prepend(self, xp, dtype): b = testing.shaped_arange((4, 1), xp, dtype) return xp.diff(a, axis=-1, prepend=b) - @testing.with_requires('numpy>=1.16') + @testing.with_requires("numpy>=1.16") @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_diff_2dim_with_append(self, xp, dtype): @@ -521,14 +529,14 @@ def test_diff_2dim_with_append(self, xp, dtype): b = testing.shaped_arange((1, 5), xp, dtype) return xp.diff(a, axis=0, append=b, n=2) - @testing.with_requires('numpy>=1.16') + @testing.with_requires("numpy>=1.16") @testing.for_all_dtypes() 
@testing.numpy_cupy_allclose() def test_diff_2dim_with_scalar_append(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, dtype) return xp.diff(a, prepend=1, append=0) - @testing.with_requires('numpy>=1.16') + @testing.with_requires("numpy>=1.16") def test_diff_invalid_axis(self): for xp in (numpy, cupy): a = testing.shaped_arange((2, 3, 4), xp) diff --git a/tests/third_party/cupy/math_tests/test_trigonometric.py b/tests/third_party/cupy/math_tests/test_trigonometric.py index ba7c01d83b8b..410643067691 100644 --- a/tests/third_party/cupy/math_tests/test_trigonometric.py +++ b/tests/third_party/cupy/math_tests/test_trigonometric.py @@ -5,7 +5,6 @@ @testing.gpu class TestTrigonometric(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype): @@ -19,46 +18,45 @@ def check_binary(self, name, xp, dtype): b = testing.shaped_reverse_arange((2, 3), xp, dtype) return getattr(xp, name)(a, b) - @testing.for_dtypes(['f', 'd']) + @testing.for_dtypes(["f", "d"]) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_unit(self, name, xp, dtype): a = xp.array([0.2, 0.4, 0.6, 0.8], dtype=dtype) return getattr(xp, name)(a) def test_sin(self): - self.check_unary('sin') + self.check_unary("sin") def test_cos(self): - self.check_unary('cos') + self.check_unary("cos") def test_tan(self): - self.check_unary('tan') + self.check_unary("tan") def test_arcsin(self): - self.check_unary_unit('arcsin') + self.check_unary_unit("arcsin") def test_arccos(self): - self.check_unary_unit('arccos') + self.check_unary_unit("arccos") def test_arctan(self): - self.check_unary('arctan') + self.check_unary("arctan") def test_arctan2(self): - self.check_binary('arctan2') + self.check_binary("arctan2") def test_hypot(self): - self.check_binary('hypot') + self.check_binary("hypot") def test_deg2rad(self): - self.check_unary('deg2rad') + self.check_unary("deg2rad") def test_rad2deg(self): - self.check_unary('rad2deg') + self.check_unary("rad2deg") @testing.gpu class TestUnwrap(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_unwrap_1dim(self, xp, dtype): diff --git a/tests/third_party/cupy/random_tests/test_distributions.py b/tests/third_party/cupy/random_tests/test_distributions.py index 1504cad45c60..9735cbf12e86 100644 --- a/tests/third_party/cupy/random_tests/test_distributions.py +++ b/tests/third_party/cupy/random_tests/test_distributions.py @@ -7,70 +7,79 @@ from tests.third_party.cupy import testing from tests.third_party.cupy.testing import helper - _regular_float_dtypes = (numpy.float64, numpy.float32) _float_dtypes = _regular_float_dtypes + (numpy.float16,) -_signed_dtypes = tuple(numpy.dtype(i).type for i in 'bhilq') -_unsigned_dtypes = tuple(numpy.dtype(i).type for i in 'BHILQ') +_signed_dtypes = tuple(numpy.dtype(i).type for i in "bhilq") +_unsigned_dtypes = tuple(numpy.dtype(i).type for i in "BHILQ") _int_dtypes = _signed_dtypes + _unsigned_dtypes class RandomDistributionsTestCase(unittest.TestCase): def check_distribution(self, dist_name, params): cp_params = {k: cupy.asarray(params[k]) for k in params} - np_out = numpy.asarray(getattr(numpy.random, dist_name)(size=self.shape, **params)) - cp_out = getattr(_distributions, dist_name)(size=self.shape, **cp_params) + np_out = numpy.asarray( + getattr(numpy.random, dist_name)(size=self.shape, **params) + ) + cp_out = getattr(_distributions, dist_name)( + size=self.shape, **cp_params + ) self.assertEqual(cp_out.shape, np_out.shape) 
self.assertEqual(cp_out.dtype, np_out.dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'a_shape': [(), (3, 2)], - 'b_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "a_shape": [(), (3, 2)], + "b_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsBeta(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['a_dtype', 'b_dtype']) + _regular_float_dtypes, names=["a_dtype", "b_dtype"] + ) def test_beta(self, a_dtype, b_dtype): a = numpy.full(self.a_shape, 3, dtype=a_dtype) b = numpy.full(self.b_shape, 3, dtype=b_dtype) - self.check_distribution('beta', {'a': a, 'b': b}) + self.check_distribution("beta", {"a": a, "b": b}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'n_shape': [(), (3, 2)], - 'p_shape': [(), (3, 2)], - 'dtype': _int_dtypes, # to escape timeout -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "n_shape": [(), (3, 2)], + "p_shape": [(), (3, 2)], + "dtype": _int_dtypes, # to escape timeout + } + ) ) @testing.gpu class TestDistributionsBinomial(RandomDistributionsTestCase): - - @helper.for_signed_dtypes('n_dtype') - @helper.for_float_dtypes('p_dtype') + @helper.for_signed_dtypes("n_dtype") + @helper.for_float_dtypes("p_dtype") def test_binomial(self, n_dtype, p_dtype): - if numpy.dtype('l') == numpy.int32 and n_dtype == numpy.int64: - self.skipTest('n must be able to cast to long') + if numpy.dtype("l") == numpy.int32 and n_dtype == numpy.int64: + self.skipTest("n must be able to cast to long") n = numpy.full(self.n_shape, 5, dtype=n_dtype) p = numpy.full(self.p_shape, 0.5, dtype=p_dtype) - self.check_distribution('binomial', - {'n': n, 'p': p}, self.dtype) + self.check_distribution("binomial", {"n": n, "p": p}, self.dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'df_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "df_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsChisquare(unittest.TestCase): - def check_distribution(self, dist_func, df_dtype): df = cupy.full(self.df_shape, 5, dtype=df_dtype) out = dist_func(df, self.shape) @@ -78,58 +87,62 @@ def check_distribution(self, dist_func, df_dtype): # numpy and dpdp output dtype is float64 self.assertEqual(out.dtype, numpy.float64) - @helper.for_float_dtypes('df_dtype') + @helper.for_float_dtypes("df_dtype") def test_chisquare(self, df_dtype): self.check_distribution(_distributions.chisquare, df_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2, 3), (3, 2, 3)], - 'alpha_shape': [(3,)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2, 3), (3, 2, 3)], + "alpha_shape": [(3,)], + } + ) ) @testing.gpu class TestDistributionsDirichlet(RandomDistributionsTestCase): - - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['alpha_dtype']) + @helper.for_dtypes_combination(_regular_float_dtypes, names=["alpha_dtype"]) def test_dirichlet(self, alpha_dtype): alpha = numpy.ones(self.alpha_shape, dtype=alpha_dtype) - self.check_distribution('dirichlet', {'alpha': alpha}) + self.check_distribution("dirichlet", {"alpha": alpha}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2), None], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2), None], + "scale_shape": [(), (3, 2)], + } + ) ) 
@testing.gpu class TestDistributionsExponential(RandomDistributionsTestCase): - - @helper.for_float_dtypes('scale_dtype') + @helper.for_float_dtypes("scale_dtype") def test_exponential(self, scale_dtype): scale = numpy.ones(self.scale_shape, dtype=scale_dtype) - self.check_distribution('exponential', {'scale': scale}) + self.check_distribution("exponential", {"scale": scale}) @testing.gpu class TestDistributionsExponentialError(RandomDistributionsTestCase): - def test_negative_scale(self): scale = cupy.array([2, -1, 3], dtype=numpy.float32) with self.assertRaises(ValueError): cupy.random.exponential(scale) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'dfnum_shape': [(), (3, 2)], - 'dfden_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "dfnum_shape": [(), (3, 2)], + "dfden_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsF(unittest.TestCase): - def check_distribution(self, dist_func, dfnum_dtype, dfden_dtype): dfnum = cupy.ones(self.dfnum_shape, dtype=dfnum_dtype) dfden = cupy.ones(self.dfden_shape, dtype=dfden_dtype) @@ -138,22 +151,23 @@ def check_distribution(self, dist_func, dfnum_dtype, dfden_dtype): # numpy and dpdp output dtype is float64 self.assertEqual(out.dtype, numpy.float64) - @helper.for_float_dtypes('dfnum_dtype') - @helper.for_float_dtypes('dfden_dtype') + @helper.for_float_dtypes("dfnum_dtype") + @helper.for_float_dtypes("dfden_dtype") def test_f(self, dfnum_dtype, dfden_dtype): - self.check_distribution(_distributions.f, - dfnum_dtype, dfden_dtype) + self.check_distribution(_distributions.f, dfnum_dtype, dfden_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'shape_shape': [(), (3, 2)], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "shape_shape": [(), (3, 2)], + "scale_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsGamma(unittest.TestCase): - def check_distribution(self, dist_func, shape_dtype, scale_dtype): shape = cupy.ones(self.shape_shape, dtype=shape_dtype) scale = cupy.ones(self.scale_shape, dtype=scale_dtype) @@ -163,19 +177,22 @@ def check_distribution(self, dist_func, shape_dtype, scale_dtype): self.assertEqual(out.dtype, numpy.float64) @helper.for_dtypes_combination( - _regular_float_dtypes, names=['shape_dtype', 'scale_dtype']) + _regular_float_dtypes, names=["shape_dtype", "scale_dtype"] + ) def test_gamma(self, shape_dtype, scale_dtype): self.check_distribution(_distributions.gamma, shape_dtype, scale_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'p_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "p_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsGeometric(unittest.TestCase): - def check_distribution(self, dist_func, p_dtype): p = 0.5 * cupy.ones(self.p_shape, dtype=p_dtype) out = dist_func(p, self.shape) @@ -183,41 +200,47 @@ def check_distribution(self, dist_func, p_dtype): # numpy output dtype is int64, dpnp output is int32 self.assertEqual(out.dtype, numpy.int64) - @helper.for_float_dtypes('p_dtype') + @helper.for_float_dtypes("p_dtype") def test_geometric(self, p_dtype): self.check_distribution(_distributions.geometric, p_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'loc_shape': [(), (3, 2)], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + 
{ + "shape": [(4, 3, 2), (3, 2)], + "loc_shape": [(), (3, 2)], + "scale_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsGumbel(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['loc_dtype', 'scale_dtype']) + _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + ) def test_gumbel(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) scale = numpy.ones(self.scale_shape, dtype=scale_dtype) - self.check_distribution('gumbel', {'loc': loc, 'scale': scale}) + self.check_distribution("gumbel", {"loc": loc, "scale": scale}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'ngood_shape': [(), (3, 2)], - 'nbad_shape': [(), (3, 2)], - 'nsample_shape': [(), (3, 2)], - 'nsample_dtype': [numpy.int32, numpy.int64], # to escape timeout -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "ngood_shape": [(), (3, 2)], + "nbad_shape": [(), (3, 2)], + "nsample_shape": [(), (3, 2)], + "nsample_dtype": [numpy.int32, numpy.int64], # to escape timeout + } + ) ) @testing.gpu class TestDistributionsHyperGeometric(unittest.TestCase): - - def check_distribution(self, dist_func, ngood_dtype, nbad_dtype, - nsample_dtype): + def check_distribution( + self, dist_func, ngood_dtype, nbad_dtype, nsample_dtype + ): ngood = cupy.ones(self.ngood_shape, dtype=ngood_dtype) nbad = cupy.ones(self.nbad_shape, dtype=nbad_dtype) nsample = cupy.ones(self.nsample_shape, dtype=nsample_dtype) @@ -227,94 +250,112 @@ def check_distribution(self, dist_func, ngood_dtype, nbad_dtype, self.assertEqual(out.dtype, numpy.int64) @helper.for_dtypes_combination( - [numpy.int32, numpy.int64], names=['ngood_dtype', 'nbad_dtype']) + [numpy.int32, numpy.int64], names=["ngood_dtype", "nbad_dtype"] + ) def test_hypergeometric(self, ngood_dtype, nbad_dtype): - self.check_distribution(_distributions.hypergeometric, ngood_dtype, - nbad_dtype, self.nsample_dtype) + self.check_distribution( + _distributions.hypergeometric, + ngood_dtype, + nbad_dtype, + self.nsample_dtype, + ) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'loc_shape': [(), (3, 2)], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "loc_shape": [(), (3, 2)], + "scale_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsuLaplace(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['loc_dtype', 'scale_dtype']) + _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + ) def test_laplace(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) scale = numpy.ones(self.scale_shape, dtype=scale_dtype) - self.check_distribution('laplace', {'loc': loc, 'scale': scale}) + self.check_distribution("laplace", {"loc": loc, "scale": scale}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'loc_shape': [(), (3, 2)], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "loc_shape": [(), (3, 2)], + "scale_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsLogistic(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['loc_dtype', 'scale_dtype']) + _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + ) def test_logistic(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) scale = 
numpy.ones(self.scale_shape, dtype=scale_dtype) - self.check_distribution('logistic', {'loc': loc, 'scale': scale}) + self.check_distribution("logistic", {"loc": loc, "scale": scale}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'mean_shape': [()], - 'sigma_shape': [()], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "mean_shape": [()], + "sigma_shape": [()], + } + ) ) @testing.gpu class TestDistributionsLognormal(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['mean_dtype', 'sigma_dtype']) + _regular_float_dtypes, names=["mean_dtype", "sigma_dtype"] + ) def test_lognormal(self, mean_dtype, sigma_dtype): mean = numpy.ones(self.mean_shape, dtype=mean_dtype) sigma = numpy.ones(self.sigma_shape, dtype=sigma_dtype) - self.check_distribution('lognormal', {'mean': mean, 'sigma': sigma}) + self.check_distribution("lognormal", {"mean": mean, "sigma": sigma}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'p_shape': [()], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "p_shape": [()], + } + ) ) @testing.gpu class TestDistributionsLogseries(RandomDistributionsTestCase): - - @helper.for_float_dtypes('p_dtype', no_float16=True) + @helper.for_float_dtypes("p_dtype", no_float16=True) def test_logseries(self, p_dtype): p = numpy.full(self.p_shape, 0.5, dtype=p_dtype) - self.check_distribution('logseries', {'p': p}) + self.check_distribution("logseries", {"p": p}) - @helper.for_float_dtypes('p_dtype', no_float16=True) + @helper.for_float_dtypes("p_dtype", no_float16=True) def test_logseries_for_invalid_p(self, p_dtype): with self.assertRaises(ValueError): - cp_params = {'p': cupy.zeros(self.p_shape, dtype=p_dtype)} + cp_params = {"p": cupy.zeros(self.p_shape, dtype=p_dtype)} _distributions.logseries(size=self.shape, **cp_params) with self.assertRaises(ValueError): - cp_params = {'p': cupy.ones(self.p_shape, dtype=p_dtype)} + cp_params = {"p": cupy.ones(self.p_shape, dtype=p_dtype)} _distributions.logseries(size=self.shape, **cp_params) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'd': [2, 4], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "d": [2, 4], + } + ) ) @testing.gpu class TestDistributionsMultivariateNormal(unittest.TestCase): - def check_distribution(self, dist_func, mean_dtype, cov_dtype): mean = cupy.zeros(self.d, dtype=mean_dtype) cov = cupy.random.normal(size=(self.d, self.d)) @@ -329,140 +370,149 @@ def check_distribution(self, dist_func, mean_dtype, cov_dtype): # numpy and dpdp output dtype is float64 self.assertEqual(out.dtype, numpy.float64) - @helper.for_float_dtypes('mean_dtype', no_float16=True) - @helper.for_float_dtypes('cov_dtype', no_float16=True) + @helper.for_float_dtypes("mean_dtype", no_float16=True) + @helper.for_float_dtypes("cov_dtype", no_float16=True) def test_normal(self, mean_dtype, cov_dtype): - self.check_distribution(_distributions.multivariate_normal, - mean_dtype, cov_dtype) + self.check_distribution( + _distributions.multivariate_normal, mean_dtype, cov_dtype + ) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'n_shape': [(), (3, 2)], - 'p_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "n_shape": [(), (3, 2)], + "p_shape": [(), (3, 2)], + } + ) ) @testing.gpu class 
TestDistributionsNegativeBinomial(RandomDistributionsTestCase): - - @helper.for_float_dtypes('n_dtype') - @helper.for_float_dtypes('p_dtype') + @helper.for_float_dtypes("n_dtype") + @helper.for_float_dtypes("p_dtype") def test_negative_binomial(self, n_dtype, p_dtype): n = numpy.full(self.n_shape, 5, dtype=n_dtype) p = numpy.full(self.p_shape, 0.5, dtype=p_dtype) - self.check_distribution('negative_binomial', - {'n': n, 'p': p}) + self.check_distribution("negative_binomial", {"n": n, "p": p}) - @helper.for_float_dtypes('n_dtype') - @helper.for_float_dtypes('p_dtype') + @helper.for_float_dtypes("n_dtype") + @helper.for_float_dtypes("p_dtype") def test_negative_binomial_for_noninteger_n(self, n_dtype, p_dtype): n = numpy.full(self.n_shape, 5.5, dtype=n_dtype) p = numpy.full(self.p_shape, 0.5, dtype=p_dtype) - self.check_distribution('negative_binomial', - {'n': n, 'p': p}) + self.check_distribution("negative_binomial", {"n": n, "p": p}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'df_shape': [(), (3, 2)], - 'nonc_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "df_shape": [(), (3, 2)], + "nonc_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsNoncentralChisquare(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['df_dtype', 'nonc_dtype']) + _regular_float_dtypes, names=["df_dtype", "nonc_dtype"] + ) def test_noncentral_chisquare(self, df_dtype, nonc_dtype): df = numpy.full(self.df_shape, 1, dtype=df_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=nonc_dtype) - self.check_distribution('noncentral_chisquare', - {'df': df, 'nonc': nonc}) + self.check_distribution( + "noncentral_chisquare", {"df": df, "nonc": nonc} + ) - @helper.for_float_dtypes('param_dtype', no_float16=True) + @helper.for_float_dtypes("param_dtype", no_float16=True) def test_noncentral_chisquare_for_invalid_params(self, param_dtype): df = cupy.full(self.df_shape, -1, dtype=param_dtype) nonc = cupy.full(self.nonc_shape, 1, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.noncentral_chisquare( - df, nonc, size=self.shape) + _distributions.noncentral_chisquare(df, nonc, size=self.shape) df = cupy.full(self.df_shape, 1, dtype=param_dtype) nonc = cupy.full(self.nonc_shape, -1, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.noncentral_chisquare( - df, nonc, size=self.shape) + _distributions.noncentral_chisquare(df, nonc, size=self.shape) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'dfnum_shape': [(), (3, 2)], - 'dfden_shape': [(), (3, 2)], - 'nonc_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "dfnum_shape": [(), (3, 2)], + "dfden_shape": [(), (3, 2)], + "nonc_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsNoncentralF(RandomDistributionsTestCase): - @helper.for_dtypes_combination( _regular_float_dtypes, - names=['dfnum_dtype', 'dfden_dtype', 'nonc_dtype']) + names=["dfnum_dtype", "dfden_dtype", "nonc_dtype"], + ) def test_noncentral_f(self, dfnum_dtype, dfden_dtype, nonc_dtype): dfnum = numpy.full(self.dfnum_shape, 1, dtype=dfnum_dtype) dfden = numpy.full(self.dfden_shape, 1, dtype=dfden_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=nonc_dtype) - self.check_distribution('noncentral_f', - {'dfnum': dfnum, 'dfden': dfden, 'nonc': nonc}) + self.check_distribution( + "noncentral_f", {"dfnum": dfnum, "dfden": dfden, 
"nonc": nonc} + ) - @helper.for_float_dtypes('param_dtype', no_float16=True) + @helper.for_float_dtypes("param_dtype", no_float16=True) def test_noncentral_f_for_invalid_params(self, param_dtype): dfnum = numpy.full(self.dfnum_shape, -1, dtype=param_dtype) dfden = numpy.full(self.dfden_shape, 1, dtype=param_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.noncentral_f( - dfnum, dfden, nonc, size=self.shape) + _distributions.noncentral_f(dfnum, dfden, nonc, size=self.shape) dfnum = numpy.full(self.dfnum_shape, 1, dtype=param_dtype) dfden = numpy.full(self.dfden_shape, -1, dtype=param_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.noncentral_f( - dfnum, dfden, nonc, size=self.shape) + _distributions.noncentral_f(dfnum, dfden, nonc, size=self.shape) dfnum = numpy.full(self.dfnum_shape, 1, dtype=param_dtype) dfden = numpy.full(self.dfden_shape, 1, dtype=param_dtype) nonc = numpy.full(self.nonc_shape, -1, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.noncentral_f( - dfnum, dfden, nonc, size=self.shape) + _distributions.noncentral_f(dfnum, dfden, nonc, size=self.shape) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'loc_shape': [(), (3, 2)], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "loc_shape": [(), (3, 2)], + "scale_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsNormal(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['loc_dtype', 'scale_dtype']) + _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + ) def test_normal(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) scale = numpy.ones(self.scale_shape, dtype=scale_dtype) - self.check_distribution('normal', {'loc': loc, 'scale': scale}) + self.check_distribution("normal", {"loc": loc, "scale": scale}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'a_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "a_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsPareto(unittest.TestCase): - def check_distribution(self, dist_func, a_dtype): a = cupy.ones(self.a_shape, dtype=a_dtype) out = dist_func(a, self.shape) @@ -470,20 +520,21 @@ def check_distribution(self, dist_func, a_dtype): # numpy and dpdp output dtype is float64 self.assertEqual(out.dtype, numpy.float64) - @helper.for_float_dtypes('a_dtype') + @helper.for_float_dtypes("a_dtype") def test_pareto(self, a_dtype): - self.check_distribution(_distributions.pareto, - a_dtype) + self.check_distribution(_distributions.pareto, a_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'lam_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "lam_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsPoisson(unittest.TestCase): - def check_distribution(self, dist_func, lam_dtype): lam = cupy.full(self.lam_shape, 5, dtype=lam_dtype) out = dist_func(lam, self.shape) @@ -491,115 +542,127 @@ def check_distribution(self, dist_func, lam_dtype): # numpy output dtype is int64, dpnp output is int32 self.assertEqual(out.dtype, numpy.int64) - @helper.for_float_dtypes('lam_dtype') + @helper.for_float_dtypes("lam_dtype") def test_poisson(self, lam_dtype): 
self.check_distribution(_distributions.poisson, lam_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'a_shape': [()], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "a_shape": [()], + } + ) ) @testing.gpu class TestDistributionsPower(RandomDistributionsTestCase): - - @helper.for_float_dtypes('a_dtype') + @helper.for_float_dtypes("a_dtype") def test_power(self, a_dtype): a = numpy.full(self.a_shape, 0.5, dtype=a_dtype) - self.check_distribution('power', {'a': a}) + self.check_distribution("power", {"a": a}) - @helper.for_float_dtypes('a_dtype') + @helper.for_float_dtypes("a_dtype") def test_power_for_negative_a(self, a_dtype): a = numpy.full(self.a_shape, -0.5, dtype=a_dtype) with self.assertRaises(ValueError): - cp_params = {'a': cupy.asarray(a)} - getattr(_distributions, 'power')( - size=self.shape, **cp_params) + cp_params = {"a": cupy.asarray(a)} + _distributions.power(size=self.shape, **cp_params) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "scale_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsRayleigh(RandomDistributionsTestCase): - - @helper.for_float_dtypes('scale_dtype') + @helper.for_float_dtypes("scale_dtype") def test_rayleigh(self, scale_dtype): scale = numpy.full(self.scale_shape, 3, dtype=scale_dtype) - self.check_distribution('rayleigh', {'scale': scale}) + self.check_distribution("rayleigh", {"scale": scale}) - @helper.for_float_dtypes('scale_dtype') + @helper.for_float_dtypes("scale_dtype") def test_rayleigh_for_zero_scale(self, scale_dtype): scale = numpy.zeros(self.scale_shape, dtype=scale_dtype) - self.check_distribution('rayleigh', {'scale': scale}) + self.check_distribution("rayleigh", {"scale": scale}) - @helper.for_float_dtypes('scale_dtype') + @helper.for_float_dtypes("scale_dtype") def test_rayleigh_for_negative_scale(self, scale_dtype): scale = numpy.full(self.scale_shape, -0.5, dtype=scale_dtype) with self.assertRaises(ValueError): - cp_params = {'scale': cupy.asarray(scale)} + cp_params = {"scale": cupy.asarray(scale)} _distributions.rayleigh(size=self.shape, **cp_params) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsStandardCauchy(RandomDistributionsTestCase): - def test_standard_cauchy(self): - self.check_distribution('standard_cauchy', {}) + self.check_distribution("standard_cauchy", {}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsStandardExponential(RandomDistributionsTestCase): - def test_standard_exponential(self): - self.check_distribution('standard_exponential', {}) + self.check_distribution("standard_exponential", {}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'shape_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "shape_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsStandardGamma(RandomDistributionsTestCase): - - @helper.for_float_dtypes('shape_dtype') + @helper.for_float_dtypes("shape_dtype") def test_standard_gamma(self, shape_dtype): shape = numpy.ones(self.shape_shape, dtype=shape_dtype) 
- self.check_distribution('standard_gamma', - {'shape': shape}) + self.check_distribution("standard_gamma", {"shape": shape}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsStandardNormal(RandomDistributionsTestCase): - def test_standard_normal(self): - self.check_distribution('standard_normal', {}) + self.check_distribution("standard_normal", {}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'df_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "df_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsStandardT(unittest.TestCase): - def check_distribution(self, dist_func, df_dtype): df = cupy.ones(self.df_shape, dtype=df_dtype) out = dist_func(df, self.shape) @@ -607,81 +670,86 @@ def check_distribution(self, dist_func, df_dtype): # numpy and dpdp output dtype is float64 self.assertEqual(out.dtype, numpy.float64) - @helper.for_float_dtypes('df_dtype') + @helper.for_float_dtypes("df_dtype") def test_standard_t(self, df_dtype): self.check_distribution(_distributions.standard_t, df_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'left_shape': [(), (3, 2)], - 'mode_shape': [(), (3, 2)], - 'right_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "left_shape": [(), (3, 2)], + "mode_shape": [(), (3, 2)], + "right_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsTriangular(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, - names=['left_dtype', 'mode_dtype', 'right_dtype']) + _regular_float_dtypes, names=["left_dtype", "mode_dtype", "right_dtype"] + ) def test_triangular(self, left_dtype, mode_dtype, right_dtype): left = numpy.full(self.left_shape, -1, dtype=left_dtype) mode = numpy.full(self.mode_shape, 0, dtype=mode_dtype) right = numpy.full(self.right_shape, 2, dtype=right_dtype) - self.check_distribution('triangular', - {'left': left, 'mode': mode, 'right': right}) + self.check_distribution( + "triangular", {"left": left, "mode": mode, "right": right} + ) - @helper.for_float_dtypes('param_dtype', no_float16=True) + @helper.for_float_dtypes("param_dtype", no_float16=True) def test_triangular_for_invalid_params(self, param_dtype): left = cupy.full(self.left_shape, 1, dtype=param_dtype) mode = cupy.full(self.mode_shape, 0, dtype=param_dtype) right = cupy.full(self.right_shape, 2, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.triangular( - left, mode, right, size=self.shape) + _distributions.triangular(left, mode, right, size=self.shape) left = cupy.full(self.left_shape, -2, dtype=param_dtype) mode = cupy.full(self.mode_shape, 0, dtype=param_dtype) right = cupy.full(self.right_shape, -1, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.triangular( - left, mode, right, size=self.shape) + _distributions.triangular(left, mode, right, size=self.shape) left = cupy.full(self.left_shape, 0, dtype=param_dtype) mode = cupy.full(self.mode_shape, 0, dtype=param_dtype) right = cupy.full(self.right_shape, 0, dtype=param_dtype) with self.assertRaises(ValueError): - _distributions.triangular( - left, mode, right, size=self.shape) + _distributions.triangular(left, mode, right, size=self.shape) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 
2)], - 'low_shape': [(), (3, 2)], - 'high_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "low_shape": [(), (3, 2)], + "high_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsUniform(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['low_dtype', 'high_dtype']) + _regular_float_dtypes, names=["low_dtype", "high_dtype"] + ) def test_uniform(self, low_dtype, high_dtype): low = numpy.ones(self.low_shape, dtype=low_dtype) - high = numpy.ones(self.high_shape, dtype=high_dtype) * 2. - self.check_distribution('uniform', {'low': low, 'high': high}) + high = numpy.ones(self.high_shape, dtype=high_dtype) * 2.0 + self.check_distribution("uniform", {"low": low, "high": high}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'mu_shape': [(), (3, 2)], - 'kappa_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "mu_shape": [(), (3, 2)], + "kappa_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsVonmises(unittest.TestCase): - def check_distribution(self, dist_func, mu_dtype, kappa_dtype): mu = cupy.ones(self.mu_shape, dtype=mu_dtype) kappa = cupy.ones(self.kappa_shape, dtype=kappa_dtype) @@ -691,66 +759,72 @@ def check_distribution(self, dist_func, mu_dtype, kappa_dtype): self.assertEqual(out.dtype, numpy.float64) @helper.for_dtypes_combination( - _regular_float_dtypes, names=['mu_dtype', 'kappa_dtype']) + _regular_float_dtypes, names=["mu_dtype", "kappa_dtype"] + ) def test_vonmises(self, mu_dtype, kappa_dtype): - self.check_distribution(_distributions.vonmises, - mu_dtype, kappa_dtype) + self.check_distribution(_distributions.vonmises, mu_dtype, kappa_dtype) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'mean_shape': [(), (3, 2)], - 'scale_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "mean_shape": [(), (3, 2)], + "scale_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsWald(RandomDistributionsTestCase): - @helper.for_dtypes_combination( - _regular_float_dtypes, names=['mean_dtype', 'scale_dtype']) + _regular_float_dtypes, names=["mean_dtype", "scale_dtype"] + ) def test_wald(self, mean_dtype, scale_dtype): mean = numpy.full(self.mean_shape, 3, dtype=mean_dtype) scale = numpy.full(self.scale_shape, 3, dtype=scale_dtype) - self.check_distribution('wald', - {'mean': mean, 'scale': scale}) + self.check_distribution("wald", {"mean": mean, "scale": scale}) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'a_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "a_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsWeibull(RandomDistributionsTestCase): - - @helper.for_float_dtypes('a_dtype') + @helper.for_float_dtypes("a_dtype") def test_weibull(self, a_dtype): a = numpy.ones(self.a_shape, dtype=a_dtype) - self.check_distribution('weibull', {'a': a}) + self.check_distribution("weibull", {"a": a}) - @helper.for_float_dtypes('a_dtype') + @helper.for_float_dtypes("a_dtype") def test_weibull_for_inf_a(self, a_dtype): a = numpy.full(self.a_shape, numpy.inf, dtype=a_dtype) - self.check_distribution('weibull', {'a': a}) + self.check_distribution("weibull", {"a": a}) - @helper.for_float_dtypes('a_dtype') + @helper.for_float_dtypes("a_dtype") def test_weibull_for_negative_a(self, a_dtype): a = 
numpy.full(self.a_shape, -0.5, dtype=a_dtype) with self.assertRaises(ValueError): - cp_params = {'a': cupy.asarray(a)} - getattr(_distributions, 'weibull')(size=self.shape, **cp_params) + cp_params = {"a": cupy.asarray(a)} + _distributions.weibull(size=self.shape, **cp_params) -@testing.parameterize(*testing.product({ - 'shape': [(4, 3, 2), (3, 2)], - 'a_shape': [(), (3, 2)], -}) +@testing.parameterize( + *testing.product( + { + "shape": [(4, 3, 2), (3, 2)], + "a_shape": [(), (3, 2)], + } + ) ) @testing.gpu class TestDistributionsZipf(RandomDistributionsTestCase): - - @helper.for_dtypes([numpy.int32, numpy.int64], 'dtype') - @helper.for_float_dtypes('a_dtype') + @helper.for_dtypes([numpy.int32, numpy.int64], "dtype") + @helper.for_float_dtypes("a_dtype") def test_zipf(self, a_dtype, dtype): a = numpy.full(self.a_shape, 2, dtype=a_dtype) - self.check_distribution('zipf', {'a': a}) + self.check_distribution("zipf", {"a": a}) diff --git a/tests/third_party/cupy/random_tests/test_sample.py b/tests/third_party/cupy/random_tests/test_sample.py index 50b89d36cfc3..b77f7cab227f 100644 --- a/tests/third_party/cupy/random_tests/test_sample.py +++ b/tests/third_party/cupy/random_tests/test_sample.py @@ -6,13 +6,11 @@ import dpnp as cupy from dpnp import random from tests.third_party.cupy import testing -from tests.third_party.cupy.testing import condition -from tests.third_party.cupy.testing import hypothesis +from tests.third_party.cupy.testing import condition, hypothesis @testing.gpu class TestRandint(unittest.TestCase): - def test_lo_hi_reversed(self): with self.assertRaises(ValueError): random.randint(100, 1) @@ -43,7 +41,6 @@ def test_zero_sizes(self): # @testing.fix_random() @testing.gpu class TestRandint2(unittest.TestCase): - @condition.repeat(3, 10) def test_bound_1(self): vals = [random.randint(0, 10, (2, 3)) for _ in range(10)] @@ -94,7 +91,9 @@ def test_goodness_of_fit(self): # vals = [random.randint(mx).get() for _ in range(trial)] # counts = numpy.histogram(vals, bins=numpy.arange(mx + 1))[0] vals = [random.randint(mx) for _ in range(trial)] - counts = numpy.histogram([i[0] for i in vals], bins=numpy.arange(mx + 1))[0] + counts = numpy.histogram( + [i[0] for i in vals], bins=numpy.arange(mx + 1) + )[0] expected = numpy.array([float(trial) / mx] * mx) self.assertTrue(hypothesis.chi_square_test(counts, expected)) @@ -104,7 +103,9 @@ def test_goodness_of_fit_2(self): # vals = random.randint(mx, size=(5, 20)).get() # counts = numpy.histogram(vals, bins=numpy.arange(mx + 1))[0] vals = random.randint(mx, size=(5, 20)).reshape(5 * 20) - counts = numpy.histogram(numpy.asarray(vals), bins=numpy.arange(mx + 1))[0] + counts = numpy.histogram( + numpy.asarray(vals), bins=numpy.arange(mx + 1) + )[0] expected = numpy.array([float(vals.size) / mx] * mx) self.assertTrue(hypothesis.chi_square_test(counts, expected)) @@ -150,19 +151,18 @@ def test_dtype2(self, dtype): @testing.gpu class TestRandomIntegers(unittest.TestCase): - def test_normal(self): - with mock.patch('dpnp.random.sample_.randint') as m: + with mock.patch("dpnp.random.sample_.randint") as m: random.random_integers(3, 5) m.assert_called_with(3, 6, None) def test_high_is_none(self): - with mock.patch('dpnp.random.sample_.randint') as m: + with mock.patch("dpnp.random.sample_.randint") as m: random.random_integers(3, None) m.assert_called_with(1, 4, None) def test_size_is_not_none(self): - with mock.patch('dpnp.random.sample_.randint') as m: + with mock.patch("dpnp.random.sample_.randint") as m: random.random_integers(3, 5, (1, 2, 3)) 
m.assert_called_with(3, 6, (1, 2, 3)) @@ -170,7 +170,6 @@ def test_size_is_not_none(self): @testing.fix_random() @testing.gpu class TestRandomIntegers2(unittest.TestCase): - @condition.repeat(3, 10) def test_bound_1(self): vals = [random.random_integers(0, 10, (2, 3)).get() for _ in range(10)] @@ -207,7 +206,6 @@ def test_goodness_of_fit_2(self): @testing.gpu class TestChoice(unittest.TestCase): - def setUp(self): self.rs_tmp = random.generator._random_states device_id = cuda.Device().id @@ -253,54 +251,50 @@ def test_no_none(self): # @testing.gpu class TestRandomSample(unittest.TestCase): - def test_rand(self): - with mock.patch('dpnp.random.sample_.random_sample') as m: + with mock.patch("dpnp.random.sample_.random_sample") as m: random.rand(1, 2, 3, dtype=numpy.float32) - m.assert_called_once_with( - size=(1, 2, 3), dtype=numpy.float32) + m.assert_called_once_with(size=(1, 2, 3), dtype=numpy.float32) def test_rand_default_dtype(self): - with mock.patch('dpnp.random.sample_.random_sample') as m: + with mock.patch("dpnp.random.sample_.random_sample") as m: random.rand(1, 2, 3) - m.assert_called_once_with( - size=(1, 2, 3), dtype=float) + m.assert_called_once_with(size=(1, 2, 3), dtype=float) def test_rand_invalid_argument(self): with self.assertRaises(TypeError): - random.rand(1, 2, 3, unnecessary='unnecessary_argument') + random.rand(1, 2, 3, unnecessary="unnecessary_argument") def test_randn(self): - with mock.patch('dpnp.random.normal') as m: + with mock.patch("dpnp.random.normal") as m: random.randn(1, 2, 3) m.assert_called_once_with(size=(1, 2, 3)) def test_randn_default_dtype(self): - with mock.patch('dpnp.random.normal') as m: + with mock.patch("dpnp.random.normal") as m: random.randn(1, 2, 3) m.assert_called_once_with(size=(1, 2, 3)) def test_randn_invalid_argument(self): with self.assertRaises(TypeError): - random.randn(1, 2, 3, unnecessary='unnecessary_argument') + random.randn(1, 2, 3, unnecessary="unnecessary_argument") @testing.parameterize( - {'size': None}, - {'size': ()}, - {'size': 4}, - {'size': (0,)}, - {'size': (1, 0)}, + {"size": None}, + {"size": ()}, + {"size": 4}, + {"size": (0,)}, + {"size": (1, 0)}, ) @testing.fix_random() @testing.gpu class TestMultinomial(unittest.TestCase): - @condition.repeat(3, 10) @testing.for_float_dtypes() @testing.numpy_cupy_allclose(rtol=0.05) def test_multinomial(self, xp, dtype): pvals = xp.array([0.2, 0.3, 0.5], dtype) x = xp.random.multinomial(100000, pvals, self.size) - self.assertEqual(x.dtype, 'l') + self.assertEqual(x.dtype, "l") return x / 100000 diff --git a/tests/third_party/cupy/sorting_tests/test_count.py b/tests/third_party/cupy/sorting_tests/test_count.py index 58ca505cba9d..368676102da0 100644 --- a/tests/third_party/cupy/sorting_tests/test_count.py +++ b/tests/third_party/cupy/sorting_tests/test_count.py @@ -8,7 +8,6 @@ @testing.gpu class TestCount(unittest.TestCase): - @testing.for_all_dtypes() def test_count_nonzero(self, dtype): def func(xp): @@ -19,9 +18,10 @@ def func(xp): # CuPy returns zero-dimensional array instead of # returning a scalar value self.assertIsInstance(c, xp.ndarray) - self.assertEqual(c.dtype, 'l') + self.assertEqual(c.dtype, "l") self.assertEqual(c.shape, ()) return int(c) + self.assertEqual(func(numpy), func(cupy)) @testing.for_all_dtypes() @@ -33,18 +33,21 @@ def func(xp): # CuPy returns zero-dimensional array instead of # returning a scalar value self.assertIsInstance(c, xp.ndarray) - self.assertEqual(c.dtype, 'l') + self.assertEqual(c.dtype, "l") self.assertEqual(c.shape, ()) return int(c) + 
self.assertEqual(func(numpy), func(cupy)) @testing.for_all_dtypes() def test_count_nonzero_int_axis(self, dtype): for ax in range(3): + def func(xp): m = testing.shaped_random((2, 3, 4), xp, xp.bool_) a = testing.shaped_random((2, 3, 4), xp, dtype) * m return xp.count_nonzero(a, axis=ax) + testing.assert_allclose(func(numpy), func(cupy)) @testing.for_all_dtypes() @@ -58,4 +61,5 @@ def func(xp): m = testing.shaped_random((2, 3, 4), xp, xp.bool_) a = testing.shaped_random((2, 3, 4), xp, dtype) * m return xp.count_nonzero(a, axis=(ax, ay)) + testing.assert_allclose(func(numpy), func(cupy)) diff --git a/tests/third_party/cupy/sorting_tests/test_search.py b/tests/third_party/cupy/sorting_tests/test_search.py index d9e80f3e87d6..f997568734dd 100644 --- a/tests/third_party/cupy/sorting_tests/test_search.py +++ b/tests/third_party/cupy/sorting_tests/test_search.py @@ -5,12 +5,12 @@ import dpnp as cupy from tests.third_party.cupy import testing + # from cupy.core import _accelerator @testing.gpu class TestSearch(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmax_all(self, xp, dtype): @@ -26,7 +26,7 @@ def test_external_argmax_all(self, xp, dtype): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_argmax_nan(self, xp, dtype): - a = xp.array([float('nan'), -1, 1], dtype) + a = xp.array([float("nan"), -1, 1], dtype) return a.argmax() @testing.for_all_dtypes(no_complex=True) @@ -94,7 +94,7 @@ def test_argmin_all(self, xp, dtype): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_argmin_nan(self, xp, dtype): - a = xp.array([float('nan'), -1, 1], dtype) + a = xp.array([float("nan"), -1, 1], dtype) return a.argmin() @testing.for_all_dtypes(no_complex=True) @@ -163,73 +163,77 @@ def test_argmin_zero_size_axis1(self, xp, dtype): # This class compares CUB results against NumPy's # TODO(leofang): test axis after support is added # @testing.parameterize(*testing.product({ - # 'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)], - # 'order': ('C', 'F'), +# 'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)], +# 'order': ('C', 'F'), # })) # @testing.gpu # @unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled') # class TestCubReduction(unittest.TestCase): - # def setUp(self): - # self.old_accelerators = _accelerator.get_routine_accelerators() - # _accelerator.set_routine_accelerators(['cub']) - - # def tearDown(self): - # _accelerator.set_routine_accelerators(self.old_accelerators) - - # @testing.for_dtypes('bhilBHILefdFD') - # @testing.numpy_cupy_allclose(rtol=1E-5) - # def test_cub_argmin(self, xp, dtype): - # a = testing.shaped_random(self.shape, xp, dtype) - # if self.order == 'C': - # a = xp.ascontiguousarray(a) - # else: - # a = xp.asfortranarray(a) - - # if xp is numpy: - # return a.argmin() - - # # xp is cupy, first ensure we really use CUB - # ret = cupy.empty(()) # Cython checks return type, need to fool it - # func = 'cupy.core._routines_statistics.cub.device_reduce' - # with testing.AssertFunctionIsCalled(func, return_value=ret): - # a.argmin() - # # ...then perform the actual computation - # return a.argmin() - - # @testing.for_dtypes('bhilBHILefdFD') - # @testing.numpy_cupy_allclose(rtol=1E-5) - # def test_cub_argmax(self, xp, dtype): - # a = testing.shaped_random(self.shape, xp, dtype) - # if self.order == 'C': - # a = xp.ascontiguousarray(a) - # else: - # a = xp.asfortranarray(a) - - # if xp is 
numpy: - # return a.argmax() - - # # xp is cupy, first ensure we really use CUB - # ret = cupy.empty(()) # Cython checks return type, need to fool it - # func = 'cupy.core._routines_statistics.cub.device_reduce' - # with testing.AssertFunctionIsCalled(func, return_value=ret): - # a.argmax() - # # ...then perform the actual computation - # return a.argmax() +# def setUp(self): +# self.old_accelerators = _accelerator.get_routine_accelerators() +# _accelerator.set_routine_accelerators(['cub']) + +# def tearDown(self): +# _accelerator.set_routine_accelerators(self.old_accelerators) + +# @testing.for_dtypes('bhilBHILefdFD') +# @testing.numpy_cupy_allclose(rtol=1E-5) +# def test_cub_argmin(self, xp, dtype): +# a = testing.shaped_random(self.shape, xp, dtype) +# if self.order == 'C': +# a = xp.ascontiguousarray(a) +# else: +# a = xp.asfortranarray(a) + +# if xp is numpy: +# return a.argmin() + +# # xp is cupy, first ensure we really use CUB +# ret = cupy.empty(()) # Cython checks return type, need to fool it +# func = 'cupy.core._routines_statistics.cub.device_reduce' +# with testing.AssertFunctionIsCalled(func, return_value=ret): +# a.argmin() +# # ...then perform the actual computation +# return a.argmin() + +# @testing.for_dtypes('bhilBHILefdFD') +# @testing.numpy_cupy_allclose(rtol=1E-5) +# def test_cub_argmax(self, xp, dtype): +# a = testing.shaped_random(self.shape, xp, dtype) +# if self.order == 'C': +# a = xp.ascontiguousarray(a) +# else: +# a = xp.asfortranarray(a) + +# if xp is numpy: +# return a.argmax() + +# # xp is cupy, first ensure we really use CUB +# ret = cupy.empty(()) # Cython checks return type, need to fool it +# func = 'cupy.core._routines_statistics.cub.device_reduce' +# with testing.AssertFunctionIsCalled(func, return_value=ret): +# a.argmax() +# # ...then perform the actual computation +# return a.argmax() @testing.gpu -@testing.parameterize(*testing.product({ - 'func': ['argmin', 'argmax'], - 'is_module': [True, False], - 'shape': [(3, 4), ()], -})) +@testing.parameterize( + *testing.product( + { + "func": ["argmin", "argmax"], + "is_module": [True, False], + "shape": [(3, 4), ()], + } + ) +) class TestArgMinMaxDtype(unittest.TestCase): - @testing.for_dtypes( dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64], - name='result_dtype') - @testing.for_all_dtypes(name='in_dtype') + name="result_dtype", + ) + @testing.for_all_dtypes(name="in_dtype") def test_argminmax_dtype(self, in_dtype, result_dtype): a = testing.shaped_random(self.shape, cupy, in_dtype) if self.is_module: @@ -243,16 +247,14 @@ def test_argminmax_dtype(self, in_dtype, result_dtype): @testing.parameterize( - {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)}, - {'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)}, - {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)}, - {'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)}, + {"cond_shape": (2, 3, 4), "x_shape": (2, 3, 4), "y_shape": (2, 3, 4)}, + {"cond_shape": (4,), "x_shape": (2, 3, 4), "y_shape": (2, 3, 4)}, + {"cond_shape": (2, 3, 4), "x_shape": (2, 3, 4), "y_shape": (3, 4)}, + {"cond_shape": (3, 4), "x_shape": (2, 3, 4), "y_shape": (4,)}, ) @testing.gpu class TestWhereTwoArrays(unittest.TestCase): - - @testing.for_all_dtypes_combination( - names=['cond_type', 'x_type', 'y_type']) + @testing.for_all_dtypes_combination(names=["cond_type", "x_type", "y_type"]) @testing.numpy_cupy_allclose() def test_where_two_arrays(self, xp, cond_type, x_type, y_type): m = 
testing.shaped_random(self.cond_shape, xp, xp.bool_) @@ -265,14 +267,13 @@ def test_where_two_arrays(self, xp, cond_type, x_type, y_type): @testing.parameterize( - {'cond_shape': (2, 3, 4)}, - {'cond_shape': (4,)}, - {'cond_shape': (2, 3, 4)}, - {'cond_shape': (3, 4)}, + {"cond_shape": (2, 3, 4)}, + {"cond_shape": (4,)}, + {"cond_shape": (2, 3, 4)}, + {"cond_shape": (3, 4)}, ) @testing.gpu class TestWhereCond(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_where_cond(self, xp, dtype): @@ -283,7 +284,6 @@ def test_where_cond(self, xp, dtype): @testing.gpu class TestWhereError(unittest.TestCase): - def test_one_argument(self): for xp in (numpy, cupy): cond = testing.shaped_random((3, 4), xp, dtype=xp.bool_) @@ -293,13 +293,12 @@ def test_one_argument(self): @testing.parameterize( - {'array': numpy.empty((0,))}, - {'array': numpy.empty((0, 2))}, - {'array': numpy.empty((0, 2, 0))}, + {"array": numpy.empty((0,))}, + {"array": numpy.empty((0, 2))}, + {"array": numpy.empty((0, 2, 0))}, ) @testing.gpu class TestNonzero(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_nonzero(self, xp, dtype): @@ -308,13 +307,12 @@ def test_nonzero(self, xp, dtype): @testing.parameterize( - {'array': numpy.array(0)}, - {'array': numpy.array(1)}, + {"array": numpy.array(0)}, + {"array": numpy.array(1)}, ) @testing.gpu -@testing.with_requires('numpy>=1.17.0') +@testing.with_requires("numpy>=1.17.0") class TestNonzeroZeroDimension(unittest.TestCase): - @testing.for_all_dtypes() def test_nonzero(self, dtype): for xp in (numpy, cupy): @@ -324,15 +322,14 @@ def test_nonzero(self, dtype): @testing.parameterize( - {'array': numpy.array(0)}, - {'array': numpy.array(1)}, - {'array': numpy.empty((0,))}, - {'array': numpy.empty((0, 2))}, - {'array': numpy.empty((0, 2, 0))}, + {"array": numpy.array(0)}, + {"array": numpy.array(1)}, + {"array": numpy.empty((0,))}, + {"array": numpy.empty((0, 2))}, + {"array": numpy.empty((0, 2, 0))}, ) @testing.gpu class TestFlatNonzero(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_flatnonzero(self, xp, dtype): @@ -341,37 +338,36 @@ def test_flatnonzero(self, xp, dtype): @testing.parameterize( - {'array': numpy.empty((0,))}, - {'array': numpy.empty((0, 2))}, - {'array': numpy.empty((0, 2, 0))}, + {"array": numpy.empty((0,))}, + {"array": numpy.empty((0, 2))}, + {"array": numpy.empty((0, 2, 0))}, ) @testing.gpu class TestArgwhere(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_argwhere(self, xp, dtype): array = xp.array(self.array, dtype=dtype) return xp.argwhere(array) + # DPNP_BUG # dpnp/backend.pyx:86: in dpnp.backend.dpnp_array - # raise TypeError(f"Intel NumPy array(): Unsupported non-sequence obj={type(obj)}") +# raise TypeError(f"Intel NumPy array(): Unsupported non-sequence obj={type(obj)}") # E TypeError: Intel NumPy array(): Unsupported non-sequence obj= # @testing.parameterize( - # {'array': cupy.array(1)}, +# {'array': cupy.array(1)}, # ) # @testing.gpu # class TestArgwhereZeroDimension(unittest.TestCase): - # def test_argwhere(self): - # with testing.assert_warns(DeprecationWarning): - # return cupy.nonzero(self.array) +# def test_argwhere(self): +# with testing.assert_warns(DeprecationWarning): +# return cupy.nonzero(self.array) @testing.gpu class TestNanArgMin(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_nanargmin_all(self, xp, dtype): @@ -381,33 +377,33 
@@ def test_nanargmin_all(self, xp, dtype): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmin_nan(self, xp, dtype): - a = xp.array([float('nan'), -1, 1], dtype) + a = xp.array([float("nan"), -1, 1], dtype) return xp.nanargmin(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmin_nan2(self, xp, dtype): - a = xp.array([float('nan'), float('nan'), -1, 1], dtype) + a = xp.array([float("nan"), float("nan"), -1, 1], dtype) return xp.nanargmin(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmin_nan3(self, xp, dtype): - a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype) + a = xp.array([float("nan"), float("nan"), -1, 1, 1.0, -2.0], dtype) return xp.nanargmin(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmin_nan4(self, xp, dtype): - a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')], - dtype) + a = xp.array([-1, 1, 1.0, -2.0, float("nan"), float("nan")], dtype) return xp.nanargmin(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmin_nan5(self, xp, dtype): - a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1], - dtype) + a = xp.array( + [-1, 1, 1.0, -2.0, float("nan"), float("nan"), -1, 1], dtype + ) return xp.nanargmin(a) @testing.for_all_dtypes(no_complex=True) @@ -463,7 +459,6 @@ def test_nanargmin_zero_size_axis1(self, xp, dtype): @testing.gpu class TestNanArgMax(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_nanargmax_all(self, xp, dtype): @@ -473,33 +468,33 @@ def test_nanargmax_all(self, xp, dtype): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmax_nan(self, xp, dtype): - a = xp.array([float('nan'), -1, 1], dtype) + a = xp.array([float("nan"), -1, 1], dtype) return xp.nanargmax(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmax_nan2(self, xp, dtype): - a = xp.array([float('nan'), float('nan'), -1, 1], dtype) + a = xp.array([float("nan"), float("nan"), -1, 1], dtype) return xp.nanargmax(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmax_nan3(self, xp, dtype): - a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype) + a = xp.array([float("nan"), float("nan"), -1, 1, 1.0, -2.0], dtype) return xp.nanargmax(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmax_nan4(self, xp, dtype): - a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')], - dtype) + a = xp.array([-1, 1, 1.0, -2.0, float("nan"), float("nan")], dtype) return xp.nanargmax(a) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(accept_error=ValueError) def test_nanargmax_nan5(self, xp, dtype): - a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1], - dtype) + a = xp.array( + [-1, 1, 1.0, -2.0, float("nan"), float("nan"), -1, 1], dtype + ) return xp.nanargmax(a) @testing.for_all_dtypes(no_complex=True) @@ -554,98 +549,98 @@ def test_nanargmax_zero_size_axis1(self, xp, dtype): @testing.gpu -@testing.parameterize(*testing.product( - {'bins': [ - [], - [0, 1, 2, 4, 10], - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], - [0.0, 1.0, 2.5, 
4.0, 10.0], - [-1.0, 1.0, 2.5, 4.0, 20.0], - [1.5, 2.5, 4.0, 6.0], - [float('-inf'), 1.5, 2.5, 4.0, 6.0], - [1.5, 2.5, 4.0, 6.0, float('inf')], - [float('-inf'), 1.5, 2.5, 4.0, 6.0, float('inf')], - [0.0, 1.0, 1.0, 4.0, 4.0, 10.0], - [0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0], - ], - 'side': ['left', 'right'], - 'shape': [(), (10,), (6, 3, 3)]}) +@testing.parameterize( + *testing.product( + { + "bins": [ + [], + [0, 1, 2, 4, 10], + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], + [0.0, 1.0, 2.5, 4.0, 10.0], + [-1.0, 1.0, 2.5, 4.0, 20.0], + [1.5, 2.5, 4.0, 6.0], + [float("-inf"), 1.5, 2.5, 4.0, 6.0], + [1.5, 2.5, 4.0, 6.0, float("inf")], + [float("-inf"), 1.5, 2.5, 4.0, 6.0, float("inf")], + [0.0, 1.0, 1.0, 4.0, 4.0, 10.0], + [0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0], + ], + "side": ["left", "right"], + "shape": [(), (10,), (6, 3, 3)], + } + ) ) class TestSearchSorted(unittest.TestCase): - @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_searchsorted(self, xp, dtype): x = testing.shaped_arange(self.shape, xp, dtype) bins = xp.array(self.bins) y = xp.searchsorted(bins, x, side=self.side) - return y, + return (y,) @testing.gpu -@testing.parameterize( - {'side': 'left'}, - {'side': 'right'}) +@testing.parameterize({"side": "left"}, {"side": "right"}) class TestSearchSortedNanInf(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_searchsorted_nanbins(self, xp): x = testing.shaped_arange((10,), xp, xp.float64) - bins = xp.array([0, 1, 2, 4, 10, float('nan')]) + bins = xp.array([0, 1, 2, 4, 10, float("nan")]) y = xp.searchsorted(bins, x, side=self.side) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_searchsorted_nan(self, xp): x = testing.shaped_arange((10,), xp, xp.float64) - x[5] = float('nan') + x[5] = float("nan") bins = xp.array([0, 1, 2, 4, 10]) y = xp.searchsorted(bins, x, side=self.side) - return y, + return (y,) -# DPNP_BUG -# Segmentation fault on access to negative index # x[-1] = float('nan') ####### + # DPNP_BUG + # Segmentation fault on access to negative index # x[-1] = float('nan') ####### # @testing.numpy_cupy_array_equal() # def test_searchsorted_nan_last(self, xp): - # x = testing.shaped_arange((10,), xp, xp.float64) - # x[-1] = float('nan') - # bins = xp.array([0, 1, 2, 4, float('nan')]) - # y = xp.searchsorted(bins, x, side=self.side) - # return y, + # x = testing.shaped_arange((10,), xp, xp.float64) + # x[-1] = float('nan') + # bins = xp.array([0, 1, 2, 4, float('nan')]) + # y = xp.searchsorted(bins, x, side=self.side) + # return y, # @testing.numpy_cupy_array_equal() # def test_searchsorted_nan_last_repeat(self, xp): - # x = testing.shaped_arange((10,), xp, xp.float64) - # x[-1] = float('nan') - # bins = xp.array([0, 1, 2, float('nan'), float('nan')]) - # y = xp.searchsorted(bins, x, side=self.side) - # return y, + # x = testing.shaped_arange((10,), xp, xp.float64) + # x[-1] = float('nan') + # bins = xp.array([0, 1, 2, float('nan'), float('nan')]) + # y = xp.searchsorted(bins, x, side=self.side) + # return y, # @testing.numpy_cupy_array_equal() # def test_searchsorted_all_nans(self, xp): - # x = testing.shaped_arange((10,), xp, xp.float64) - # x[-1] = float('nan') - # bins = xp.array([float('nan'), float('nan'), float('nan'), - # float('nan'), float('nan')]) - # y = xp.searchsorted(bins, x, side=self.side) - # return y, -############################################################################### + # x = testing.shaped_arange((10,), xp, xp.float64) + # x[-1] = float('nan') + # bins = xp.array([float('nan'), 
float('nan'), float('nan'), + # float('nan'), float('nan')]) + # y = xp.searchsorted(bins, x, side=self.side) + # return y, + ############################################################################### @testing.numpy_cupy_array_equal() def test_searchsorted_inf(self, xp): x = testing.shaped_arange((10,), xp, xp.float64) - x[5] = float('inf') + x[5] = float("inf") bins = xp.array([0, 1, 2, 4, 10]) y = xp.searchsorted(bins, x, side=self.side) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_searchsorted_minf(self, xp): x = testing.shaped_arange((10,), xp, xp.float64) - x[5] = float('-inf') + x[5] = float("-inf") bins = xp.array([0, 1, 2, 4, 10]) y = xp.searchsorted(bins, x, side=self.side) - return y, + return (y,) @testing.gpu @@ -664,14 +659,13 @@ def test_searchsorted_ndbins(self): @testing.gpu class TestSearchSortedWithSorter(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_sorter(self, xp): x = testing.shaped_arange((12,), xp, xp.float64) bins = xp.array([10, 4, 2, 1, 8]) sorter = xp.array([3, 2, 1, 4, 0]) y = xp.searchsorted(bins, x, sorter=sorter) - return y, + return (y,) def test_invalid_sorter(self): for xp in (numpy, cupy): diff --git a/tests/third_party/cupy/sorting_tests/test_sort.py b/tests/third_party/cupy/sorting_tests/test_sort.py index 966ccbf309b6..3e13907e53bf 100644 --- a/tests/third_party/cupy/sorting_tests/test_sort.py +++ b/tests/third_party/cupy/sorting_tests/test_sort.py @@ -170,7 +170,7 @@ def test_external_sort_invalid_negative_axis2(self): # Test NaN ordering - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan1(self, xp, dtype): a = testing.shaped_random((10,), xp, dtype) @@ -178,7 +178,7 @@ def test_nan1(self, xp, dtype): out = xp.sort(a) return out - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan2(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) @@ -186,7 +186,7 @@ def test_nan2(self, xp, dtype): out = xp.sort(a, axis=0) return out - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan3(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) @@ -194,7 +194,7 @@ def test_nan3(self, xp, dtype): out = xp.sort(a, axis=1) return out - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan4(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) @@ -209,7 +209,7 @@ class TestLexsort(unittest.TestCase): # Test ranks # TODO(niboshi): Fix xfail - @pytest.mark.xfail(reason='Explicit error types required') + @pytest.mark.xfail(reason="Explicit error types required") def test_lexsort_zero_dim(self): for xp in (numpy, cupy): a = testing.shaped_random((), xp) @@ -223,8 +223,9 @@ def test_lexsort_one_dim(self, xp): @testing.numpy_cupy_array_equal def test_lexsort_two_dim(self, xp): - a = xp.array([[9, 4, 0, 4, 0, 2, 1], - [1, 5, 1, 4, 3, 4, 4]]) # from numpy.lexsort example + a = xp.array( + [[9, 4, 0, 4, 0, 2, 1], [1, 5, 1, 4, 3, 4, 4]] + ) # from numpy.lexsort example return xp.lexsort(a) def test_lexsort_three_or_more_dim(self): @@ -242,21 +243,21 @@ def test_lexsort_dtype(self, xp, dtype): # Test NaN ordering - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan1(self, xp, dtype): a = testing.shaped_random((2, 10), xp, dtype) a[0, 2] = a[0, 6] = xp.nan return xp.lexsort(a) - @testing.for_dtypes('efdFD') + 
@testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan2(self, xp, dtype): a = testing.shaped_random((2, 10), xp, dtype) a[1, 2] = a[0, 6] = xp.nan return xp.lexsort(a) - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan3(self, xp, dtype): a = testing.shaped_random((2, 10), xp, dtype) @@ -281,12 +282,15 @@ def test_F_order(self, xp): return xp.lexsort(a) -@testing.parameterize(*testing.product({ - 'external': [False, True], -})) +@testing.parameterize( + *testing.product( + { + "external": [False, True], + } + ) +) @testing.gpu class TestArgsort(unittest.TestCase): - def argsort(self, a, axis=-1): if self.external: xp = cupy.get_array_module(a) @@ -385,14 +389,14 @@ def test_argsort_original_array_not_modified_multi_dim(self): # Test NaN ordering - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan1(self, xp, dtype): a = testing.shaped_random((10,), xp, dtype) a[2] = a[6] = xp.nan return self.argsort(a) - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_nan2(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) @@ -406,7 +410,7 @@ class TestMsort(unittest.TestCase): # Test base cases # TODO(niboshi): Fix xfail - @pytest.mark.xfail(reason='Explicit error types required') + @pytest.mark.xfail(reason="Explicit error types required") def test_msort_zero_dim(self): for xp in (numpy, cupy): a = testing.shaped_random((), xp) @@ -428,7 +432,6 @@ def test_msort_multi_dim(self, xp, dtype): @testing.gpu class TestSort_complex(unittest.TestCase): - def test_sort_complex_zero_dim(self): for xp in (numpy, cupy): a = testing.shaped_random((), xp) @@ -447,7 +450,7 @@ def test_sort_complex_ndim(self, xp, dtype): a = testing.shaped_random((2, 5, 3), xp, dtype) return a, xp.sort_complex(a) - @testing.for_dtypes('efdFD') + @testing.for_dtypes("efdFD") @testing.numpy_cupy_array_equal() def test_sort_complex_nan(self, xp, dtype): a = testing.shaped_random((2, 3, 5), xp, dtype) @@ -455,13 +458,16 @@ def test_sort_complex_nan(self, xp, dtype): return a, xp.sort_complex(a) -@testing.parameterize(*testing.product({ - 'external': [False, True], - 'length': [10, 20000], -})) +@testing.parameterize( + *testing.product( + { + "external": [False, True], + "length": [10, 20000], + } + ) +) @testing.gpu class TestPartition(unittest.TestCase): - def partition(self, a, kth, axis=-1): if self.external: xp = cupy.get_array_module(a) @@ -485,8 +491,8 @@ def test_partition_one_dim(self, xp, dtype): a = testing.shaped_random((self.length,), xp, dtype) kth = 2 x = self.partition(a, kth) - self.assertTrue(xp.all(x[0:kth] <= x[kth:kth + 1])) - self.assertTrue(xp.all(x[kth:kth + 1] <= x[kth + 1:])) + self.assertTrue(xp.all(x[0:kth] <= x[kth : kth + 1])) + self.assertTrue(xp.all(x[kth : kth + 1] <= x[kth + 1 :])) return x[kth] @testing.for_all_dtypes() @@ -495,9 +501,9 @@ def test_partition_multi_dim(self, xp, dtype): a = testing.shaped_random((10, 10, self.length), xp, dtype) kth = 2 x = self.partition(a, kth) - self.assertTrue(xp.all(x[:, :, 0:kth] <= x[:, :, kth:kth + 1])) - self.assertTrue(xp.all(x[:, :, kth:kth + 1] <= x[:, :, kth + 1:])) - return x[:, :, kth:kth + 1] + self.assertTrue(xp.all(x[:, :, 0:kth] <= x[:, :, kth : kth + 1])) + self.assertTrue(xp.all(x[:, :, kth : kth + 1] <= x[:, :, kth + 1 :])) + return x[:, :, kth : kth + 1] # Test non-contiguous array @@ -512,8 +518,8 @@ def test_partition_non_contiguous(self, xp): 
return 0 # dummy else: x = self.partition(a, kth) - self.assertTrue(xp.all(x[0:kth] <= x[kth:kth + 1])) - self.assertTrue(xp.all(x[kth:kth + 1] <= x[kth + 1:])) + self.assertTrue(xp.all(x[0:kth] <= x[kth : kth + 1])) + self.assertTrue(xp.all(x[kth : kth + 1] <= x[kth + 1 :])) return x[kth] # Test kth @@ -606,12 +612,15 @@ def test_partition_invalid_negative_axis2(self): return self.partition(a, kth, axis=axis) -@testing.parameterize(*testing.product({ - 'external': [False, True], -})) +@testing.parameterize( + *testing.product( + { + "external": [False, True], + } + ) +) @testing.gpu class TestArgpartition(unittest.TestCase): - def argpartition(self, a, kth, axis=-1): if self.external: xp = cupy.get_array_module(a) @@ -635,7 +644,7 @@ def test_argpartition_one_dim(self, xp, dtype): kth = 2 idx = self.argpartition(a, kth) self.assertTrue((a[idx[:kth]] < a[idx[kth]]).all()) - self.assertTrue((a[idx[kth]] < a[idx[kth + 1:]]).all()) + self.assertTrue((a[idx[kth]] < a[idx[kth + 1 :]]).all()) return idx[kth] # TODO(leofang): test all dtypes -- this workaround needs to be kept, @@ -648,21 +657,29 @@ def test_argpartition_multi_dim(self, xp, dtype): idx = self.argpartition(a, kth) rows = [[[0]], [[1]], [[2]]] cols = [[[0], [1], [2]]] - self.assertTrue((a[rows, cols, idx[:, :, :kth]] < - a[rows, cols, idx[:, :, kth:kth + 1]]).all()) - self.assertTrue((a[rows, cols, idx[:, :, kth:kth + 1]] < - a[rows, cols, idx[:, :, kth + 1:]]).all()) - return idx[:, :, kth:kth + 1] + self.assertTrue( + ( + a[rows, cols, idx[:, :, :kth]] + < a[rows, cols, idx[:, :, kth : kth + 1]] + ).all() + ) + self.assertTrue( + ( + a[rows, cols, idx[:, :, kth : kth + 1]] + < a[rows, cols, idx[:, :, kth + 1 :]] + ).all() + ) + return idx[:, :, kth : kth + 1] # Test non-contiguous array @testing.numpy_cupy_equal() def test_argpartition_non_contiguous(self, xp): - a = testing.shaped_random((10,), xp, 'i', 100)[::2] + a = testing.shaped_random((10,), xp, "i", 100)[::2] kth = 2 idx = self.argpartition(a, kth) self.assertTrue((a[idx[:kth]] < a[idx[kth]]).all()) - self.assertTrue((a[idx[kth]] < a[idx[kth + 1:]]).all()) + self.assertTrue((a[idx[kth]] < a[idx[kth + 1 :]]).all()) return idx[kth] # Test kth @@ -674,7 +691,7 @@ def test_argpartition_sequence_kth(self, xp): idx = self.argpartition(a, kth) for _kth in kth: self.assertTrue((a[idx[:_kth]] < a[idx[_kth]]).all()) - self.assertTrue((a[idx[_kth]] < a[idx[_kth + 1:]]).all()) + self.assertTrue((a[idx[_kth]] < a[idx[_kth + 1 :]]).all()) return (idx[2], idx[4]) @testing.numpy_cupy_equal() @@ -683,7 +700,7 @@ def test_argpartition_negative_kth(self, xp): kth = -3 idx = self.argpartition(a, kth) self.assertTrue((a[idx[:kth]] < a[idx[kth]]).all()) - self.assertTrue((a[idx[kth]] < a[idx[kth + 1:]]).all()) + self.assertTrue((a[idx[kth]] < a[idx[kth + 1 :]]).all()) return idx[kth] def test_argpartition_invalid_kth(self): @@ -710,11 +727,19 @@ def test_argpartition_axis(self, xp): idx = self.argpartition(a, kth, axis=axis) rows = [[[0], [1], [2]]] cols = [[[0, 1, 2]]] - self.assertTrue((a[idx[:kth, :, :], rows, cols] < - a[idx[kth:kth + 1, :, :], rows, cols]).all()) - self.assertTrue((a[idx[kth:kth + 1, :, :], rows, cols] < - a[idx[kth + 1:, :, :], rows, cols]).all()) - return idx[kth:kth + 1, :, :] + self.assertTrue( + ( + a[idx[:kth, :, :], rows, cols] + < a[idx[kth : kth + 1, :, :], rows, cols] + ).all() + ) + self.assertTrue( + ( + a[idx[kth : kth + 1, :, :], rows, cols] + < a[idx[kth + 1 :, :, :], rows, cols] + ).all() + ) + return idx[kth : kth + 1, :, :] 
@testing.numpy_cupy_array_equal() def test_argpartition_negative_axis(self, xp): @@ -724,11 +749,19 @@ def test_argpartition_negative_axis(self, xp): idx = self.argpartition(a, kth, axis=axis) rows = [[[0]], [[1]], [[2]]] cols = [[[0], [1], [2]]] - self.assertTrue((a[rows, cols, idx[:, :, :kth]] < - a[rows, cols, idx[:, :, kth:kth + 1]]).all()) - self.assertTrue((a[rows, cols, idx[:, :, kth:kth + 1]] < - a[rows, cols, idx[:, :, kth + 1:]]).all()) - return idx[:, :, kth:kth + 1] + self.assertTrue( + ( + a[rows, cols, idx[:, :, :kth]] + < a[rows, cols, idx[:, :, kth : kth + 1]] + ).all() + ) + self.assertTrue( + ( + a[rows, cols, idx[:, :, kth : kth + 1]] + < a[rows, cols, idx[:, :, kth + 1 :]] + ).all() + ) + return idx[:, :, kth : kth + 1] @testing.numpy_cupy_equal() def test_argpartition_none_axis(self, xp): @@ -738,7 +771,7 @@ def test_argpartition_none_axis(self, xp): idx = self.argpartition(a, kth, axis=axis) a1 = a.flatten() self.assertTrue((a1[idx[:kth]] < a1[idx[kth]]).all()) - self.assertTrue((a1[idx[kth]] < a1[idx[kth + 1:]]).all()) + self.assertTrue((a1[idx[kth]] < a1[idx[kth + 1 :]]).all()) return idx[kth] def test_argpartition_invalid_axis1(self): diff --git a/tests/third_party/cupy/statistics_tests/test_correlation.py b/tests/third_party/cupy/statistics_tests/test_correlation.py index f23d49114277..fff44fe3b34f 100644 --- a/tests/third_party/cupy/statistics_tests/test_correlation.py +++ b/tests/third_party/cupy/statistics_tests/test_correlation.py @@ -9,7 +9,6 @@ @testing.gpu class TestCorrcoef(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_corrcoef(self, xp, dtype): @@ -39,7 +38,6 @@ def test_corrcoef_rowvar(self, xp, dtype): @testing.gpu class TestCov(unittest.TestCase): - def generate_input(self, a_shape, y_shape, xp, dtype): a = testing.shaped_arange(a_shape, xp, dtype) y = None @@ -49,22 +47,45 @@ def generate_input(self, a_shape, y_shape, xp, dtype): @testing.for_all_dtypes() @testing.numpy_cupy_allclose() - def check(self, a_shape, y_shape=None, rowvar=True, bias=False, - ddof=None, xp=None, dtype=None): + def check( + self, + a_shape, + y_shape=None, + rowvar=True, + bias=False, + ddof=None, + xp=None, + dtype=None, + ): a, y = self.generate_input(a_shape, y_shape, xp, dtype) return xp.cov(a, y, rowvar, bias, ddof) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() - def check_warns(self, a_shape, y_shape=None, rowvar=True, bias=False, - ddof=None, xp=None, dtype=None): + def check_warns( + self, + a_shape, + y_shape=None, + rowvar=True, + bias=False, + ddof=None, + xp=None, + dtype=None, + ): with testing.assert_warns(RuntimeWarning): a, y = self.generate_input(a_shape, y_shape, xp, dtype) return xp.cov(a, y, rowvar, bias, ddof) @testing.for_all_dtypes() - def check_raises(self, a_shape, y_shape=None, rowvar=True, bias=False, - ddof=None, dtype=None): + def check_raises( + self, + a_shape, + y_shape=None, + rowvar=True, + bias=False, + ddof=None, + dtype=None, + ): for xp in (numpy, cupy): a, y = self.generate_input(a_shape, y_shape, xp, dtype) with pytest.raises(ValueError): @@ -92,13 +113,16 @@ def test_cov_empty(self): @testing.gpu -@testing.parameterize(*testing.product({ - 'mode': ['valid', 'same', 'full'], - 'shape1': [(5,), (6,), (20,), (21,)], - 'shape2': [(5,), (6,), (20,), (21,)], -})) +@testing.parameterize( + *testing.product( + { + "mode": ["valid", "same", "full"], + "shape1": [(5,), (6,), (20,), (21,)], + "shape2": [(5,), (6,), (20,), (21,)], + } + ) +) class 
TestCorrelateShapeCombination(unittest.TestCase): - @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-4) def test_correlate(self, xp, dtype): @@ -108,11 +132,8 @@ def test_correlate(self, xp, dtype): @testing.gpu -@testing.parameterize(*testing.product({ - 'mode': ['valid', 'full', 'same'] -})) +@testing.parameterize(*testing.product({"mode": ["valid", "full", "same"]})) class TestCorrelate(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-5) def test_correlate_non_contiguous(self, xp, dtype): @@ -127,7 +148,7 @@ def test_correlate_large_non_contiguous(self, xp, dtype): b = testing.shaped_arange((1000,), xp, dtype) return xp.correlate(a[200::], b[10::700], mode=self.mode) - @testing.for_all_dtypes_combination(names=['dtype1', 'dtype2']) + @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) @testing.numpy_cupy_allclose(rtol=1e-2) def test_correlate_diff_types(self, xp, dtype1, dtype2): a = testing.shaped_random((200,), xp, dtype1) @@ -136,12 +157,9 @@ def test_correlate_diff_types(self, xp, dtype1, dtype2): @testing.gpu -@testing.parameterize(*testing.product({ - 'mode': ['valid', 'same', 'full'] -})) +@testing.parameterize(*testing.product({"mode": ["valid", "same", "full"]})) class TestCorrelateInvalid(unittest.TestCase): - - @testing.with_requires('numpy>=1.18') + @testing.with_requires("numpy>=1.18") @testing.for_all_dtypes() def test_correlate_empty(self, dtype): for xp in (numpy, cupy): diff --git a/tests/third_party/cupy/statistics_tests/test_histogram.py b/tests/third_party/cupy/statistics_tests/test_histogram.py index 0560dc00256e..52b75101ed3c 100644 --- a/tests/third_party/cupy/statistics_tests/test_histogram.py +++ b/tests/third_party/cupy/statistics_tests/test_histogram.py @@ -6,6 +6,7 @@ import dpnp as cupy from tests.third_party.cupy import testing + # from cupy.core import _accelerator @@ -13,24 +14,28 @@ # as it casts an input array to intp. # And it does not support uint32, int64 and uint64 on 32-bit environment. 
_all_types = ( - numpy.float16, numpy.float32, numpy.float64, - numpy.int8, numpy.int16, numpy.int32, - numpy.uint8, numpy.uint16, - numpy.bool_) -_signed_types = ( - numpy.int8, numpy.int16, numpy.int32, - numpy.bool_) - -if sys.maxsize > 2 ** 32: + numpy.float16, + numpy.float32, + numpy.float64, + numpy.int8, + numpy.int16, + numpy.int32, + numpy.uint8, + numpy.uint16, + numpy.bool_, +) +_signed_types = (numpy.int8, numpy.int16, numpy.int32, numpy.bool_) + +if sys.maxsize > 2**32: _all_types = _all_types + (numpy.int64, numpy.uint32) _signed_types = _signed_types + (numpy.int64,) -def for_all_dtypes_bincount(name='dtype'): +def for_all_dtypes_bincount(name="dtype"): return testing.for_dtypes(_all_types, name=name) -def for_signed_dtypes_bincount(name='dtype'): +def for_signed_dtypes_bincount(name="dtype"): return testing.for_dtypes(_signed_types, name=name) @@ -40,7 +45,6 @@ def for_all_dtypes_combination_bincount(names): @testing.gpu class TestHistogram(unittest.TestCase): - @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_histogram(self, xp, dtype): @@ -69,7 +73,7 @@ def test_histogram_density(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_histogram_range_lower_outliers(self, xp, dtype): # Check that lower outliers are not tallied - a = xp.arange(10, dtype=dtype) + .5 + a = xp.arange(10, dtype=dtype) + 0.5 h, b = xp.histogram(a, range=[0, 9]) assert int(h.sum()) == 9 return h, b @@ -78,7 +82,7 @@ def test_histogram_range_lower_outliers(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_histogram_range_upper_outliers(self, xp, dtype): # Check that upper outliers are not tallied - a = xp.arange(10, dtype=dtype) + .5 + a = xp.arange(10, dtype=dtype) + 0.5 h, b = xp.histogram(a, range=[1, 10]) assert int(h.sum()) == 9 return h, b @@ -86,7 +90,7 @@ def test_histogram_range_upper_outliers(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_histogram_range_with_density(self, xp, dtype): - a = xp.arange(10, dtype=dtype) + .5 + a = xp.arange(10, dtype=dtype) + 0.5 h, b = xp.histogram(a, range=[1, 9], density=True) # check normalization testing.assert_allclose(float((h * xp.diff(b)).sum()), 1) @@ -95,8 +99,8 @@ def test_histogram_range_with_density(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_histogram_range_with_weights_and_density(self, xp, dtype): - a = xp.arange(10, dtype=dtype) + .5 - w = xp.arange(10, dtype=dtype) + .5 + a = xp.arange(10, dtype=dtype) + 0.5 + w = xp.arange(10, dtype=dtype) + 0.5 h, b = xp.histogram(a, range=[1, 9], weights=w, density=True) testing.assert_allclose(float((h * xp.diff(b)).sum()), 1) return h @@ -115,8 +119,8 @@ def test_histogram_invalid_range2(self): @testing.for_all_dtypes(no_bool=True, no_complex=True) def test_histogram_weights_mismatch(self, dtype): for xp in (numpy, cupy): - a = xp.arange(10, dtype=dtype) + .5 - w = xp.arange(11, dtype=dtype) + .5 + a = xp.arange(10, dtype=dtype) + 0.5 + w = xp.arange(11, dtype=dtype) + 0.5 with pytest.raises(ValueError): xp.histogram(a, range=[1, 9], weights=w, density=True) @@ -175,7 +179,8 @@ def test_histogram_int_weights_normalized(self, xp, dtype): w = xp.asarray([4, 3, 2, 1], dtype=dtype) wa, wb = xp.histogram(v, bins=4, weights=w, density=True) testing.assert_array_almost_equal( - wa, xp.asarray([4, 5, 0, 1]) / 10. / 3. 
* 4) + wa, xp.asarray([4, 5, 0, 1]) / 10.0 / 3.0 * 4 + ) return wb @testing.for_int_dtypes(no_bool=True) @@ -186,8 +191,9 @@ def test_histogram_int_weights_nonuniform_bins(self, xp, dtype): xp.arange(9, dtype=dtype), xp.asarray([0, 1, 3, 6, 10], dtype=dtype), weights=xp.asarray([2, 1, 1, 1, 1, 1, 1, 1, 1], dtype=dtype), - density=True) - testing.assert_array_almost_equal(a, [.2, .1, .1, .075]) + density=True, + ) + testing.assert_array_almost_equal(a, [0.2, 0.1, 0.1, 0.075]) return a, b @testing.for_complex_dtypes() @@ -196,8 +202,7 @@ def test_histogram_complex_weights(self, xp, dtype): values = xp.asarray([1.3, 2.5, 2.3]) weights = xp.asarray([1, -1, 2]) + 1j * xp.asarray([2, 1, 2]) weights = weights.astype(dtype) - a, b = xp.histogram( - values, bins=2, weights=weights) + a, b = xp.histogram(values, bins=2, weights=weights) return a, b @testing.for_complex_dtypes() @@ -206,8 +211,7 @@ def test_histogram_complex_weights_uneven_bins(self, xp, dtype): values = xp.asarray([1.3, 2.5, 2.3]) weights = xp.asarray([1, -1, 2]) + 1j * xp.asarray([2, 1, 2]) weights = weights.astype(dtype) - a, b = xp.histogram( - values, bins=xp.asarray([0, 2, 3]), weights=weights) + a, b = xp.histogram(values, bins=xp.asarray([0, 2, 3]), weights=weights) return a, b @testing.for_all_dtypes(no_bool=True, no_complex=True) @@ -269,7 +273,7 @@ def test_bincount_duplicated_value(self, xp, dtype): x = xp.array([1, 2, 2, 1, 2, 4], dtype) return xp.bincount(x) - @for_all_dtypes_combination_bincount(names=['x_type', 'w_type']) + @for_all_dtypes_combination_bincount(names=["x_type", "w_type"]) @testing.numpy_cupy_allclose(accept_error=TypeError) def test_bincount_with_weight(self, xp, x_type, w_type): x = testing.shaped_arange((3,), xp, x_type) @@ -282,7 +286,7 @@ def test_bincount_with_minlength(self, xp, dtype): x = testing.shaped_arange((3,), xp, dtype) return xp.bincount(x, minlength=5) - @for_all_dtypes_combination_bincount(names=['x_type', 'w_type']) + @for_all_dtypes_combination_bincount(names=["x_type", "w_type"]) def test_bincount_invalid_weight_length(self, x_type, w_type): for xp in (numpy, cupy): x = testing.shaped_arange((1,), xp, x_type) @@ -334,53 +338,56 @@ def test_bincount_too_small_minlength(self, dtype): # @unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled') # class TestCubHistogram(unittest.TestCase): - # def setUp(self): - # self.old_accelerators = _accelerator.get_routine_accelerators() - # _accelerator.set_routine_accelerators(['cub']) +# def setUp(self): +# self.old_accelerators = _accelerator.get_routine_accelerators() +# _accelerator.set_routine_accelerators(['cub']) - # def tearDown(self): - # _accelerator.set_routine_accelerators(self.old_accelerators) +# def tearDown(self): +# _accelerator.set_routine_accelerators(self.old_accelerators) - # @testing.for_all_dtypes(no_bool=True, no_complex=True) - # @testing.numpy_cupy_array_equal() - # def test_histogram(self, xp, dtype): - # x = testing.shaped_arange((10,), xp, dtype) +# @testing.for_all_dtypes(no_bool=True, no_complex=True) +# @testing.numpy_cupy_array_equal() +# def test_histogram(self, xp, dtype): +# x = testing.shaped_arange((10,), xp, dtype) - # if xp is numpy: - # return xp.histogram(x) +# if xp is numpy: +# return xp.histogram(x) - # # xp is cupy, first ensure we really use CUB - # cub_func = 'cupy._statistics.histogram.cub.device_histogram' - # with testing.AssertFunctionIsCalled(cub_func): - # xp.histogram(x) - # # ...then perform the actual computation - # return xp.histogram(x) +# # xp is cupy, first 
ensure we really use CUB +# cub_func = 'cupy._statistics.histogram.cub.device_histogram' +# with testing.AssertFunctionIsCalled(cub_func): +# xp.histogram(x) +# # ...then perform the actual computation +# return xp.histogram(x) - # @testing.for_all_dtypes(no_bool=True, no_complex=True) - # @testing.numpy_cupy_array_equal() - # def test_histogram_range_float(self, xp, dtype): - # a = testing.shaped_arange((10,), xp, dtype) - # h, b = xp.histogram(a, testing.shaped_arange((10,), xp, numpy.float64)) - # assert int(h.sum()) == 10 - # return h, b +# @testing.for_all_dtypes(no_bool=True, no_complex=True) +# @testing.numpy_cupy_array_equal() +# def test_histogram_range_float(self, xp, dtype): +# a = testing.shaped_arange((10,), xp, dtype) +# h, b = xp.histogram(a, testing.shaped_arange((10,), xp, numpy.float64)) +# assert int(h.sum()) == 10 +# return h, b @testing.gpu -@testing.parameterize(*testing.product( - {'bins': [ - # Test monotonically increasing with in-bounds values - [1.5, 2.5, 4.0, 6.0], - # Explicit out-of-bounds for x values - [-1.0, 1.0, 2.5, 4.0, 20.0], - # Repeated values should yield right-most or left-most indexes - [0.0, 1.0, 1.0, 4.0, 4.0, 10.0], - ], - 'increasing': [True, False], - 'right': [True, False], - 'shape': [(), (10,), (6, 3, 3)]}) +@testing.parameterize( + *testing.product( + { + "bins": [ + # Test monotonically increasing with in-bounds values + [1.5, 2.5, 4.0, 6.0], + # Explicit out-of-bounds for x values + [-1.0, 1.0, 2.5, 4.0, 20.0], + # Repeated values should yield right-most or left-most indexes + [0.0, 1.0, 1.0, 4.0, 4.0, 10.0], + ], + "increasing": [True, False], + "right": [True, False], + "shape": [(), (10,), (6, 3, 3)], + } + ) ) class TestDigitize(unittest.TestCase): - @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_digitize(self, xp, dtype): @@ -390,86 +397,82 @@ def test_digitize(self, xp, dtype): bins = bins[::-1] bins = xp.array(bins) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.gpu -@testing.parameterize( - {'right': True}, - {'right': False}) +@testing.parameterize({"right": True}, {"right": False}) class TestDigitizeNanInf(unittest.TestCase): - @testing.numpy_cupy_array_equal() def test_digitize_nan(self, xp): x = testing.shaped_arange((14,), xp, xp.float32) - x[5] = float('nan') + x[5] = float("nan") bins = xp.array([1.0, 3.0, 5.0, 8.0, 12.0], xp.float32) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_digitize_nan_bins(self, xp): x = testing.shaped_arange((14,), xp, xp.float32) - bins = xp.array([1.0, 3.0, 5.0, 8.0, float('nan')], xp.float32) + bins = xp.array([1.0, 3.0, 5.0, 8.0, float("nan")], xp.float32) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_digitize_nan_bins_repeated(self, xp): x = testing.shaped_arange((14,), xp, xp.float32) - x[5] = float('nan') - bins = [1.0, 3.0, 5.0, 8.0, float('nan'), float('nan')] + x[5] = float("nan") + bins = [1.0, 3.0, 5.0, 8.0, float("nan"), float("nan")] bins = xp.array(bins, xp.float32) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_digitize_nan_bins_decreasing(self, xp): x = testing.shaped_arange((14,), xp, xp.float32) - x[5] = float('nan') - bins = [float('nan'), 8.0, 5.0, 3.0, 1.0] + x[5] = float("nan") + bins = [float("nan"), 8.0, 5.0, 3.0, 1.0] bins = xp.array(bins, xp.float32) y = xp.digitize(x, bins, right=self.right) - 
return y, + return (y,) @testing.numpy_cupy_array_equal() def test_digitize_nan_bins_decreasing_repeated(self, xp): x = testing.shaped_arange((14,), xp, xp.float32) - x[5] = float('nan') - bins = [float('nan'), float('nan'), float('nan'), 5.0, 3.0, 1.0] + x[5] = float("nan") + bins = [float("nan"), float("nan"), float("nan"), 5.0, 3.0, 1.0] bins = xp.array(bins, xp.float32) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_digitize_all_nan_bins(self, xp): x = testing.shaped_arange((14,), xp, xp.float32) - x[5] = float('nan') - bins = [float('nan'), float('nan'), float('nan'), float('nan')] + x[5] = float("nan") + bins = [float("nan"), float("nan"), float("nan"), float("nan")] bins = xp.array(bins, xp.float32) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_searchsorted_inf(self, xp): x = testing.shaped_arange((14,), xp, xp.float64) - x[5] = float('inf') + x[5] = float("inf") bins = xp.array([0, 1, 2, 4, 10]) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.numpy_cupy_array_equal() def test_searchsorted_minf(self, xp): x = testing.shaped_arange((14,), xp, xp.float64) - x[5] = float('-inf') + x[5] = float("-inf") bins = xp.array([0, 1, 2, 4, 10]) y = xp.digitize(x, bins, right=self.right) - return y, + return (y,) @testing.gpu class TestDigitizeInvalid(unittest.TestCase): - def test_digitize_complex(self): for xp in (numpy, cupy): x = testing.shaped_arange((14,), xp, xp.complex) diff --git a/tests/third_party/cupy/statistics_tests/test_meanvar.py b/tests/third_party/cupy/statistics_tests/test_meanvar.py index ce6953812bbe..29e229bf921c 100644 --- a/tests/third_party/cupy/statistics_tests/test_meanvar.py +++ b/tests/third_party/cupy/statistics_tests/test_meanvar.py @@ -1,18 +1,18 @@ import unittest -import pytest import numpy +import pytest import dpnp as cupy from tests.third_party.cupy import testing ignore_runtime_warnings = pytest.mark.filterwarnings( - "ignore", category=RuntimeWarning) + "ignore", category=RuntimeWarning +) @testing.gpu class TestMedian(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_median_noaxis(self, xp, dtype): @@ -62,19 +62,27 @@ def test_median_invalid_axis(self): return xp.median(a, (-a.ndim - 1, 1), keepdims=False) with pytest.raises(numpy.AxisError): - return xp.median(a, (0, a.ndim,), keepdims=False) + return xp.median( + a, + ( + 0, + a.ndim, + ), + keepdims=False, + ) @testing.parameterize( - *testing.product({ - 'shape': [(3, 4, 5)], - 'axis': [(0, 1), (0, -1), (1, 2), (1,)], - 'keepdims': [True, False] - }) + *testing.product( + { + "shape": [(3, 4, 5)], + "axis": [(0, 1), (0, -1), (1, 2), (1,)], + "keepdims": [True, False], + } + ) ) @testing.gpu class TestMedianAxis(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_median_axis_sequence(self, xp, dtype): @@ -115,9 +123,9 @@ def test_average_axis_weights(self, xp, dtype): def check_returned(self, a, axis, weights): average_cpu, sum_weights_cpu = numpy.average( - a, axis, weights, returned=True) - result = cupy.average( - cupy.asarray(a), axis, weights, returned=True) + a, axis, weights, returned=True + ) + result = cupy.average(cupy.asarray(a), axis, weights, returned=True) self.assertTrue(isinstance(result, tuple)) self.assertEqual(len(result), 2) average_gpu, sum_weights_gpu = result @@ -135,7 +143,6 @@ def test_returned(self, dtype): @testing.gpu class 
TestMeanVar(unittest.TestCase): - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_mean_all(self, xp, dtype): @@ -276,15 +283,16 @@ def test_external_std_axis_ddof(self, xp, dtype): @testing.parameterize( - *testing.product({ - 'shape': [(3, 4), (30, 40, 50)], - 'axis': [None, 0, 1], - 'keepdims': [True, False] - }) + *testing.product( + { + "shape": [(3, 4), (30, 40, 50)], + "axis": [None, 0, 1], + "keepdims": [True, False], + } + ) ) @testing.gpu class TestNanMean(unittest.TestCase): - @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanmean_without_nan(self, xp, dtype): @@ -297,7 +305,7 @@ def test_nanmean_without_nan(self, xp, dtype): def test_nanmean_with_nan_float(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[1, :] = xp.nan a[:, 3] = xp.nan @@ -306,7 +314,6 @@ def test_nanmean_with_nan_float(self, xp, dtype): @testing.gpu class TestNanMeanAdditional(unittest.TestCase): - @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) @@ -314,7 +321,7 @@ def test_nanmean_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) z = xp.zeros((20, 30), dtype=dtype) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[1, :] = xp.nan a[:, 3] = xp.nan @@ -327,7 +334,7 @@ def test_nanmean_out(self, xp, dtype): def test_nanmean_huge(self, xp, dtype): a = testing.shaped_random((1024, 512), xp, dtype) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[:512, :256] = xp.nan return xp.nanmean(a, axis=1) @@ -347,39 +354,42 @@ def test_nanmean_all_nan(self, xp): @testing.parameterize( - *testing.product({ - 'shape': [(3, 4), (4, 3, 5)], - 'axis': [None, 0, 1], - 'keepdims': [True, False], - 'ddof': [0, 1] - })) + *testing.product( + { + "shape": [(3, 4), (4, 3, 5)], + "axis": [None, 0, 1], + "keepdims": [True, False], + "ddof": [0, 1], + } + ) +) @testing.gpu class TestNanVarStd(unittest.TestCase): - @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanvar(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype=dtype) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[0, :] = xp.nan return xp.nanvar( - a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims) + a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims + ) @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanstd(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype=dtype) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[0, :] = xp.nan return xp.nanstd( - a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims) + a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims + ) @testing.gpu class TestNanVarStdAdditional(unittest.TestCase): - @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) @@ -387,7 +397,7 @@ def test_nanvar_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) z = xp.zeros((20, 30)) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[1, :] = xp.nan a[:, 3] = xp.nan @@ -400,7 +410,7 @@ def test_nanvar_out(self, xp, dtype): def test_nanvar_huge(self, xp, dtype): a = testing.shaped_random((1024, 512), xp, dtype) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": 
a[:512, :256] = xp.nan return xp.nanvar(a, axis=1) @@ -418,7 +428,7 @@ def test_nanstd_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) z = xp.zeros((20, 30)) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[1, :] = xp.nan a[:, 3] = xp.nan @@ -431,7 +441,7 @@ def test_nanstd_out(self, xp, dtype): def test_nanstd_huge(self, xp, dtype): a = testing.shaped_random((1024, 512), xp, dtype) - if a.dtype.kind not in 'biu': + if a.dtype.kind not in "biu": a[:512, :256] = xp.nan return xp.nanstd(a, axis=1) @@ -443,20 +453,23 @@ def test_nanstd_float16(self, xp): return xp.nanstd(a, axis=1) -@testing.parameterize(*testing.product({ - 'params': [ - ((), None), - ((0,), None), - ((0, 0), None), - ((0, 0), 1), - ((0, 0, 0), None), - ((0, 0, 0), (0, 2)), - ], - 'func': ['mean', 'std', 'var'], -})) +@testing.parameterize( + *testing.product( + { + "params": [ + ((), None), + ((0,), None), + ((0, 0), None), + ((0, 0), 1), + ((0, 0, 0), None), + ((0, 0, 0), (0, 2)), + ], + "func": ["mean", "std", "var"], + } + ) +) @testing.gpu class TestProductZeroLength(unittest.TestCase): - @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_external_mean_zero_len(self, xp, dtype): diff --git a/tests/third_party/cupy/statistics_tests/test_order.py b/tests/third_party/cupy/statistics_tests/test_order.py index 28785181a2b2..2ca82b473b52 100644 --- a/tests/third_party/cupy/statistics_tests/test_order.py +++ b/tests/third_party/cupy/statistics_tests/test_order.py @@ -7,22 +7,21 @@ import dpnp as cupy from tests.third_party.cupy import testing - _all_interpolations = ( - 'lower', - 'higher', - 'midpoint', + "lower", + "higher", + "midpoint", # 'nearest', # TODO(hvy): Not implemented - 'linear') + "linear", +) -def for_all_interpolations(name='interpolation'): +def for_all_interpolations(name="interpolation"): return testing.for_orders(_all_interpolations, name=name) @testing.gpu class TestOrder(unittest.TestCase): - @for_all_interpolations() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose() @@ -78,7 +77,8 @@ def test_percentile_keepdims(self, xp, dtype, interpolation): a = testing.shaped_random((7, 2, 9, 2), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) return xp.percentile( - a, q, axis=None, keepdims=True, interpolation=interpolation) + a, q, axis=None, keepdims=True, interpolation=interpolation + ) @for_all_interpolations() @testing.for_float_dtypes(no_float16=True) # NumPy raises error on int8 @@ -88,7 +88,8 @@ def test_percentile_out(self, xp, dtype, interpolation): q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) out = testing.shaped_random((5, 10, 2, 3), xp, dtype) return xp.percentile( - a, q, axis=-1, interpolation=interpolation, out=out) + a, q, axis=-1, interpolation=interpolation, out=out + ) @for_all_interpolations() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @@ -105,7 +106,7 @@ def test_percentile_uxpected_interpolation(self, dtype): a = testing.shaped_random((4, 2, 3, 2), xp, dtype) q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) with pytest.raises(ValueError): - xp.percentile(a, q, axis=-1, interpolation='deadbeef') + xp.percentile(a, q, axis=-1, interpolation="deadbeef") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() @@ -140,16 +141,16 @@ def test_nanmax_axis2(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_nanmax_nan(self, xp, dtype): - a = 
xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) with warnings.catch_warnings(): return xp.nanmax(a) @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_nanmax_all_nan(self, xp, dtype): - a = xp.array([float('nan'), float('nan')], dtype) + a = xp.array([float("nan"), float("nan")], dtype) with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') + warnings.simplefilter("always") m = xp.nanmax(a) self.assertEqual(len(w), 1) self.assertIs(w[0].category, RuntimeWarning) @@ -188,16 +189,16 @@ def test_nanmin_axis2(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_nanmin_nan(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) with warnings.catch_warnings(): return xp.nanmin(a) @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_nanmin_all_nan(self, xp, dtype): - a = xp.array([float('nan'), float('nan')], dtype) + a = xp.array([float("nan"), float("nan")], dtype) with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') + warnings.simplefilter("always") m = xp.nanmin(a) self.assertEqual(len(w), 1) self.assertIs(w[0].category, RuntimeWarning) @@ -236,11 +237,11 @@ def test_ptp_axis2(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_ptp_nan(self, xp, dtype): - a = xp.array([float('nan'), 1, -1], dtype) + a = xp.array([float("nan"), 1, -1], dtype) return xp.ptp(a) @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_ptp_all_nan(self, xp, dtype): - a = xp.array([float('nan'), float('nan')], dtype) + a = xp.array([float("nan"), float("nan")], dtype) return xp.ptp(a) diff --git a/tests/third_party/cupy/testing/__init__.py b/tests/third_party/cupy/testing/__init__.py index 75dda2890606..1db2ccbae8b1 100644 --- a/tests/third_party/cupy/testing/__init__.py +++ b/tests/third_party/cupy/testing/__init__.py @@ -1,56 +1,62 @@ # from tests.third_party.cupy.testing import array # from tests.third_party.cupy.testing import attr # from tests.third_party.cupy.testing import helper -from tests.third_party.cupy.testing import parameterized -from tests.third_party.cupy.testing import random +from tests.third_party.cupy.testing import parameterized, random + # from tests.third_party.cupy.testing.array import assert_allclose + +# from tests.third_party.cupy.testing.attr import multi_gpu # from tests.third_party.cupy.testing.array import assert_array_almost_equal # from tests.third_party.cupy.testing.array import assert_array_almost_equal_nulp # from tests.third_party.cupy.testing.array import assert_array_equal # from tests.third_party.cupy.testing.array import assert_array_less # from tests.third_party.cupy.testing.array import assert_array_list_equal # from tests.third_party.cupy.testing.array import assert_array_max_ulp -from tests.third_party.cupy.testing.attr import gpu -# from tests.third_party.cupy.testing.attr import multi_gpu -from tests.third_party.cupy.testing.attr import slow -from tests.third_party.cupy.testing.helper import assert_warns -# from tests.third_party.cupy.testing.helper import empty -from tests.third_party.cupy.testing.helper import for_all_dtypes -from tests.third_party.cupy.testing.helper import for_all_dtypes_combination -from tests.third_party.cupy.testing.helper import for_CF_orders -from tests.third_party.cupy.testing.helper import for_complex_dtypes -from tests.third_party.cupy.testing.helper import for_dtypes -from 
tests.third_party.cupy.testing.helper import for_dtypes_combination -from tests.third_party.cupy.testing.helper import for_float_dtypes -from tests.third_party.cupy.testing.helper import for_int_dtypes -# from tests.third_party.cupy.testing.helper import for_int_dtypes_combination -from tests.third_party.cupy.testing.helper import for_orders -from tests.third_party.cupy.testing.helper import for_signed_dtypes -# from tests.third_party.cupy.testing.helper import for_signed_dtypes_combination -from tests.third_party.cupy.testing.helper import for_unsigned_dtypes -# from tests.third_party.cupy.testing.helper import for_unsigned_dtypes_combination -from tests.third_party.cupy.testing.helper import numpy_cupy_allclose -from tests.third_party.cupy.testing.helper import numpy_cupy_array_almost_equal -# from tests.third_party.cupy.testing.helper import numpy_cupy_array_almost_equal_nulp -from tests.third_party.cupy.testing.helper import numpy_cupy_array_equal -# from tests.third_party.cupy.testing.helper import numpy_cupy_array_less -from tests.third_party.cupy.testing.helper import numpy_cupy_array_list_equal -# from tests.third_party.cupy.testing.helper import numpy_cupy_array_max_ulp -from tests.third_party.cupy.testing.helper import numpy_cupy_equal +from tests.third_party.cupy.testing.attr import gpu, slow + # from tests.third_party.cupy.testing.helper import numpy_cupy_raises -from tests.third_party.cupy.testing.helper import numpy_satisfies -from tests.third_party.cupy.testing.helper import NumpyAliasBasicTestBase -from tests.third_party.cupy.testing.helper import NumpyAliasValuesTestBase -from tests.third_party.cupy.testing.helper import NumpyError -from tests.third_party.cupy.testing.helper import shaped_arange -from tests.third_party.cupy.testing.helper import shaped_random -from tests.third_party.cupy.testing.helper import shaped_reverse_arange -from tests.third_party.cupy.testing.helper import with_requires -from tests.third_party.cupy.testing.parameterized import from_pytest_parameterize -from tests.third_party.cupy.testing.parameterized import parameterize -from tests.third_party.cupy.testing.parameterized import parameterize_pytest -from tests.third_party.cupy.testing.parameterized import product -from tests.third_party.cupy.testing.parameterized import product_dict +# from tests.third_party.cupy.testing.helper import numpy_cupy_array_max_ulp +# from tests.third_party.cupy.testing.helper import numpy_cupy_array_less +# from tests.third_party.cupy.testing.helper import numpy_cupy_array_almost_equal_nulp +# from tests.third_party.cupy.testing.helper import for_unsigned_dtypes_combination +# from tests.third_party.cupy.testing.helper import for_signed_dtypes_combination +# from tests.third_party.cupy.testing.helper import for_int_dtypes_combination +# from tests.third_party.cupy.testing.helper import empty +from tests.third_party.cupy.testing.helper import ( + NumpyAliasBasicTestBase, + NumpyAliasValuesTestBase, + NumpyError, + assert_warns, + for_all_dtypes, + for_all_dtypes_combination, + for_CF_orders, + for_complex_dtypes, + for_dtypes, + for_dtypes_combination, + for_float_dtypes, + for_int_dtypes, + for_orders, + for_signed_dtypes, + for_unsigned_dtypes, + numpy_cupy_allclose, + numpy_cupy_array_almost_equal, + numpy_cupy_array_equal, + numpy_cupy_array_list_equal, + numpy_cupy_equal, + numpy_satisfies, + shaped_arange, + shaped_random, + shaped_reverse_arange, + with_requires, +) +from tests.third_party.cupy.testing.parameterized import ( + from_pytest_parameterize, + 
parameterize, + parameterize_pytest, + product, + product_dict, +) from tests.third_party.cupy.testing.random import fix_random + # from tests.third_party.cupy.testing.random import generate_seed diff --git a/tests/third_party/cupy/testing/_bundle.py b/tests/third_party/cupy/testing/_bundle.py index cce5cb84849e..85e73f048ac3 100644 --- a/tests/third_party/cupy/testing/_bundle.py +++ b/tests/third_party/cupy/testing/_bundle.py @@ -2,14 +2,14 @@ import inspect import sys - # A tuple that represents a test case. # For bare (non-generated) test cases, [1] and [2] are None. # [0] Test case class # [1] Module name in whicn the class is defined # [2] Class name _TestCaseTuple = collections.namedtuple( - '_TestCaseTuple', ('klass', 'module_name', 'class_name')) + "_TestCaseTuple", ("klass", "module_name", "class_name") +) class _ParameterizedTestCaseBundle(object): @@ -58,6 +58,7 @@ def f(cases): # Return the bundle of generated cases to allow repeated application of # parameterize decorators. return _ParameterizedTestCaseBundle(generated_cases) + return f @@ -67,9 +68,10 @@ def _generate_case(base, module, cls_name, mb, method_generator): members = mb.copy() # ismethod for Python 2 and isfunction for Python 3 base_methods = inspect.getmembers( - base, predicate=lambda m: inspect.ismethod(m) or inspect.isfunction(m)) + base, predicate=lambda m: inspect.ismethod(m) or inspect.isfunction(m) + ) for name, value in base_methods: - if not name.startswith('test_'): + if not name.startswith("test_"): continue value = method_generator(value) # If the return value of method_generator is None, None is assigned @@ -89,10 +91,10 @@ def _generate_test_cases(module_name, base_class, test_case_generator): module = sys.modules[module_name] generated_cases = [] - for cls_name, members, method_generator in ( - test_case_generator(base_class)): + for cls_name, members, method_generator in test_case_generator(base_class): c = _generate_case( - base_class, module, cls_name, members, method_generator) + base_class, module, cls_name, members, method_generator + ) generated_cases.append(c) return generated_cases diff --git a/tests/third_party/cupy/testing/array.py b/tests/third_party/cupy/testing/array.py index 706079de5a60..ba17ee46a5e1 100644 --- a/tests/third_party/cupy/testing/array.py +++ b/tests/third_party/cupy/testing/array.py @@ -2,11 +2,12 @@ import dpnp - # NumPy-like assertion functions that accept both NumPy and CuPy arrays -def assert_allclose(actual, desired, rtol=1e-7, atol=0, err_msg='', - verbose=True): + +def assert_allclose( + actual, desired, rtol=1e-7, atol=0, err_msg="", verbose=True +): """Raises an AssertionError if objects are not equal up to desired tolerance. Args: @@ -22,11 +23,16 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, err_msg='', """ # NOQA numpy.testing.assert_allclose( - dpnp.asnumpy(actual), dpnp.asnumpy(desired), - rtol=rtol, atol=atol, err_msg=err_msg, verbose=verbose) + dpnp.asnumpy(actual), + dpnp.asnumpy(desired), + rtol=rtol, + atol=atol, + err_msg=err_msg, + verbose=verbose, + ) -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): +def assert_array_almost_equal(x, y, decimal=6, err_msg="", verbose=True): """Raises an AssertionError if objects are not equal up to desired precision. Args: @@ -40,8 +46,12 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): .. 
seealso:: :func:`numpy.testing.assert_array_almost_equal` """ # NOQA numpy.testing.assert_array_almost_equal( - dpnp.asnumpy(x), dpnp.asnumpy(y), decimal=decimal, - err_msg=err_msg, verbose=verbose) + dpnp.asnumpy(x), + dpnp.asnumpy(y), + decimal=decimal, + err_msg=err_msg, + verbose=verbose, + ) def assert_array_almost_equal_nulp(x, y, nulp=1): @@ -55,7 +65,8 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): .. seealso:: :func:`numpy.testing.assert_array_almost_equal_nulp` """ numpy.testing.assert_array_almost_equal_nulp( - dpnp.asnumpy(x), dpnp.asnumpy(y), nulp=nulp) + dpnp.asnumpy(x), dpnp.asnumpy(y), nulp=nulp + ) def assert_array_max_ulp(a, b, maxulp=1, dtype=None): @@ -71,10 +82,11 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None): .. seealso:: :func:`numpy.testing.assert_array_max_ulp` """ # NOQA numpy.testing.assert_array_max_ulp( - dpnp.asnumpy(a), dpnp.asnumpy(b), maxulp=maxulp, dtype=dtype) + dpnp.asnumpy(a), dpnp.asnumpy(b), maxulp=maxulp, dtype=dtype + ) -def assert_array_equal(x, y, err_msg='', verbose=True, strides_check=False): +def assert_array_equal(x, y, err_msg="", verbose=True, strides_check=False): """Raises an AssertionError if two array_like objects are not equal. Args: @@ -89,21 +101,21 @@ def assert_array_equal(x, y, err_msg='', verbose=True, strides_check=False): .. seealso:: :func:`numpy.testing.assert_array_equal` """ numpy.testing.assert_array_equal( - dpnp.asnumpy(x), dpnp.asnumpy(y), err_msg=err_msg, - verbose=verbose) + dpnp.asnumpy(x), dpnp.asnumpy(y), err_msg=err_msg, verbose=verbose + ) if strides_check: if x.strides != y.strides: - msg = ['Strides are not equal:'] + msg = ["Strides are not equal:"] if err_msg: - msg = [msg[0] + ' ' + err_msg] + msg = [msg[0] + " " + err_msg] if verbose: - msg.append(' x: {}'.format(x.strides)) - msg.append(' y: {}'.format(y.strides)) - raise AssertionError('\n'.join(msg)) + msg.append(" x: {}".format(x.strides)) + msg.append(" y: {}".format(y.strides)) + raise AssertionError("\n".join(msg)) -def assert_array_list_equal(xlist, ylist, err_msg='', verbose=True): +def assert_array_list_equal(xlist, ylist, err_msg="", verbose=True): """Compares lists of arrays pairwise with ``assert_array_equal``. Args: @@ -126,21 +138,23 @@ def assert_array_list_equal(xlist, ylist, err_msg='', verbose=True): y_type = type(ylist) if x_type is not y_type: raise AssertionError( - 'Matching types of list or tuple are expected, ' - 'but were different types ' - '(xlist:{} ylist:{})'.format(x_type, y_type)) + "Matching types of list or tuple are expected, " + "but were different types " + "(xlist:{} ylist:{})".format(x_type, y_type) + ) if x_type not in (list, tuple): raise AssertionError( - 'List or tuple is expected, but was {}'.format(x_type)) + "List or tuple is expected, but was {}".format(x_type) + ) if len(xlist) != len(ylist): - raise AssertionError('List size is different') + raise AssertionError("List size is different") for x, y in zip(xlist, ylist): numpy.testing.assert_array_equal( - dpnp.asnumpy(x), dpnp.asnumpy(y), err_msg=err_msg, - verbose=verbose) + dpnp.asnumpy(x), dpnp.asnumpy(y), err_msg=err_msg, verbose=verbose + ) -def assert_array_less(x, y, err_msg='', verbose=True): +def assert_array_less(x, y, err_msg="", verbose=True): """Raises an AssertionError if array_like objects are not ordered by less than. Args: @@ -153,5 +167,5 @@ def assert_array_less(x, y, err_msg='', verbose=True): .. 
seealso:: :func:`numpy.testing.assert_array_less` """ # NOQA numpy.testing.assert_array_less( - dpnp.asnumpy(x), dpnp.asnumpy(y), err_msg=err_msg, - verbose=verbose) + dpnp.asnumpy(x), dpnp.asnumpy(y), err_msg=err_msg, verbose=verbose + ) diff --git a/tests/third_party/cupy/testing/attr.py b/tests/third_party/cupy/testing/attr.py index 98e647d5dd31..a5bd49ef8c3c 100644 --- a/tests/third_party/cupy/testing/attr.py +++ b/tests/third_party/cupy/testing/attr.py @@ -1,9 +1,9 @@ import os import unittest - try: import pytest + _error = None except ImportError as e: _error = e @@ -15,10 +15,14 @@ def is_available(): def check_available(): if _error is not None: - raise RuntimeError('''\ + raise RuntimeError( + """\ {} is not available. -Reason: {}: {}'''.format(__name__, type(_error).__name__, _error)) +Reason: {}: {}""".format( + __name__, type(_error).__name__, _error + ) + ) def get_error(): @@ -26,7 +30,7 @@ def get_error(): if _error is None: - _gpu_limit = int(os.getenv('CUPY_TEST_GPU_LIMIT', '-1')) + _gpu_limit = int(os.getenv("CUPY_TEST_GPU_LIMIT", "-1")) def cudnn(*args, **kwargs): return pytest.mark.cudnn(*args, **kwargs) @@ -35,9 +39,10 @@ def slow(*args, **kwargs): return pytest.mark.slow(*args, **kwargs) else: + def _dummy_callable(*args, **kwargs): check_available() - assert False # Not reachable + raise AssertionError() # Not reachable cudnn = _dummy_callable slow = _dummy_callable @@ -55,8 +60,8 @@ def multi_gpu(gpu_num): check_available() return unittest.skipIf( - 0 <= _gpu_limit < gpu_num, - reason='{} GPUs required'.format(gpu_num)) + 0 <= _gpu_limit < gpu_num, reason="{} GPUs required".format(gpu_num) + ) def gpu(f): diff --git a/tests/third_party/cupy/testing/condition.py b/tests/third_party/cupy/testing/condition.py index d5f1e8db0e87..c0791d0872be 100644 --- a/tests/third_party/cupy/testing/condition.py +++ b/tests/third_party/cupy/testing/condition.py @@ -4,7 +4,6 @@ class QuietTestRunner(object): - def run(self, suite): result = unittest.TestResult() suite(result) @@ -39,14 +38,15 @@ def wrapper(*args, **kwargs): results = [] def fail(): - msg = '\nFail: {0}, Success: {1}'.format( - failure_counter, success_counter) + msg = "\nFail: {0}, Success: {1}".format( + failure_counter, success_counter + ) if len(results) > 0: first = results[0] errs = first.failures + first.errors if len(errs) > 0: - err_msg = '\n'.join(fail[1] for fail in errs) - msg += '\n\nThe first error message:\n' + err_msg + err_msg = "\n".join(fail[1] for fail in errs) + msg += "\n\nThe first error message:\n" + err_msg instance.fail(msg) for _ in range(times): @@ -56,9 +56,11 @@ def fail(): ins = type(instance)(instance._testMethodName) suite.addTest( unittest.FunctionTestCase( - lambda: f(ins, *args[1:], **kwargs), + lambda ins=ins: f(ins, *args[1:], **kwargs), setUp=ins.setUp, - tearDown=ins.tearDown)) + tearDown=ins.tearDown, + ) + ) result = QuietTestRunner().run(suite) if len(result.skipped) == 1: @@ -79,7 +81,9 @@ def fail(): fail() return fail() + return wrapper + return _repeat_with_success_at_least @@ -102,7 +106,7 @@ def repeat(times, intensive_times=None): if intensive_times is None: return repeat_with_success_at_least(times, times) - casual_test = bool(int(os.environ.get('CUPY_TEST_CASUAL', '0'))) + casual_test = bool(int(os.environ.get("CUPY_TEST_CASUAL", "0"))) times_ = times if casual_test else intensive_times return repeat_with_success_at_least(times_, times_) diff --git a/tests/third_party/cupy/testing/helper.py b/tests/third_party/cupy/testing/helper.py index 5f9864dadc59..8ec86e591be9 
100644 --- a/tests/third_party/cupy/testing/helper.py +++ b/tests/third_party/cupy/testing/helper.py @@ -12,9 +12,10 @@ import dpnp import dpnp as cupy import dpnp as cupyx + # from dpnp.core import internal -from tests.third_party.cupy.testing import array -from tests.third_party.cupy.testing import parameterized +from tests.third_party.cupy.testing import array, parameterized + # import dpnp # import dpnp.scipy.sparse @@ -67,9 +68,11 @@ def _call_func_numpy(self, impl, args, kw, name, sp_name, scipy_name): kw[name] = numpy if sp_name: import scipy.sparse + kw[sp_name] = scipy.sparse if scipy_name: import scipy + kw[scipy_name] = scipy result, error, tb = _call_func(self, impl, args, kw) return result, error, tb @@ -78,60 +81,81 @@ def _call_func_numpy(self, impl, args, kw, name, sp_name, scipy_name): def _call_func_numpy_cupy(self, impl, args, kw, name, sp_name, scipy_name): # Run cupy cupy_result, cupy_error, cupy_tb = _call_func_cupy( - self, impl, args, kw, name, sp_name, scipy_name) + self, impl, args, kw, name, sp_name, scipy_name + ) # Run numpy numpy_result, numpy_error, numpy_tb = _call_func_numpy( - self, impl, args, kw, name, sp_name, scipy_name) + self, impl, args, kw, name, sp_name, scipy_name + ) return ( - cupy_result, cupy_error, cupy_tb, - numpy_result, numpy_error, numpy_tb) + cupy_result, + cupy_error, + cupy_tb, + numpy_result, + numpy_error, + numpy_tb, + ) _numpy_errors = [ - AttributeError, Exception, IndexError, TypeError, ValueError, - NotImplementedError, DeprecationWarning, - numpy.AxisError, numpy.linalg.LinAlgError, + AttributeError, + Exception, + IndexError, + TypeError, + ValueError, + NotImplementedError, + DeprecationWarning, + numpy.AxisError, + numpy.linalg.LinAlgError, ] def _check_numpy_cupy_error_compatible(cupy_error, numpy_error): - """Checks if try/except blocks are equivalent up to public error classes - """ + """Checks if try/except blocks are equivalent up to public error classes""" - return all([isinstance(cupy_error, err) == isinstance(numpy_error, err) - for err in _numpy_errors]) + return all( + [ + isinstance(cupy_error, err) == isinstance(numpy_error, err) + for err in _numpy_errors + ] + ) def _fail_test_with_unexpected_errors( - testcase, msg_format, cupy_error, cupy_tb, numpy_error, numpy_tb): + testcase, msg_format, cupy_error, cupy_tb, numpy_error, numpy_tb +): # Fails the test due to unexpected errors raised from the test. # msg_format may include format placeholders: # '{cupy_error}' '{cupy_tb}' '{numpy_error}' '{numpy_tb}' msg = msg_format.format( - cupy_error=''.join(str(cupy_error)), - cupy_tb=''.join(traceback.format_tb(cupy_tb)), - numpy_error=''.join(str(numpy_error)), - numpy_tb=''.join(traceback.format_tb(numpy_tb))) + cupy_error="".join(str(cupy_error)), + cupy_tb="".join(traceback.format_tb(cupy_tb)), + numpy_error="".join(str(numpy_error)), + numpy_tb="".join(traceback.format_tb(numpy_tb)), + ) # Fail the test with the traceback of the error (for pytest --pdb) try: testcase.fail(msg) except AssertionError as e: raise e.with_traceback(cupy_tb or numpy_tb) - assert False # never reach + raise AssertionError() # never reach -def _check_cupy_numpy_error(self, cupy_error, cupy_tb, numpy_error, - numpy_tb, accept_error=False): +def _check_cupy_numpy_error( + self, cupy_error, cupy_tb, numpy_error, numpy_tb, accept_error=False +): # Skip the test if both raised SkipTest. 
- if (isinstance(cupy_error, unittest.SkipTest) - and isinstance(numpy_error, unittest.SkipTest)): + if isinstance(cupy_error, unittest.SkipTest) and isinstance( + numpy_error, unittest.SkipTest + ): if cupy_error.args != numpy_error.args: raise AssertionError( - 'Both numpy and cupy were skipped but with different causes.') + "Both numpy and cupy were skipped but with different causes." + ) raise numpy_error # reraise SkipTest # For backward compatibility @@ -141,44 +165,62 @@ def _check_cupy_numpy_error(self, cupy_error, cupy_tb, numpy_error, accept_error = () # TODO(oktua): expected_regexp like numpy.testing.assert_raises_regex if cupy_error is None and numpy_error is None: - self.fail('Both cupy and numpy are expected to raise errors, but not') + self.fail("Both cupy and numpy are expected to raise errors, but not") elif cupy_error is None: _fail_test_with_unexpected_errors( self, - 'Only numpy raises error\n\n{numpy_tb}{numpy_error}', - None, None, numpy_error, numpy_tb) + "Only numpy raises error\n\n{numpy_tb}{numpy_error}", + None, + None, + numpy_error, + numpy_tb, + ) elif numpy_error is None: _fail_test_with_unexpected_errors( self, - 'Only cupy raises error\n\n{cupy_tb}{cupy_error}', - cupy_error, cupy_tb, None, None) + "Only cupy raises error\n\n{cupy_tb}{cupy_error}", + cupy_error, + cupy_tb, + None, + None, + ) elif not _check_numpy_cupy_error_compatible(cupy_error, numpy_error): _fail_test_with_unexpected_errors( self, - '''Different types of errors occurred + """Different types of errors occurred cupy {cupy_tb}{cupy_error} numpy {numpy_tb}{numpy_error} -''', - cupy_error, cupy_tb, numpy_error, numpy_tb) - - elif not (isinstance(cupy_error, accept_error) - and isinstance(numpy_error, accept_error)): +""", + cupy_error, + cupy_tb, + numpy_error, + numpy_tb, + ) + + elif not ( + isinstance(cupy_error, accept_error) + and isinstance(numpy_error, accept_error) + ): _fail_test_with_unexpected_errors( self, - '''Both cupy and numpy raise exceptions + """Both cupy and numpy raise exceptions cupy {cupy_tb}{cupy_error} numpy {numpy_tb}{numpy_error} -''', - cupy_error, cupy_tb, numpy_error, numpy_tb) +""", + cupy_error, + cupy_tb, + numpy_error, + numpy_tb, + ) def _make_positive_mask(self, impl, args, kw, name, sp_name, scipy_name): @@ -188,19 +230,22 @@ def _make_positive_mask(self, impl, args, kw, name, sp_name, scipy_name): for k in ks: kw[k] = numpy.intp result, error, tb = _call_func_cupy( - self, impl, args, kw, name, sp_name, scipy_name) + self, impl, args, kw, name, sp_name, scipy_name + ) assert error is None return dpnp.asnumpy(result) >= 0 def _contains_signed_and_unsigned(kw): vs = set(kw.values()) - return any(d in vs for d in _unsigned_dtypes) and \ - any(d in vs for d in _float_dtypes + _signed_dtypes) + return any(d in vs for d in _unsigned_dtypes) and any( + d in vs for d in _float_dtypes + _signed_dtypes + ) -def _make_decorator(check_func, name, type_check, accept_error, sp_name=None, - scipy_name=None): +def _make_decorator( + check_func, name, type_check, accept_error, sp_name=None, scipy_name=None +): assert isinstance(name, str) assert sp_name is None or isinstance(sp_name, str) assert scipy_name is None or isinstance(scipy_name, str) @@ -210,38 +255,52 @@ def decorator(impl): def test_func(self, *args, **kw): # Run cupy and numpy ( - cupy_result, cupy_error, cupy_tb, - numpy_result, numpy_error, numpy_tb) = ( - _call_func_numpy_cupy( - self, impl, args, kw, name, sp_name, scipy_name)) + cupy_result, + cupy_error, + cupy_tb, + numpy_result, + numpy_error, + 
numpy_tb, + ) = _call_func_numpy_cupy( + self, impl, args, kw, name, sp_name, scipy_name + ) assert cupy_result is not None or cupy_error is not None assert numpy_result is not None or numpy_error is not None # Check errors raised if cupy_error or numpy_error: - _check_cupy_numpy_error(self, cupy_error, cupy_tb, - numpy_error, numpy_tb, - accept_error=accept_error) + _check_cupy_numpy_error( + self, + cupy_error, + cupy_tb, + numpy_error, + numpy_tb, + accept_error=accept_error, + ) return # Check returned arrays if not isinstance(cupy_result, (tuple, list)): - cupy_result = cupy_result, + cupy_result = (cupy_result,) if not isinstance(numpy_result, (tuple, list)): - numpy_result = numpy_result, + numpy_result = (numpy_result,) assert len(cupy_result) == len(numpy_result) if type_check: for cupy_r, numpy_r in zip(cupy_result, numpy_result): if cupy_r.dtype != numpy_r.dtype: - print(f"\nERROR:\n\tcupy_r.dtype={cupy_r.dtype},\n\tnumpy_r.dtype={numpy_r.dtype}") + print( + f"\nERROR:\n\tcupy_r.dtype={cupy_r.dtype},\n\tnumpy_r.dtype={numpy_r.dtype}" + ) assert cupy_r.dtype == numpy_r.dtype for cupy_r, numpy_r in zip(cupy_result, numpy_result): if cupy_r.shape != numpy_r.shape: - print(f"\nERROR:\n\tcupy_r.shape={cupy_r.shape},\n\tnumpy_r.shape={numpy_r.shape}") + print( + f"\nERROR:\n\tcupy_r.shape={cupy_r.shape},\n\tnumpy_r.shape={numpy_r.shape}" + ) assert cupy_r.shape == numpy_r.shape # Behavior of assigning a negative value to an unsigned integer @@ -250,10 +309,13 @@ def test_func(self, *args, **kw): # To avoid this difference, we need to ignore dimensions whose # values are negative. skip = False - if (_contains_signed_and_unsigned(kw) - and cupy_r.dtype in _unsigned_dtypes): + if ( + _contains_signed_and_unsigned(kw) + and cupy_r.dtype in _unsigned_dtypes + ): mask = _make_positive_mask( - self, impl, args, kw, name, sp_name, scipy_name) + self, impl, args, kw, name, sp_name, scipy_name + ) if cupy_r.shape == (): skip = (mask == 0).all() else: @@ -262,13 +324,24 @@ def test_func(self, *args, **kw): if not skip: check_func(cupy_r, numpy_r) + return test_func + return decorator -def numpy_cupy_allclose(rtol=1e-7, atol=0, err_msg='', verbose=True, - name='xp', type_check=True, accept_error=False, - sp_name=None, scipy_name=None, contiguous_check=True): +def numpy_cupy_allclose( + rtol=1e-7, + atol=0, + err_msg="", + verbose=True, + name="xp", + type_check=True, + accept_error=False, + sp_name=None, + scipy_name=None, + contiguous_check=True, +): """Decorator that checks NumPy results and CuPy ones are close. Args: @@ -316,24 +389,34 @@ def numpy_cupy_allclose(rtol=1e-7, atol=0, err_msg='', verbose=True, .. seealso:: :func:`cupy.testing.assert_allclose` """ + def check_func(c, n): c_array = c n_array = n if sp_name is not None: import scipy.sparse + if cupyx.scipy.sparse.issparse(c): c_array = c.A if scipy.sparse.issparse(n): n_array = n.A array.assert_allclose(c_array, n_array, rtol, atol, err_msg, verbose) - return _make_decorator(check_func, name, type_check, accept_error, sp_name, - scipy_name) - -def numpy_cupy_array_almost_equal(decimal=6, err_msg='', verbose=True, - name='xp', type_check=True, - accept_error=False, sp_name=None, - scipy_name=None): + return _make_decorator( + check_func, name, type_check, accept_error, sp_name, scipy_name + ) + + +def numpy_cupy_array_almost_equal( + decimal=6, + err_msg="", + verbose=True, + name="xp", + type_check=True, + accept_error=False, + sp_name=None, + scipy_name=None, +): """Decorator that checks NumPy results and CuPy ones are almost equal. 
Args: @@ -363,16 +446,23 @@ def numpy_cupy_array_almost_equal(decimal=6, err_msg='', verbose=True, .. seealso:: :func:`cupy.testing.assert_array_almost_equal` """ + def check_func(x, y): - array.assert_array_almost_equal( - x, y, decimal, err_msg, verbose) - return _make_decorator(check_func, name, type_check, accept_error, sp_name, - scipy_name) + array.assert_array_almost_equal(x, y, decimal, err_msg, verbose) + + return _make_decorator( + check_func, name, type_check, accept_error, sp_name, scipy_name + ) -def numpy_cupy_array_almost_equal_nulp(nulp=1, name='xp', type_check=True, - accept_error=False, sp_name=None, - scipy_name=None): +def numpy_cupy_array_almost_equal_nulp( + nulp=1, + name="xp", + type_check=True, + accept_error=False, + sp_name=None, + scipy_name=None, +): """Decorator that checks results of NumPy and CuPy are equal w.r.t. spacing. Args: @@ -399,15 +489,24 @@ def numpy_cupy_array_almost_equal_nulp(nulp=1, name='xp', type_check=True, .. seealso:: :func:`cupy.testing.assert_array_almost_equal_nulp` """ # NOQA + def check_func(x, y): array.assert_array_almost_equal_nulp(x, y, nulp) - return _make_decorator(check_func, name, type_check, accept_error, sp_name, - scipy_name=None) + return _make_decorator( + check_func, name, type_check, accept_error, sp_name, scipy_name=None + ) -def numpy_cupy_array_max_ulp(maxulp=1, dtype=None, name='xp', type_check=True, - accept_error=False, sp_name=None, - scipy_name=None): + +def numpy_cupy_array_max_ulp( + maxulp=1, + dtype=None, + name="xp", + type_check=True, + accept_error=False, + sp_name=None, + scipy_name=None, +): """Decorator that checks results of NumPy and CuPy ones are equal w.r.t. ulp. Args: @@ -438,15 +537,25 @@ def numpy_cupy_array_max_ulp(maxulp=1, dtype=None, name='xp', type_check=True, .. seealso:: :func:`cupy.testing.assert_array_max_ulp` """ # NOQA + def check_func(x, y): array.assert_array_max_ulp(x, y, maxulp, dtype) - return _make_decorator(check_func, name, type_check, accept_error, sp_name, - scipy_name) - -def numpy_cupy_array_equal(err_msg='', verbose=True, name='xp', - type_check=True, accept_error=False, sp_name=None, - scipy_name=None, strides_check=False): + return _make_decorator( + check_func, name, type_check, accept_error, sp_name, scipy_name + ) + + +def numpy_cupy_array_equal( + err_msg="", + verbose=True, + name="xp", + type_check=True, + accept_error=False, + sp_name=None, + scipy_name=None, + strides_check=False, +): """Decorator that checks NumPy results and CuPy ones are equal. Args: @@ -477,9 +586,11 @@ def numpy_cupy_array_equal(err_msg='', verbose=True, name='xp', .. seealso:: :func:`cupy.testing.assert_array_equal` """ + def check_func(x, y): if sp_name is not None: import scipy.sparse + if cupyx.scipy.sparse.issparse(x): x = x.A if scipy.sparse.issparse(y): @@ -487,12 +598,14 @@ def check_func(x, y): array.assert_array_equal(x, y, err_msg, verbose, strides_check) - return _make_decorator(check_func, name, type_check, accept_error, sp_name, - scipy_name) + return _make_decorator( + check_func, name, type_check, accept_error, sp_name, scipy_name + ) def numpy_cupy_array_list_equal( - err_msg='', verbose=True, name='xp', sp_name=None, scipy_name=None): + err_msg="", verbose=True, name="xp", sp_name=None, scipy_name=None +): """Decorator that checks the resulting lists of NumPy and CuPy's one are equal. Args: @@ -513,14 +626,22 @@ def numpy_cupy_array_list_equal( .. 
seealso:: :func:`cupy.testing.assert_array_list_equal` """ # NOQA + def check_func(x, y): array.assert_array_equal(x, y, err_msg, verbose) + return _make_decorator(check_func, name, False, False, sp_name, scipy_name) -def numpy_cupy_array_less(err_msg='', verbose=True, name='xp', - type_check=True, accept_error=False, sp_name=None, - scipy_name=None): +def numpy_cupy_array_less( + err_msg="", + verbose=True, + name="xp", + type_check=True, + accept_error=False, + sp_name=None, + scipy_name=None, +): """Decorator that checks the CuPy result is less than NumPy result. Args: @@ -548,13 +669,16 @@ def numpy_cupy_array_less(err_msg='', verbose=True, name='xp', .. seealso:: :func:`cupy.testing.assert_array_less` """ + def check_func(x, y): array.assert_array_less(x, y, err_msg, verbose) - return _make_decorator(check_func, name, type_check, accept_error, sp_name, - scipy_name) + + return _make_decorator( + check_func, name, type_check, accept_error, sp_name, scipy_name + ) -def numpy_cupy_equal(name='xp', sp_name=None, scipy_name=None): +def numpy_cupy_equal(name="xp", sp_name=None, scipy_name=None): """Decorator that checks NumPy results are equal to CuPy ones. Args: @@ -570,24 +694,39 @@ def numpy_cupy_equal(name='xp', sp_name=None, scipy_name=None): Decorated test fixture is required to return the same results even if ``xp`` is ``numpy`` or ``cupy``. """ + def decorator(impl): @functools.wraps(impl) def test_func(self, *args, **kw): # Run cupy and numpy - (cupy_result, cupy_error, cupy_tb, numpy_result, numpy_error, numpy_tb) = ( - _call_func_numpy_cupy(self, impl, args, kw, name, sp_name, scipy_name)) + ( + cupy_result, + cupy_error, + cupy_tb, + numpy_result, + numpy_error, + numpy_tb, + ) = _call_func_numpy_cupy( + self, impl, args, kw, name, sp_name, scipy_name + ) if cupy_result != numpy_result: - message = '''Results are not equal: + message = """Results are not equal: cupy: %s -numpy: %s''' % (str(cupy_result), str(numpy_result)) +numpy: %s""" % ( + str(cupy_result), + str(numpy_result), + ) raise AssertionError(message) + return test_func + return decorator -def numpy_cupy_raises(name='xp', sp_name=None, scipy_name=None, - accept_error=Exception): +def numpy_cupy_raises( + name="xp", sp_name=None, scipy_name=None, accept_error=Exception +): """Decorator that checks the NumPy and CuPy throw same errors. Args: @@ -609,24 +748,37 @@ def numpy_cupy_raises(name='xp', sp_name=None, scipy_name=None, Decorated test fixture is required throw same errors even if ``xp`` is ``numpy`` or ``cupy``. """ + def decorator(impl): @functools.wraps(impl) def test_func(self, *args, **kw): # Run cupy and numpy ( - cupy_result, cupy_error, cupy_tb, - numpy_result, numpy_error, numpy_tb) = ( - _call_func_numpy_cupy( - self, impl, args, kw, name, sp_name, scipy_name)) - - _check_cupy_numpy_error(self, cupy_error, cupy_tb, - numpy_error, numpy_tb, - accept_error=accept_error) + cupy_result, + cupy_error, + cupy_tb, + numpy_result, + numpy_error, + numpy_tb, + ) = _call_func_numpy_cupy( + self, impl, args, kw, name, sp_name, scipy_name + ) + + _check_cupy_numpy_error( + self, + cupy_error, + cupy_tb, + numpy_error, + numpy_tb, + accept_error=accept_error, + ) + return test_func + return decorator -def for_dtypes(dtypes, name='dtype'): +def for_dtypes(dtypes, name="dtype"): """Decorator for parameterized dtype test. Args: @@ -638,6 +790,7 @@ def for_dtypes(dtypes, name='dtype'): by passing the each element of ``dtypes`` to the named argument. 
""" + def decorator(impl): @functools.wraps(impl) def test_func(self, *args, **kw): @@ -645,13 +798,14 @@ def test_func(self, *args, **kw): try: kw[name] = numpy.dtype(dtype).type impl(self, *args, **kw) - except unittest.SkipTest as e: + except unittest.SkipTest: pass # print(f"Function decorator(): skipped: name={name} dtype={dtype} error={e}") except Exception: # print(f"Function decorator(): name={name} dtype={dtype}") raise return test_func + return decorator @@ -659,7 +813,7 @@ def test_func(self, *args, **kw): _regular_float_dtypes = (numpy.float64, numpy.float32) _float_dtypes = _regular_float_dtypes _signed_dtypes = () -_unsigned_dtypes = tuple(numpy.dtype(i).type for i in 'BHILQ') +_unsigned_dtypes = tuple(numpy.dtype(i).type for i in "BHILQ") _int_dtypes = _signed_dtypes + _unsigned_dtypes _int_bool_dtypes = _int_dtypes _regular_dtypes = _regular_float_dtypes + _int_bool_dtypes @@ -668,6 +822,8 @@ def test_func(self, *args, **kw): def _make_all_dtypes(no_float16, no_bool, no_complex): return (numpy.float64, numpy.float32, numpy.int64, numpy.int32) + + # if no_float16: # dtypes = _regular_float_dtypes # else: @@ -684,8 +840,9 @@ def _make_all_dtypes(no_float16, no_bool, no_complex): # return dtypes -def for_all_dtypes(name='dtype', no_float16=False, no_bool=False, - no_complex=False): +def for_all_dtypes( + name="dtype", no_float16=False, no_bool=False, no_complex=False +): """Decorator that checks the fixture with all dtypes. Args: @@ -740,11 +897,12 @@ def for_all_dtypes(name='dtype', no_float16=False, no_bool=False, .. seealso:: :func:`cupy.testing.for_dtypes` """ - return for_dtypes(_make_all_dtypes(no_float16, no_bool, no_complex), - name=name) + return for_dtypes( + _make_all_dtypes(no_float16, no_bool, no_complex), name=name + ) -def for_float_dtypes(name='dtype', no_float16=False): +def for_float_dtypes(name="dtype", no_float16=False): """Decorator that checks the fixture with float dtypes. Args: @@ -764,7 +922,7 @@ def for_float_dtypes(name='dtype', no_float16=False): return for_dtypes(_float_dtypes, name=name) -def for_signed_dtypes(name='dtype'): +def for_signed_dtypes(name="dtype"): """Decorator that checks the fixture with signed dtypes. Args: @@ -779,7 +937,7 @@ def for_signed_dtypes(name='dtype'): return for_dtypes(_signed_dtypes, name=name) -def for_unsigned_dtypes(name='dtype'): +def for_unsigned_dtypes(name="dtype"): """Decorator that checks the fixture with unsinged dtypes. Args: @@ -795,7 +953,7 @@ def for_unsigned_dtypes(name='dtype'): return for_dtypes(_unsigned_dtypes, name=name) -def for_int_dtypes(name='dtype', no_bool=False): +def for_int_dtypes(name="dtype", no_bool=False): """Decorator that checks the fixture with integer and optionally bool dtypes. Args: @@ -817,7 +975,7 @@ def for_int_dtypes(name='dtype', no_bool=False): return for_dtypes(_int_bool_dtypes, name=name) -def for_complex_dtypes(name='dtype'): +def for_complex_dtypes(name="dtype"): """Decorator that checks the fixture with complex dtypes. Args: @@ -831,7 +989,7 @@ def for_complex_dtypes(name='dtype'): return for_dtypes(_complex_dtypes, name=name) -def for_dtypes_combination(types, names=('dtype',), full=None): +def for_dtypes_combination(types, names=("dtype",), full=None): """Decorator that checks the fixture with a product set of dtypes. 
Args: @@ -861,11 +1019,11 @@ def for_dtypes_combination(types, names=('dtype',), full=None): types = list(types) if len(types) == 1: - name, = names + (name,) = names return for_dtypes(types, name) if full is None: - full = int(os.environ.get('CUPY_TEST_FULL_COMBINATION', '0')) != 0 + full = int(os.environ.get("CUPY_TEST_FULL_COMBINATION", "0")) != 0 if full: combination = parameterized.product({name: types for name in names}) @@ -895,12 +1053,17 @@ def test_func(self, *args, **kw): raise return test_func + return decorator -def for_all_dtypes_combination(names=('dtyes',), - no_float16=False, no_bool=False, full=None, - no_complex=False): +def for_all_dtypes_combination( + names=("dtyes",), + no_float16=False, + no_bool=False, + full=None, + no_complex=False, +): """Decorator that checks the fixture with a product set of all dtypes. Args: @@ -922,7 +1085,7 @@ def for_all_dtypes_combination(names=('dtyes',), return for_dtypes_combination(types, names, full) -def for_signed_dtypes_combination(names=('dtype',), full=None): +def for_signed_dtypes_combination(names=("dtype",), full=None): """Decorator for parameterized test w.r.t. the product set of signed dtypes. Args: @@ -937,7 +1100,7 @@ def for_signed_dtypes_combination(names=('dtype',), full=None): return for_dtypes_combination(_signed_dtypes, names=names, full=full) -def for_unsigned_dtypes_combination(names=('dtype',), full=None): +def for_unsigned_dtypes_combination(names=("dtype",), full=None): """Decorator for parameterized test w.r.t. the product set of unsigned dtypes. Args: @@ -952,7 +1115,7 @@ def for_unsigned_dtypes_combination(names=('dtype',), full=None): return for_dtypes_combination(_unsigned_dtypes, names=names, full=full) -def for_int_dtypes_combination(names=('dtype',), no_bool=False, full=None): +def for_int_dtypes_combination(names=("dtype",), no_bool=False, full=None): """Decorator for parameterized test w.r.t. the product set of int and boolean. Args: @@ -973,7 +1136,7 @@ def for_int_dtypes_combination(names=('dtype',), no_bool=False, full=None): return for_dtypes_combination(types, names, full) -def for_orders(orders, name='order'): +def for_orders(orders, name="order"): """Decorator to parameterize tests with order. Args: @@ -985,6 +1148,7 @@ def for_orders(orders, name='order'): ``orders`` to the named argument. """ + def decorator(impl): @functools.wraps(impl) def test_func(self, *args, **kw): @@ -993,14 +1157,15 @@ def test_func(self, *args, **kw): kw[name] = order impl(self, *args, **kw) except Exception: - print(name, 'is', order) + print(name, "is", order) raise return test_func + return decorator -def for_CF_orders(name='order'): +def for_CF_orders(name="order"): """Decorator that checks the fixture with orders 'C' and 'F'. Args: @@ -1009,7 +1174,7 @@ def for_CF_orders(name='order'): .. 
seealso:: :func:`cupy.testing.for_all_dtypes` """ - return for_orders([None, 'C', 'c'], name) + return for_orders([None, "C", "c"], name) def with_requires(*requirements): @@ -1041,7 +1206,7 @@ def with_requires(*requirements): except pkg_resources.ResolutionError: skip = True - msg = 'requires: {}'.format(','.join(requirements)) + msg = "requires: {}".format(",".join(requirements)) return unittest.skipIf(skip, msg) @@ -1055,7 +1220,7 @@ def numpy_satisfies(version_range): # See https://github.com/pypa/setuptools/issues/510 import pkg_resources - spec = 'numpy{}'.format(version_range) + spec = "numpy{}".format(version_range) try: pkg_resources.require(spec) except pkg_resources.VersionConflict: @@ -1063,8 +1228,8 @@ def numpy_satisfies(version_range): return True -def shaped_arange(shape, xp=dpnp, dtype=numpy.float64, order='C'): - """Returns an array with given shape, array module, and dtype. +def shaped_arange(shape, xp=dpnp, dtype=numpy.float64, order="C"): + r"""Returns an array with given shape, array module, and dtype. Args: shape(tuple of int): Shape of returned ndarray. @@ -1083,15 +1248,15 @@ def shaped_arange(shape, xp=dpnp, dtype=numpy.float64, order='C'): """ dtype = numpy.dtype(dtype) a = xp.arange(1, prod(shape) + 1, 1) - if dtype == '?': + if dtype == "?": a = a % 2 == 0 - elif dtype.kind == 'c': + elif dtype.kind == "c": a = a + a * 1j return xp.array(a.astype(dtype).reshape(shape), order=order, dtype=dtype) def shaped_reverse_arange(shape, xp=dpnp, dtype=numpy.float32): - """Returns an array filled with decreasing numbers. + r"""Returns an array filled with decreasing numbers. Args: shape(tuple of int): Shape of returned ndarray. @@ -1110,9 +1275,9 @@ def shaped_reverse_arange(shape, xp=dpnp, dtype=numpy.float32): size = prod(shape) # a = xp.arange(size, 0, -1) a = xp.arange(0, size) - if dtype == '?': + if dtype == "?": a = a % 2 == 0 - elif dtype.kind == 'c': + elif dtype.kind == "c": a = a + a * 1j return xp.array(a.astype(dtype).reshape(shape)) @@ -1141,9 +1306,9 @@ def shaped_random(shape, xp=dpnp, dtype=numpy.float64, scale=10, seed=0): """ numpy.random.seed(seed) dtype = numpy.dtype(dtype) - if dtype == '?': + if dtype == "?": return xp.asarray(numpy.random.randint(2, size=shape), dtype=dtype) - elif dtype.kind == 'c': + elif dtype.kind == "c": a = dpnp.random.rand(*shape) + 1j * numpy.random.rand(*shape) return xp.asarray(a * scale, dtype=dtype) else: @@ -1155,7 +1320,6 @@ def empty(xp=dpnp, dtype=numpy.float64): class NumpyError(object): - def __init__(self, **kw): self.kw = kw @@ -1170,7 +1334,7 @@ def __exit__(self, *_): @contextlib.contextmanager def assert_warns(expected): with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') + warnings.simplefilter("always") yield if any(isinstance(m.message, expected) for m in w): @@ -1181,11 +1345,10 @@ def assert_warns(expected): except AttributeError: exc_name = str(expected) - raise AssertionError('%s not triggerred' % exc_name) + raise AssertionError("%s not triggerred" % exc_name) class NumpyAliasTestBase(unittest.TestCase): - @property def func(self): raise NotImplementedError() @@ -1200,7 +1363,6 @@ def numpy_func(self): class NumpyAliasBasicTestBase(NumpyAliasTestBase): - def test_argspec(self): f = inspect.signature assert f(self.cupy_func) == f(self.numpy_func) @@ -1208,13 +1370,12 @@ def test_argspec(self): def test_docstring(self): cupy_func = self.cupy_func numpy_func = self.numpy_func - assert hasattr(cupy_func, '__doc__') + assert hasattr(cupy_func, "__doc__") assert 
cupy_func.__doc__ is not None - assert cupy_func.__doc__ != '' + assert cupy_func.__doc__ != "" assert cupy_func.__doc__ is not numpy_func.__doc__ class NumpyAliasValuesTestBase(NumpyAliasTestBase): - def test_values(self): assert self.cupy_func(*self.args) == self.numpy_func(*self.args) diff --git a/tests/third_party/cupy/testing/hypothesis.py b/tests/third_party/cupy/testing/hypothesis.py index fe9cccea4b7a..3fd13748b7bc 100644 --- a/tests/third_party/cupy/testing/hypothesis.py +++ b/tests/third_party/cupy/testing/hypothesis.py @@ -25,7 +25,7 @@ def chi_square_test(observed, expected, alpha=0.05, df=None): elif alpha == 0.05: alpha_idx = 1 else: - raise ValueError('support only alpha == 0.05 or 0.01') + raise ValueError("support only alpha == 0.05 or 0.01") chi_square = numpy.sum((observed - expected) ** 2 / expected) return chi_square < chi_square_table[alpha_idx][df] @@ -33,105 +33,510 @@ def chi_square_test(observed, expected, alpha=0.05, df=None): # https://www.medcalc.org/manual/chi-square-table.php chi_square_table = [ - [None, - 6.635, 9.210, 11.345, 13.277, 15.086, - 16.812, 18.475, 20.090, 21.666, 23.209, - 24.725, 26.217, 27.688, 29.141, 30.578, - 32.000, 33.409, 34.805, 36.191, 37.566, - 38.932, 40.289, 41.638, 42.980, 44.314, - 45.642, 46.963, 48.278, 49.588, 50.892, - 52.191, 53.486, 54.776, 56.061, 57.342, - 58.619, 59.893, 61.162, 62.428, 63.691, - 64.950, 66.206, 67.459, 68.710, 69.957, - 71.201, 72.443, 73.683, 74.919, 76.154, - 77.386, 78.616, 79.843, 81.069, 82.292, - 83.513, 84.733, 85.950, 87.166, 88.379, - 89.591, 90.802, 92.010, 93.217, 94.422, - 95.626, 96.828, 98.028, 99.228, 100.425, - 101.621, 102.816, 104.010, 105.202, 106.393, - 107.583, 108.771, 109.958, 111.144, 112.329, - 113.512, 114.695, 115.876, 117.057, 118.236, - 119.414, 120.591, 121.767, 122.942, 124.116, - 125.289, 126.462, 127.633, 128.803, 129.973, - 131.141, 132.309, 133.476, 134.642, 135.807, - 136.971, 138.134, 139.297, 140.459, 141.620, - 142.780, 143.940, 145.099, 146.257, 147.414, - 148.571, 149.727, 150.882, 152.037, 153.191, - 154.344, 155.496, 156.648, 157.800, 158.950, - 160.100, 161.250, 162.398, 163.546, 164.694, - 165.841, 166.987, 168.133, 169.278, 170.423, - 171.567, 172.711, 173.854, 174.996, 176.138, - 177.280, 178.421, 179.561, 180.701, 181.840, - 182.979, 184.118, 185.256, 186.393, 187.530, - 188.666, 189.802, 190.938, 192.073, 193.208, - 194.342, 195.476, 196.609, 197.742, 198.874, - 200.006, 201.138, 202.269, 203.400, 204.530, - 205.660, 206.790, 207.919, 209.047, 210.176, - 211.304, 212.431, 213.558, 214.685, 215.812, - 216.938, 218.063, 219.189, 220.314, 221.438, - 222.563, 223.687, 224.810, 225.933, 227.056, - 228.179, 229.301, 230.423, 231.544, 232.665, - 233.786, 234.907, 236.027, 237.147, 238.266, - 239.386, 240.505, 241.623, 242.742, 243.860, - 244.977, 246.095, 247.212, 248.329, 249.445, - 250.561, 251.677, 252.793, 253.908, 255.023, - 256.138, 257.253, 258.367, 259.481, 260.595, - 261.708, 262.821, 263.934, 265.047, 266.159, - 267.271, 268.383, 269.495, 270.606, 271.717, - 272.828, 273.939, 275.049, 276.159, 277.269, - 278.379, 279.488, 280.597, 281.706, 282.814, - 283.923, 285.031, 286.139, 287.247, 288.354, - 289.461, 290.568, 291.675, 292.782, 293.888, - 294.994, 296.100, 297.206, 298.311, 299.417, - 300.522, 301.626, 302.731, 303.835, 304.940], - [None, - 3.841, 5.991, 7.815, 9.488, 11.070, - 12.592, 14.067, 15.507, 16.919, 18.307, - 19.675, 21.026, 22.362, 23.685, 24.996, - 26.296, 27.587, 28.869, 30.144, 31.410, - 32.671, 33.924, 35.172, 36.415, 37.652, - 
38.885, 40.113, 41.337, 42.557, 43.773, - 44.985, 46.194, 47.400, 48.602, 49.802, - 50.998, 52.192, 53.384, 54.572, 55.758, - 56.942, 58.124, 59.304, 60.481, 61.656, - 62.830, 64.001, 65.171, 66.339, 67.505, - 68.669, 69.832, 70.993, 72.153, 73.311, - 74.468, 75.624, 76.778, 77.931, 79.082, - 80.232, 81.381, 82.529, 83.675, 84.821, - 85.965, 87.108, 88.250, 89.391, 90.531, - 91.670, 92.808, 93.945, 95.081, 96.217, - 97.351, 98.484, 99.617, 100.749, 101.879, - 103.010, 104.139, 105.267, 106.395, 107.522, - 108.648, 109.773, 110.898, 112.022, 113.145, - 114.268, 115.390, 116.511, 117.632, 118.752, - 119.871, 120.990, 122.108, 123.225, 124.342, - 125.458, 126.574, 127.689, 128.804, 129.918, - 131.031, 132.144, 133.257, 134.369, 135.480, - 136.591, 137.701, 138.811, 139.921, 141.030, - 142.138, 143.246, 144.354, 145.461, 146.567, - 147.674, 148.779, 149.885, 150.989, 152.094, - 153.198, 154.302, 155.405, 156.508, 157.610, - 158.712, 159.814, 160.915, 162.016, 163.116, - 164.216, 165.316, 166.415, 167.514, 168.613, - 169.711, 170.809, 171.907, 173.004, 174.101, - 175.198, 176.294, 177.390, 178.485, 179.581, - 180.676, 181.770, 182.865, 183.959, 185.052, - 186.146, 187.239, 188.332, 189.424, 190.516, - 191.608, 192.700, 193.791, 194.883, 195.973, - 197.064, 198.154, 199.244, 200.334, 201.423, - 202.513, 203.602, 204.690, 205.779, 206.867, - 207.955, 209.042, 210.130, 211.217, 212.304, - 213.391, 214.477, 215.563, 216.649, 217.735, - 218.820, 219.906, 220.991, 222.076, 223.160, - 224.245, 225.329, 226.413, 227.496, 228.580, - 229.663, 230.746, 231.829, 232.912, 233.994, - 235.077, 236.159, 237.240, 238.322, 239.403, - 240.485, 241.566, 242.647, 243.727, 244.808, - 245.888, 246.968, 248.048, 249.128, 250.207, - 251.286, 252.365, 253.444, 254.523, 255.602, - 256.680, 257.758, 258.837, 259.914, 260.992, - 262.070, 263.147, 264.224, 265.301, 266.378, - 267.455, 268.531, 269.608, 270.684, 271.760, - 272.836, 273.911, 274.987, 276.062, 277.138, - 278.213, 279.288, 280.362, 281.437, 282.511, - 283.586, 284.660, 285.734, 286.808, 287.882]] + [ + None, + 6.635, + 9.210, + 11.345, + 13.277, + 15.086, + 16.812, + 18.475, + 20.090, + 21.666, + 23.209, + 24.725, + 26.217, + 27.688, + 29.141, + 30.578, + 32.000, + 33.409, + 34.805, + 36.191, + 37.566, + 38.932, + 40.289, + 41.638, + 42.980, + 44.314, + 45.642, + 46.963, + 48.278, + 49.588, + 50.892, + 52.191, + 53.486, + 54.776, + 56.061, + 57.342, + 58.619, + 59.893, + 61.162, + 62.428, + 63.691, + 64.950, + 66.206, + 67.459, + 68.710, + 69.957, + 71.201, + 72.443, + 73.683, + 74.919, + 76.154, + 77.386, + 78.616, + 79.843, + 81.069, + 82.292, + 83.513, + 84.733, + 85.950, + 87.166, + 88.379, + 89.591, + 90.802, + 92.010, + 93.217, + 94.422, + 95.626, + 96.828, + 98.028, + 99.228, + 100.425, + 101.621, + 102.816, + 104.010, + 105.202, + 106.393, + 107.583, + 108.771, + 109.958, + 111.144, + 112.329, + 113.512, + 114.695, + 115.876, + 117.057, + 118.236, + 119.414, + 120.591, + 121.767, + 122.942, + 124.116, + 125.289, + 126.462, + 127.633, + 128.803, + 129.973, + 131.141, + 132.309, + 133.476, + 134.642, + 135.807, + 136.971, + 138.134, + 139.297, + 140.459, + 141.620, + 142.780, + 143.940, + 145.099, + 146.257, + 147.414, + 148.571, + 149.727, + 150.882, + 152.037, + 153.191, + 154.344, + 155.496, + 156.648, + 157.800, + 158.950, + 160.100, + 161.250, + 162.398, + 163.546, + 164.694, + 165.841, + 166.987, + 168.133, + 169.278, + 170.423, + 171.567, + 172.711, + 173.854, + 174.996, + 176.138, + 177.280, + 178.421, + 179.561, + 180.701, + 181.840, + 182.979, + 
184.118, + 185.256, + 186.393, + 187.530, + 188.666, + 189.802, + 190.938, + 192.073, + 193.208, + 194.342, + 195.476, + 196.609, + 197.742, + 198.874, + 200.006, + 201.138, + 202.269, + 203.400, + 204.530, + 205.660, + 206.790, + 207.919, + 209.047, + 210.176, + 211.304, + 212.431, + 213.558, + 214.685, + 215.812, + 216.938, + 218.063, + 219.189, + 220.314, + 221.438, + 222.563, + 223.687, + 224.810, + 225.933, + 227.056, + 228.179, + 229.301, + 230.423, + 231.544, + 232.665, + 233.786, + 234.907, + 236.027, + 237.147, + 238.266, + 239.386, + 240.505, + 241.623, + 242.742, + 243.860, + 244.977, + 246.095, + 247.212, + 248.329, + 249.445, + 250.561, + 251.677, + 252.793, + 253.908, + 255.023, + 256.138, + 257.253, + 258.367, + 259.481, + 260.595, + 261.708, + 262.821, + 263.934, + 265.047, + 266.159, + 267.271, + 268.383, + 269.495, + 270.606, + 271.717, + 272.828, + 273.939, + 275.049, + 276.159, + 277.269, + 278.379, + 279.488, + 280.597, + 281.706, + 282.814, + 283.923, + 285.031, + 286.139, + 287.247, + 288.354, + 289.461, + 290.568, + 291.675, + 292.782, + 293.888, + 294.994, + 296.100, + 297.206, + 298.311, + 299.417, + 300.522, + 301.626, + 302.731, + 303.835, + 304.940, + ], + [ + None, + 3.841, + 5.991, + 7.815, + 9.488, + 11.070, + 12.592, + 14.067, + 15.507, + 16.919, + 18.307, + 19.675, + 21.026, + 22.362, + 23.685, + 24.996, + 26.296, + 27.587, + 28.869, + 30.144, + 31.410, + 32.671, + 33.924, + 35.172, + 36.415, + 37.652, + 38.885, + 40.113, + 41.337, + 42.557, + 43.773, + 44.985, + 46.194, + 47.400, + 48.602, + 49.802, + 50.998, + 52.192, + 53.384, + 54.572, + 55.758, + 56.942, + 58.124, + 59.304, + 60.481, + 61.656, + 62.830, + 64.001, + 65.171, + 66.339, + 67.505, + 68.669, + 69.832, + 70.993, + 72.153, + 73.311, + 74.468, + 75.624, + 76.778, + 77.931, + 79.082, + 80.232, + 81.381, + 82.529, + 83.675, + 84.821, + 85.965, + 87.108, + 88.250, + 89.391, + 90.531, + 91.670, + 92.808, + 93.945, + 95.081, + 96.217, + 97.351, + 98.484, + 99.617, + 100.749, + 101.879, + 103.010, + 104.139, + 105.267, + 106.395, + 107.522, + 108.648, + 109.773, + 110.898, + 112.022, + 113.145, + 114.268, + 115.390, + 116.511, + 117.632, + 118.752, + 119.871, + 120.990, + 122.108, + 123.225, + 124.342, + 125.458, + 126.574, + 127.689, + 128.804, + 129.918, + 131.031, + 132.144, + 133.257, + 134.369, + 135.480, + 136.591, + 137.701, + 138.811, + 139.921, + 141.030, + 142.138, + 143.246, + 144.354, + 145.461, + 146.567, + 147.674, + 148.779, + 149.885, + 150.989, + 152.094, + 153.198, + 154.302, + 155.405, + 156.508, + 157.610, + 158.712, + 159.814, + 160.915, + 162.016, + 163.116, + 164.216, + 165.316, + 166.415, + 167.514, + 168.613, + 169.711, + 170.809, + 171.907, + 173.004, + 174.101, + 175.198, + 176.294, + 177.390, + 178.485, + 179.581, + 180.676, + 181.770, + 182.865, + 183.959, + 185.052, + 186.146, + 187.239, + 188.332, + 189.424, + 190.516, + 191.608, + 192.700, + 193.791, + 194.883, + 195.973, + 197.064, + 198.154, + 199.244, + 200.334, + 201.423, + 202.513, + 203.602, + 204.690, + 205.779, + 206.867, + 207.955, + 209.042, + 210.130, + 211.217, + 212.304, + 213.391, + 214.477, + 215.563, + 216.649, + 217.735, + 218.820, + 219.906, + 220.991, + 222.076, + 223.160, + 224.245, + 225.329, + 226.413, + 227.496, + 228.580, + 229.663, + 230.746, + 231.829, + 232.912, + 233.994, + 235.077, + 236.159, + 237.240, + 238.322, + 239.403, + 240.485, + 241.566, + 242.647, + 243.727, + 244.808, + 245.888, + 246.968, + 248.048, + 249.128, + 250.207, + 251.286, + 252.365, + 253.444, + 254.523, + 255.602, + 
256.680, + 257.758, + 258.837, + 259.914, + 260.992, + 262.070, + 263.147, + 264.224, + 265.301, + 266.378, + 267.455, + 268.531, + 269.608, + 270.684, + 271.760, + 272.836, + 273.911, + 274.987, + 276.062, + 277.138, + 278.213, + 279.288, + 280.362, + 281.437, + 282.511, + 283.586, + 284.660, + 285.734, + 286.808, + 287.882, + ], +] diff --git a/tests/third_party/cupy/testing/parameterized.py b/tests/third_party/cupy/testing/parameterized.py index f9888e9cd5ae..7cd9de5305d5 100644 --- a/tests/third_party/cupy/testing/parameterized.py +++ b/tests/third_party/cupy/testing/parameterized.py @@ -1,6 +1,6 @@ import functools -import itertools import io +import itertools import types import typing as tp # NOQA import unittest @@ -11,7 +11,7 @@ def _param_to_str(obj): if isinstance(obj, type): return obj.__name__ - elif hasattr(obj, '__name__') and isinstance(obj.__name__, str): + elif hasattr(obj, "__name__") and isinstance(obj.__name__, str): # print __name__ attribute for classes, functions and modules return obj.__name__ return repr(obj) @@ -20,7 +20,7 @@ def _param_to_str(obj): def _shorten(s, maxlen): # Shortens the string down to maxlen, by replacing the middle part with # a 3-dots string '...'. - ellipsis = '...' + ellipsis = "..." if len(s) <= maxlen: return s n1 = (maxlen - len(ellipsis)) // 2 @@ -35,11 +35,11 @@ def _make_class_name(base_class_name, i_param, param): SINGLE_PARAM_MAXLEN = 100 # Length limit of a single parameter value PARAMS_MAXLEN = 5000 # Length limit of the whole parameters part param_strs = [ - '{}={}'.format(k, _shorten(_param_to_str(v), SINGLE_PARAM_MAXLEN)) - for k, v in sorted(param.items())] - param_strs = _shorten(', '.join(param_strs), PARAMS_MAXLEN) - cls_name = '{}_param_{}_{{{}}}'.format( - base_class_name, i_param, param_strs) + "{}={}".format(k, _shorten(_param_to_str(v), SINGLE_PARAM_MAXLEN)) + for k, v in sorted(param.items()) + ] + param_strs = _shorten(", ".join(param_strs), PARAMS_MAXLEN) + cls_name = "{}_param_{}_{{{}}}".format(base_class_name, i_param, param_strs) return cls_name @@ -55,17 +55,16 @@ def _parameterize_test_case(base, i, param): def __str__(self): name = base.__str__(self) - return '%s parameter: %s' % (name, param) + return "%s parameter: %s" % (name, param) - mb = {'__str__': __str__} + mb = {"__str__": __str__} for k, v in sorted(param.items()): if isinstance(v, types.FunctionType): - def create_new_v(): - f = v - + def create_new_v(f=v): def new_v(self, *args, **kwargs): return f(*args, **kwargs) + return new_v mb[k] = create_new_v() @@ -83,13 +82,17 @@ def new_method(self, *args, **kwargs): raise except Exception as e: s = io.StringIO() - s.write('Parameterized test failed.\n\n') - s.write('Base test method: {}.{}\n'.format( - base.__name__, base_method.__name__)) - s.write('Test parameters:\n') + s.write("Parameterized test failed.\n\n") + s.write( + "Base test method: {}.{}\n".format( + base.__name__, base_method.__name__ + ) + ) + s.write("Test parameters:\n") for k, v in sorted(param.items()): - s.write(' {}: {}\n'.format(k, v)) + s.write(" {}: {}\n".format(k, v)) raise e.__class__(s.getvalue()).with_traceback(e.__traceback__) + return new_method return (cls_name, mb, method_generator) @@ -98,7 +101,8 @@ def new_method(self, *args, **kwargs): def parameterize(*params): # TODO(niboshi): Add documentation return _bundle.make_decorator( - lambda base: _parameterize_test_case_generator(base, params)) + lambda base: _parameterize_test_case_generator(base, params) + ) def _values_to_dicts(names, values): @@ -111,7 +115,7 @@ def 
safe_zip(ns, vs): assert isinstance(vs, (tuple, list)) and len(ns) == len(vs) return zip(ns, vs) - names = names.split(',') + names = names.split(",") params = [dict(safe_zip(names, value_list)) for value_list in values] return params @@ -131,26 +135,32 @@ def parameterize_pytest(names, values): def product(parameter): # TODO(niboshi): Add documentation if isinstance(parameter, dict): - return product_dict(*[ - _values_to_dicts(names, values) - for names, values in sorted(parameter.items())]) + return product_dict( + *[ + _values_to_dicts(names, values) + for names, values in sorted(parameter.items()) + ] + ) elif isinstance(parameter, list): # list of lists of dicts if not all(isinstance(_, list) for _ in parameter): - raise TypeError('parameter must be list of lists of dicts') - if not all(isinstance(_, dict) for l in parameter for _ in l): - raise TypeError('parameter must be list of lists of dicts') + raise TypeError("parameter must be list of lists of dicts") + if not all(isinstance(_, dict) for p in parameter for _ in p): + raise TypeError("parameter must be list of lists of dicts") return product_dict(*parameter) else: raise TypeError( - 'parameter must be either dict or list. Actual: {}'.format( - type(parameter))) + "parameter must be either dict or list. Actual: {}".format( + type(parameter) + ) + ) def product_dict(*parameters): # TODO(niboshi): Add documentation return [ {k: v for dic in dicts for k, v in dic.items()} - for dicts in itertools.product(*parameters)] + for dicts in itertools.product(*parameters) + ] diff --git a/tests/third_party/cupy/testing/random.py b/tests/third_party/cupy/testing/random.py index 84487c0653ce..444f2b3352c8 100644 --- a/tests/third_party/cupy/testing/random.py +++ b/tests/third_party/cupy/testing/random.py @@ -1,13 +1,13 @@ import atexit import functools -import numpy import os import random import types import unittest -import dpnp as cupy +import numpy +import dpnp as cupy _old_python_random_state = None _old_numpy_random_state = None @@ -25,8 +25,7 @@ def do_setup(deterministic=True): # Check that _random_state has been recreated in # cupy.random.reset_states(). Otherwise the contents of # _old_cupy_random_states would be overwritten. - assert (cupy.random.generator._random_states is not - _old_cupy_random_states) + assert cupy.random.generator._random_states is not _old_cupy_random_states if not deterministic: random.seed() @@ -58,36 +57,34 @@ def do_teardown(): @atexit.register def _check_teardown(): - assert _nest_count == 0, ('_setup_random() and _teardown_random() ' - 'must be called in pairs.') + assert _nest_count == 0, ( + "_setup_random() and _teardown_random() " "must be called in pairs." + ) def _setup_random(): - """Sets up the deterministic random states of ``numpy`` and ``cupy``. - - """ + """Sets up the deterministic random states of ``numpy`` and ``cupy``.""" global _nest_count if _nest_count == 0: - nondeterministic = bool(int(os.environ.get( - 'CUPY_TEST_RANDOM_NONDETERMINISTIC', '0'))) + nondeterministic = bool( + int(os.environ.get("CUPY_TEST_RANDOM_NONDETERMINISTIC", "0")) + ) do_setup(not nondeterministic) _nest_count += 1 def _teardown_random(): - """Tears down the deterministic random states set up by ``_setup_random``. 
- - """ + """Tears down the deterministic random states set up by ``_setup_random``.""" global _nest_count - assert _nest_count > 0, '_setup_random has not been called' + assert _nest_count > 0, "_setup_random has not been called" _nest_count -= 1 if _nest_count == 0: do_teardown() def generate_seed(): - assert _nest_count > 0, 'random is not set up' - return numpy.random.randint(0x7fffffff) + assert _nest_count > 0, "random is not set up" + return numpy.random.randint(0x7FFFFFFF) def fix_random(): @@ -104,8 +101,9 @@ def fix_random(): # these decorators. def decorator(impl): - if (isinstance(impl, types.FunctionType) and - impl.__name__.startswith('test_')): + if isinstance(impl, types.FunctionType) and impl.__name__.startswith( + "test_" + ): # Applied to test method @functools.wraps(impl) def test_func(self, *args, **kw): @@ -114,6 +112,7 @@ def test_func(self, *args, **kw): impl(self, *args, **kw) finally: _teardown_random() + return test_func elif isinstance(impl, type) and issubclass(impl, unittest.TestCase): # Applied to test case class @@ -123,6 +122,7 @@ def wrap_setUp(f): def func(self): _setup_random() f(self) + return func def wrap_tearDown(f): @@ -131,12 +131,13 @@ def func(self): f(self) finally: _teardown_random() + return func klass.setUp = wrap_setUp(klass.setUp) klass.tearDown = wrap_tearDown(klass.tearDown) return klass else: - raise ValueError('Can\'t apply fix_random to {}'.format(impl)) + raise ValueError("Can't apply fix_random to {}".format(impl)) return decorator diff --git a/tests/third_party/intel/test_zero_copy_test1.py b/tests/third_party/intel/test_zero_copy_test1.py index f573f42ea2cb..9c9d0fa9dbad 100644 --- a/tests/third_party/intel/test_zero_copy_test1.py +++ b/tests/third_party/intel/test_zero_copy_test1.py @@ -1,5 +1,6 @@ -import sys import importlib +import sys + import pytest @@ -7,7 +8,7 @@ class dummymodule: pass -sys.modules['numba_dppy'] = dummymodule +sys.modules["numba_dppy"] = dummymodule module_not_found = False diff --git a/tests/third_party/intel/zero-copy-test1.py b/tests/third_party/intel/zero-copy-test1.py index 0d9040311d81..340df2af3a17 100644 --- a/tests/third_party/intel/zero-copy-test1.py +++ b/tests/third_party/intel/zero-copy-test1.py @@ -1,10 +1,11 @@ import dpctl import dpctl.memory as dpmem -import dpnp +import dpctl.tensor.numpy_usm_shared as usmarray +import numba_dppy as dppy import numpy as np import pytest -import numba_dppy as dppy -import dpctl.tensor.numpy_usm_shared as usmarray + +import dpnp class DuckUSMArray: @@ -32,9 +33,7 @@ def __sycl_usm_array_interface__(self): def test_dpnp_interaction_with_dpctl_memory(): - """Tests if dpnp supports zero-copy data exchange with another Python - object that defines `__sycl_usm_array_interface__` - """ + """Tests if dpnp supports zero-copy data exchange with another Python object that defines `__sycl_usm_array_interface__`.""" hb = np.arange(0, 100, dtype=np.int64) da = DuckUSMArray(hb.shape, dtype=hb.dtype, host_buffer=hb) @@ -48,9 +47,7 @@ def test_dpnp_interaction_with_dpctl_memory(): def test_dppy_array_pass(): - """Tests if dppy supports passing an array-like object DuckArray that defines `__sycl_usm_array_interface__` - to a dppy.kernel - """ + """Tests if dppy supports passing an array-like object DuckArray that defines `__sycl_usm_array_interface__` to a dppy.kernel.""" @dppy.kernel def dppy_f(array_like_obj): @@ -63,7 +60,7 @@ def dppy_f(array_like_obj): if dpctl.has_gpu_queues(dpctl.backend_type.level_zero): print("\nScheduling on OpenCL GPU\n") - with 
dpctl.device_context("opencl:gpu") as gpu_queue: + with dpctl.device_context("opencl:gpu"): dppy_f[global_size, dppy.DEFAULT_LOCAL_SIZE](da) else: print("\nSkip scheduling on OpenCL GPU\n") @@ -72,12 +69,12 @@ def dppy_f(array_like_obj): def test_dpctl_dparray_has_iface(): - """Tests if dpctl.dptensor.numpy_usm_shared defines '__sycl_usm_array_interface__'""" + """Tests if dpctl.dptensor.numpy_usm_shared defines '__sycl_usm_array_interface__'.""" X = usmarray.ones(10) assert type(getattr(X, "__sycl_usm_array_interface__", None) is dict) def test_dpnp_array_has_iface(): - """Tests if dpnp.ndarray defines '__sycl_usm_array_interface__'""" + """Tests if dpnp.ndarray defines '__sycl_usm_array_interface__'.""" X = dpnp.array([1]) assert type(getattr(X, "__sycl_usm_array_interface__", None) is dict) diff --git a/tests/third_party/numpy_ext/__init__.py b/tests/third_party/numpy_ext/__init__.py index ee770ae9364c..1fc26b9a558a 100644 --- a/tests/third_party/numpy_ext/__init__.py +++ b/tests/third_party/numpy_ext/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -26,6 +26,7 @@ """ To run numpy tests under dpnp. + >>> python -m tests.third_party.numpy_ext to run specific test suite: >>> python -m tests.third_party.numpy_ext core/tests/test_umath.py @@ -36,6 +37,5 @@ import dpnp from tests_external.numpy import runtests - code = runtests.run() exit(code) diff --git a/tests_external/numpy/runtests.py b/tests_external/numpy/runtests.py index 70c7d1712945..6a0017167a4e 100644 --- a/tests_external/numpy/runtests.py +++ b/tests_external/numpy/runtests.py @@ -2,7 +2,7 @@ # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -28,6 +28,7 @@ """ Script to run numpy tests under dpnp. 
+ >>> python -m tests_external.numpy.runtests to run specific test suite: >>> python -m tests_external.numpy.runtests core/tests/test_umath.py @@ -35,20 +36,19 @@ >>> python -m tests_external.numpy.runtests core/tests/test_umath.py::TestHypot::test_simple """ -import numpy.conftest -import numpy.core._rational_tests -import numpy import argparse -import unittest import site import sys import types - +import unittest from pathlib import Path +import numpy +import numpy.conftest +import numpy.core._rational_tests import pytest -import dpnp +import dpnp from dpnp.dparray import dparray @@ -76,10 +76,10 @@ class dummy_multiarray_tests(dummymodule): run_sortkind_converter = dummy_func -dummy_sctypes = {'uint': [], 'int': [], 'float': []} +dummy_sctypes = {"uint": [], "int": [], "float": []} -def define_func_types(mod, func_names, types_, default=''): +def define_func_types(mod, func_names, types_, default=""): """Define attribute types to specified functions of specified module""" for obj in mod.__dict__.values(): if not isinstance(obj, types.FunctionType): @@ -93,6 +93,7 @@ def define_func_types(mod, func_names, types_, default=''): def redefine_strides(f): """Redefine attribute strides in dparray returned by specified function""" + def wrapper(*args, **kwargs): res = f(*args, **kwargs) if not isinstance(res, dparray): @@ -108,6 +109,7 @@ def wrapper(*args, **kwargs): def replace_arg_value(f, arg_pos, in_values, out_value): """Replace value of positional argument of specified function""" + def wrapper(*args, **kwargs): if len(args) <= arg_pos: return f(*args, **kwargs) @@ -125,6 +127,7 @@ def wrapper(*args, **kwargs): def replace_kwarg_value(f, arg_name, in_values, out_value): """Replace value of keyword argument of specified function""" + def wrapper(*args, **kwargs): arg_value = kwargs.get(arg_name) for in_value in in_values: @@ -138,14 +141,48 @@ def wrapper(*args, **kwargs): # setting some dummy attrubutes to dpnp unsupported_classes = [ - 'byte', 'bytes_', 'cdouble', 'character', 'clongdouble', 'complex_', - 'complexfloating', 'datetime64', 'flexible', 'floating', - 'generic', 'half', 'inexact', 'int_', 'int16', 'int8', 'intc', 'integer', - 'longlong', 'matrix', 'memmap', 'nditer', 'nextafter', - 'number', 'object_', 'short', 'signedinteger', 'single', 'stack', - 'timedelta64', 'ubyte', 'uint', 'uint16', 'uint32', 'uint64', 'uint8', - 'uintc', 'ulonglong', 'unsignedinteger', 'ushort', 'vectorize', - 'VisibleDeprecationWarning' + "byte", + "bytes_", + "cdouble", + "character", + "clongdouble", + "complex_", + "complexfloating", + "datetime64", + "flexible", + "floating", + "generic", + "half", + "inexact", + "int_", + "int16", + "int8", + "intc", + "integer", + "longlong", + "matrix", + "memmap", + "nditer", + "nextafter", + "number", + "object_", + "short", + "signedinteger", + "single", + "stack", + "timedelta64", + "ubyte", + "uint", + "uint16", + "uint32", + "uint64", + "uint8", + "uintc", + "ulonglong", + "unsignedinteger", + "ushort", + "vectorize", + "VisibleDeprecationWarning", ] for klass in unsupported_classes: setattr(dpnp, klass, DummyClass) @@ -193,36 +230,55 @@ def wrapper(*args, **kwargs): array_input_replace_map = [ (dpnp.nan, [dpnp.nan]), ([None], [dpnp.nan]), - ([2. + 1j, 1. + 2j], []), - ([2. + 1j, 1. + 2j, 3. - 3j], []), - ([['one', 'two'], ['three', 'four']], [[], []]), - ([[1., 2 + 3j], [2 - 3j, 1]], [[], []]), - ([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], [[], []]), - ([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], [[], []]), - ([[2. + 1j, 1. 
+ 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], [[], []]), - ([[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], [[], []]), - ([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], [[2., 1.], [1., 2.], [1., 2.]]), - ([[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], [[1., 2.], [3., 4.], [5., 6.]]), + ([2.0 + 1j, 1.0 + 2j], []), + ([2.0 + 1j, 1.0 + 2j, 3.0 - 3j], []), + ([["one", "two"], ["three", "four"]], [[], []]), + ([[1.0, 2 + 3j], [2 - 3j, 1]], [[], []]), + ([[1.0 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], [[], []]), + ([[2.0 + 1j, 1.0 + 2j], [1 - 1j, 2 - 2j]], [[], []]), + ([[2.0 + 1j, 1.0 + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], [[], []]), + ( + [[1.0 + 1j, 2.0 + 2j, 3.0 - 3j], [3.0 - 5j, 4.0 + 9j, 6.0 + 2j]], + [[], []], + ), + ( + [[2.0 + 1j, 1.0 + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], + [[2.0, 1.0], [1.0, 2.0], [1.0, 2.0]], + ), + ( + [[1.0 + 1j, 2.0 + 2j], [3.0 - 3j, 4.0 - 9j], [5.0 - 4j, 6.0 + 8j]], + [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], + ), ] for in_value, out_value in array_input_replace_map: dpnp.array = replace_arg_value(dpnp.array, 0, [in_value], out_value) rational = numpy.core._rational_tests.rational -dpnp.array = replace_kwarg_value(dpnp.array, 'dtype', ['m8', dpnp.uint8, 'i4,i4', object, rational], None) -dpnp.array = replace_arg_value(dpnp.array, 1, ['i,i'], None) +dpnp.array = replace_kwarg_value( + dpnp.array, "dtype", ["m8", dpnp.uint8, "i4,i4", object, rational], None +) +dpnp.array = replace_arg_value(dpnp.array, 1, ["i,i"], None) -dpnp.full = replace_arg_value(dpnp.full, 1, [-2**64 + 1], 0) -dpnp.full = replace_kwarg_value(dpnp.full, 'dtype', [object], None) +dpnp.full = replace_arg_value(dpnp.full, 1, [-(2**64) + 1], 0) +dpnp.full = replace_kwarg_value(dpnp.full, "dtype", [object], None) dpnp.ones = redefine_strides(dpnp.ones) -dpnp.ones = replace_kwarg_value(dpnp.ones, 'dtype', ['i,i'], None) -dpnp.zeros = replace_kwarg_value(dpnp.zeros, 'dtype', [ - 'm8', dpnp.dtype(dict( - formats=['O' in uf.types} -define_func_types(dpnp, unary_object_ufuncs_names, 'O->O') +unary_ufuncs = [ + obj + for obj in numpy.core.umath.__dict__.values() + if isinstance(obj, numpy.ufunc) +] +unary_object_ufuncs_names = { + uf.__name__ for uf in unary_ufuncs if "O->O" in uf.types +} +define_func_types(dpnp, unary_object_ufuncs_names, "O->O") dpnp.conftest = numpy.conftest del numpy -sys.modules['numpy'] = dpnp # next import of numpy will be replaced with dpnp +sys.modules["numpy"] = dpnp # next import of numpy will be replaced with dpnp NUMPY_TESTS = [ - 'core', - 'fft', - 'linalg/tests/test_build.py', - 'linalg/tests/test_deprecations.py', + "core", + "fft", + "linalg/tests/test_build.py", + "linalg/tests/test_deprecations.py", # disabled due to __setitem__ limitation: # https://github.com/numpy/numpy/blob/d7a75e8e8fefc433cf6e5305807d5f3180954273/numpy/linalg/tests/test_linalg.py#L293 # 'linalg/tests/test_linalg.py', - 'linalg/tests/test_regression.py', - 'random', + "linalg/tests/test_regression.py", + "random", ] NUMPY_NOT_FOUND = 3 TESTS_EXT_PATH = Path(__file__).parents[1] -ABORTED_TESTS_FILE = TESTS_EXT_PATH / 'skipped_tests_numpy_aborted.tbl' -SKIPPED_TESTS_FILE = TESTS_EXT_PATH / 'skipped_tests_numpy.tbl' -FAILED_TESTS_FILE = TESTS_EXT_PATH / 'failed_tests_numpy.tbl' +ABORTED_TESTS_FILE = TESTS_EXT_PATH / "skipped_tests_numpy_aborted.tbl" +SKIPPED_TESTS_FILE = TESTS_EXT_PATH / "skipped_tests_numpy.tbl" +FAILED_TESTS_FILE = TESTS_EXT_PATH / "failed_tests_numpy.tbl" def get_excluded_tests(): @@ -322,7 +392,7 @@ def get_excluded_tests(): def 
pytest_collection_modifyitems(config, items): - skip_mark = pytest.mark.skip(reason='Skipping test.') + skip_mark = pytest.mark.skip(reason="Skipping test.") for item in items: test_name = item.nodeid.strip() @@ -340,9 +410,9 @@ def pytest_runtest_makereport(item, call): if not rep.failed: return None - mode = 'a' if FAILED_TESTS_FILE.exists() else 'w' + mode = "a" if FAILED_TESTS_FILE.exists() else "w" with FAILED_TESTS_FILE.open(mode) as f: - f.write(rep.nodeid.strip() + '\n') + f.write(rep.nodeid.strip() + "\n") dpnp.conftest.pytest_collection_modifyitems = pytest_collection_modifyitems @@ -362,7 +432,7 @@ def find_pkg(name): def tests_from_cmdline(): """Get relative paths to tests from command line arguments.""" parser = argparse.ArgumentParser() - parser.add_argument('tests', nargs='*', help='list of tests to run') + parser.add_argument("tests", nargs="*", help="list of tests to run") args = parser.parse_args() return args.tests @@ -383,9 +453,9 @@ def get_tests(base_path): def run(): - numpy_path = find_pkg('numpy') + numpy_path = find_pkg("numpy") if numpy_path is None: - print('Numpy not found in the environment.') + print("Numpy not found in the environment.") return NUMPY_NOT_FOUND if FAILED_TESTS_FILE.exists(): @@ -404,5 +474,5 @@ def run(): return code -if __name__ == '__main__': +if __name__ == "__main__": exit(run()) diff --git a/utils/command_build_clib.py b/utils/command_build_clib.py index 18a08b10468d..d430c046ca51 100644 --- a/utils/command_build_clib.py +++ b/utils/command_build_clib.py @@ -24,7 +24,9 @@ # THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** -"""Module to customize build_clib command +""" +Module to customize build_clib command. + Originally, 'build_clib' command produce static C library only. 
This modification add: - build shared C library @@ -35,23 +37,28 @@ import os import sys - -from setuptools.command import build_clib from distutils import log from distutils.dep_util import newer_group from distutils.file_util import copy_file -from utils.dpnp_build_utils import find_cmplr, find_dpl, find_mathlib, find_python_env +from setuptools.command import build_clib + +from utils.dpnp_build_utils import ( + find_cmplr, + find_dpl, + find_mathlib, + find_python_env, +) IS_WIN = False IS_MAC = False IS_LIN = False -if 'linux' in sys.platform: +if "linux" in sys.platform: IS_LIN = True -elif sys.platform == 'darwin': +elif sys.platform == "darwin": IS_MAC = True -elif sys.platform in ['win32', 'cygwin']: +elif sys.platform in ["win32", "cygwin"]: IS_WIN = True else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") @@ -62,13 +69,27 @@ # default variables (for Linux) _project_compiler = "dpcpp" _project_linker = "dpcpp" -_project_cmplr_flag_sycl_devel = ["-fsycl-device-code-split=per_kernel", "-fno-approx-func"] +_project_cmplr_flag_sycl_devel = [ + "-fsycl-device-code-split=per_kernel", + "-fno-approx-func", +] _project_cmplr_flag_sycl = ["-fsycl"] -_project_cmplr_flag_stdcpp_static = [] # This brakes TBB ["-static-libstdc++", "-static-libgcc"] +_project_cmplr_flag_stdcpp_static = ( + [] +) # This brakes TBB ["-static-libstdc++", "-static-libgcc"] _project_cmplr_flag_compatibility = ["-Wl,--enable-new-dtags"] _project_cmplr_flag_lib = ["-shared"] _project_cmplr_flag_release_build = ["-O3", "-DNDEBUG", "-fPIC"] -_project_cmplr_flag_debug_build = ["-g", "-O1", "-W", "-Wextra", "-Wshadow", "-Wall", "-Wstrict-prototypes", "-fPIC"] +_project_cmplr_flag_debug_build = [ + "-g", + "-O1", + "-W", + "-Wextra", + "-Wshadow", + "-Wall", + "-Wstrict-prototypes", + "-fPIC", +] _project_cmplr_flag_default_build = [] _project_cmplr_macro = [] _project_force_build = False @@ -77,12 +98,15 @@ _dpctrl_include = [] _dpctrl_libpath = [] _dpctrl_lib = [] -_sdl_cflags = ["-fstack-protector-strong", - "-fPIC", "-D_FORTIFY_SOURCE=2", - "-Wformat", - "-Wformat-security", - "-fno-strict-overflow", - "-fno-delete-null-pointer-checks"] +_sdl_cflags = [ + "-fstack-protector-strong", + "-fPIC", + "-D_FORTIFY_SOURCE=2", + "-Wformat", + "-Wformat-security", + "-fno-strict-overflow", + "-fno-delete-null-pointer-checks", +] _sdl_ldflags = ["-Wl,-z,noexecstack,-z,relro,-z,now"] # TODO remove when it will be fixed on TBB side. Details: @@ -90,7 +114,10 @@ # interface changes between earlier versions of Intel TBB and oneTBB. Disable support for Parallel STL algorithms # by defining PSTL_USE_PARALLEL_POLICIES (in GCC 9), _GLIBCXX_USE_TBB_PAR_BACKEND (in GCC 10) macro to zero # before inclusion of the first standard header file in each translation unit. 
-_project_cmplr_macro += [("PSTL_USE_PARALLEL_POLICIES", "0"), ("_GLIBCXX_USE_TBB_PAR_BACKEND", "0")] +_project_cmplr_macro += [ + ("PSTL_USE_PARALLEL_POLICIES", "0"), + ("_GLIBCXX_USE_TBB_PAR_BACKEND", "0"), +] # disable PSTL predefined policies objects (global queues, prevent fail on Windows) _project_cmplr_macro += [("ONEDPL_USE_PREDEFINED_POLICIES", "0")] @@ -103,7 +130,9 @@ _dpctrl_include += [dpctl.get_include()] # _dpctrl_libpath = for package build + for local build - _dpctrl_libpath = ["$ORIGIN/../dpctl"] + [os.path.join(dpctl.get_include(), '..')] + _dpctrl_libpath = ["$ORIGIN/../dpctl"] + [ + os.path.join(dpctl.get_include(), "..") + ] _dpctrl_lib = ["DPCTLSyclInterface"] except ImportError: """ @@ -130,7 +159,7 @@ """ Get the project build type """ -__dpnp_debug__ = os.environ.get('DPNP_DEBUG', None) +__dpnp_debug__ = os.environ.get("DPNP_DEBUG", None) if __dpnp_debug__ is not None: """ Debug configuration @@ -146,25 +175,52 @@ """ Get the math library environemnt """ -_project_cmplr_macro += [("MKL_ILP64", "1")] # using 64bit integers in MKL interface (long) +_project_cmplr_macro += [ + ("MKL_ILP64", "1") +] # using 64bit integers in MKL interface (long) if IS_LIN: - _mathlibs = ["mkl_sycl", "mkl_intel_ilp64", "mkl_sequential", - "mkl_core", "sycl", "OpenCL", "pthread", "m", "dl"] + _mathlibs = [ + "mkl_sycl", + "mkl_intel_ilp64", + "mkl_sequential", + "mkl_core", + "sycl", + "OpenCL", + "pthread", + "m", + "dl", + ] elif IS_WIN: - _mathlibs = ["mkl_sycl_dll", "mkl_intel_ilp64_dll", "mkl_tbb_thread_dll", "mkl_core_dll", "sycl", "OpenCL", "tbb"] + _mathlibs = [ + "mkl_sycl_dll", + "mkl_intel_ilp64_dll", + "mkl_tbb_thread_dll", + "mkl_core_dll", + "sycl", + "OpenCL", + "tbb", + ] """ Final set of arguments for extentions """ -_project_extra_link_args = _project_cmplr_flag_compatibility + _project_cmplr_flag_stdcpp_static + \ - ["-Wl,-rpath," + x for x in _project_rpath] + _sdl_ldflags +_project_extra_link_args = ( + _project_cmplr_flag_compatibility + + _project_cmplr_flag_stdcpp_static + + ["-Wl,-rpath," + x for x in _project_rpath] + + _sdl_ldflags +) _project_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..") -_project_backend_dir = [os.path.join(_project_dir, "dpnp", "backend", "include"), - os.path.join(_project_dir, "dpnp", "backend", "src") # not a public headers location - ] +_project_backend_dir = [ + os.path.join(_project_dir, "dpnp", "backend", "include"), + os.path.join( + _project_dir, "dpnp", "backend", "src" + ), # not a public headers location +] dpnp_backend_c_description = [ - ["dpnp_backend_c", + [ + "dpnp_backend_c", { "sources": [ "dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp", @@ -188,23 +244,32 @@ "dpnp/backend/src/queue_sycl.cpp", "dpnp/backend/src/verbose.cpp", ], - } - ] + }, + ] ] -def _compiler_compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): +def _compiler_compile( + self, + sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None, +): if not self.initialized: self.initialize() - compile_info = self._setup_compile(output_dir, macros, include_dirs, - sources, depends, extra_postargs) + compile_info = self._setup_compile( + output_dir, macros, include_dirs, sources, depends, extra_postargs + ) macros, objects, extra_postargs, pp_opts, build = compile_info compile_opts = extra_preargs or [] - compile_opts.append('/c') + compile_opts.append("/c") if debug: 
compile_opts.extend(self.compile_options_debug) else: @@ -260,9 +325,9 @@ def _compiler_compile(self, sources, rc_dir = os.path.dirname(obj) try: # first compile .MC to .RC and .H file - self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src]) + self.spawn([self.mc, "-h", h_dir, "-r", rc_dir, src]) base, _ = os.path.splitext(os.path.basename(src)) - rc_file = os.path.join(rc_dir, base + '.rc') + rc_file = os.path.join(rc_dir, base + ".rc") # then compile .RC to .RES file self.spawn([self.rc, "/fo" + obj, rc_file]) @@ -271,12 +336,13 @@ def _compiler_compile(self, sources, continue else: # how to handle this file? - raise CompileError("Don't know how to compile {} to {}" - .format(src, obj)) + raise CompileError( + "Don't know how to compile {} to {}".format(src, obj) + ) args = [self.cc] + compile_opts + pp_opts + d1trimfile_opts if add_cpp_opts: - args.append('/EHsc') + args.append("/EHsc") args.append(input_opt) args.append("/Fo" + obj) args.extend(extra_postargs) @@ -290,21 +356,20 @@ def _compiler_compile(self, sources, class custom_build_clib(build_clib.build_clib): - def build_libraries(self, libraries): - """ - This function is overloaded to the original function in build_clib.py file - """ + """This function is overloaded to the original function in build_clib.py file.""" for (lib_name, build_info) in libraries: - c_library_name = self.compiler.library_filename(lib_name, lib_type='shared') + c_library_name = self.compiler.library_filename( + lib_name, lib_type="shared" + ) c_library_filename = os.path.join(self.build_clib, c_library_name) dest_filename = "dpnp" # TODO need to fix destination directory - sources = build_info.get('sources') + sources = build_info.get("sources") if sources is None or not isinstance(sources, (list, tuple)): err_msg = f"in 'libraries' option (library '{lib_name}')," - err_msg += f" 'sources' must be present and must be a list of source filenames" + err_msg += " 'sources' must be present and must be a list of source filenames" raise DistutilsSetupError(err_msg) sources = list(sources) @@ -321,13 +386,24 @@ def build_libraries(self, libraries): _py_env_include, _py_env_lib = find_python_env(verbose=True) macros = _project_cmplr_macro - include_dirs = _cmplr_include + _dpl_include + _mathlib_include + _project_backend_dir + _dpctrl_include + _py_env_include + include_dirs = ( + _cmplr_include + + _dpl_include + + _mathlib_include + + _project_backend_dir + + _dpctrl_include + + _py_env_include + ) libraries = _mathlibs + _dpctrl_lib - library_dirs = _mathlib_path + _dpctrl_libpath + _py_env_lib # + _omp_libpath + library_dirs = ( + _mathlib_path + _dpctrl_libpath + _py_env_lib + ) # + _omp_libpath runtime_library_dirs = _project_rpath + _dpctrl_libpath extra_preargs = _project_cmplr_flag_sycl + _sdl_cflags extra_link_postargs = _project_cmplr_flag_lib - extra_link_preargs = _project_cmplr_flag_compatibility + _sdl_ldflags + extra_link_preargs = ( + _project_cmplr_flag_compatibility + _sdl_ldflags + ) force_build = _project_force_build compiler = [_project_compiler] linker = [_project_linker] @@ -351,26 +427,36 @@ def build_libraries(self, libraries): self.compiler.compile = _compiler_compile for source_it in sources: - obj_file_list = self.compiler.object_filenames([source_it], strip_dir=0, output_dir=self.build_temp) - obj_file = "".join(obj_file_list) # convert from list to file name - - newer_than_obj = newer_group([source_it], obj_file, missing="newer") + obj_file_list = self.compiler.object_filenames( + [source_it], strip_dir=0, 
output_dir=self.build_temp + ) + obj_file = "".join( + obj_file_list + ) # convert from list to file name + + newer_than_obj = newer_group( + [source_it], obj_file, missing="newer" + ) if force_build or newer_than_obj: if IS_WIN: - obj_file_list = self.compiler.compile(self.compiler, - [source_it], - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - extra_preargs=extra_preargs, - debug=self.debug) + obj_file_list = self.compiler.compile( + self.compiler, + [source_it], + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + extra_preargs=extra_preargs, + debug=self.debug, + ) else: - obj_file_list = self.compiler.compile([source_it], - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - extra_preargs=extra_preargs, - debug=self.debug) + obj_file_list = self.compiler.compile( + [source_it], + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + extra_preargs=extra_preargs, + debug=self.debug, + ) objects.extend(obj_file_list) else: objects.append(obj_file) @@ -378,41 +464,62 @@ def build_libraries(self, libraries): """ Build library file from objects """ - newer_than_lib = newer_group(objects, c_library_filename, missing="newer") + newer_than_lib = newer_group( + objects, c_library_filename, missing="newer" + ) if force_build or newer_than_lib: # TODO very brute way, need to refactor if IS_WIN: link_command = " ".join(compiler) link_command += " " + " ".join(default_flags) - link_command += " " + " ".join(objects) # specify *.obj files + link_command += " " + " ".join( + objects + ) # specify *.obj files link_command += " /link" # start linker options link_command += " " + " ".join(extra_link_preargs) - link_command += " " + ".lib ".join(libraries) + ".lib" # libraries - link_command += " /LIBPATH:" + " /LIBPATH:".join(library_dirs) - link_command += " /OUT:" + c_library_filename # output file name + link_command += ( + " " + ".lib ".join(libraries) + ".lib" + ) # libraries + link_command += " /LIBPATH:" + " /LIBPATH:".join( + library_dirs + ) + link_command += ( + " /OUT:" + c_library_filename + ) # output file name link_command += " " + " ".join(extra_link_postargs) print(link_command) os.system(link_command) else: - self.compiler.link_shared_lib(objects, - lib_name, - output_dir=self.build_clib, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=runtime_library_dirs, - extra_preargs=extra_preargs + extra_link_preargs, - extra_postargs=extra_link_postargs, - debug=self.debug, - build_temp=self.build_temp, - target_lang=language) + self.compiler.link_shared_lib( + objects, + lib_name, + output_dir=self.build_clib, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=runtime_library_dirs, + extra_preargs=extra_preargs + extra_link_preargs, + extra_postargs=extra_link_postargs, + debug=self.debug, + build_temp=self.build_temp, + target_lang=language, + ) """ Copy library to the destination path """ - copy_file(c_library_filename, dest_filename, verbose=self.verbose, dry_run=self.dry_run) + copy_file( + c_library_filename, + dest_filename, + verbose=self.verbose, + dry_run=self.dry_run, + ) # TODO very brute way, need to refactor if c_library_filename.endswith(".dll"): - copy_file(c_library_filename.replace(".dll", ".lib"), - dest_filename, verbose=self.verbose, dry_run=self.dry_run) + copy_file( + c_library_filename.replace(".dll", ".lib"), + dest_filename, + verbose=self.verbose, + dry_run=self.dry_run, + ) log.info(f"DPNP: building {lib_name} library 
finished") diff --git a/utils/command_build_cmake_clib.py b/utils/command_build_cmake_clib.py index 0ef0e240bb1e..62c7cb8d1e26 100644 --- a/utils/command_build_cmake_clib.py +++ b/utils/command_build_cmake_clib.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -24,16 +24,14 @@ # THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** -""" -Module to call cmake based procedure by build_cmake_clib command -""" +"""Module to call cmake based procedure by build_cmake_clib command.""" import os -import sys import pathlib -from setuptools.command import build_clib +import sys from distutils import log +from setuptools.command import build_clib """ Detect platform @@ -42,14 +40,16 @@ IS_MAC = False IS_LIN = False -if 'linux' in sys.platform: +if "linux" in sys.platform: IS_LIN = True -elif sys.platform == 'darwin': +elif sys.platform == "darwin": IS_MAC = True -elif sys.platform in ['win32', 'cygwin']: +elif sys.platform in ["win32", "cygwin"]: IS_WIN = True else: - raise EnvironmentError("DPNP cmake builder: " + sys.platform + " not supported") + raise EnvironmentError( + "DPNP cmake builder: " + sys.platform + " not supported" + ) """ @@ -65,7 +65,9 @@ import dpctl _dpctrl_include_dir = str(os.path.abspath(dpctl.get_include())) - _dpctrl_library_dir = str(os.path.abspath(os.path.join(dpctl.get_include(), ".."))) + _dpctrl_library_dir = str( + os.path.abspath(os.path.join(dpctl.get_include(), "..")) + ) _dpctrl_exists = "ON" except ImportError: """ @@ -76,7 +78,7 @@ """ Detect enabling DPNP backend tests """ -_dpnp_backend_tests_enable = os.environ.get('DPNP_BACKEND_TESTS_ENABLE', None) +_dpnp_backend_tests_enable = os.environ.get("DPNP_BACKEND_TESTS_ENABLE", None) """ @@ -86,7 +88,9 @@ class custom_build_cmake_clib(build_clib.build_clib): def run(self): - root_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), "..")) + root_dir = os.path.normpath( + os.path.join(os.path.dirname(__file__), "..") + ) log.info(f"Project directory is: {root_dir}") backend_directory = os.path.join(root_dir, "dpnp", "backend") @@ -112,18 +116,22 @@ def run(self): "-S" + backend_directory, "-B" + abs_build_temp_path, "-DCMAKE_BUILD_TYPE=" + config, - "-DDPNP_INSTALL_PREFIX=" + install_directory.replace(os.sep, "/"), # adjust to cmake requirenments + "-DDPNP_INSTALL_PREFIX=" + + install_directory.replace( + os.sep, "/" + ), # adjust to cmake requirenments "-DDPNP_INSTALL_STRUCTURED=OFF", # "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + install_directory, "-DDPNP_SYCL_QUEUE_MGR_ENABLE:BOOL=" + _dpctrl_exists, "-DDPNP_QUEUEMGR_INCLUDE_DIR=" + _dpctrl_include_dir, "-DDPNP_QUEUEMGR_LIB_DIR=" + _dpctrl_library_dir, "-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON", - "-DDPNP_BACKEND_TESTS:BOOL=" + enable_tests + "-DDPNP_BACKEND_TESTS:BOOL=" + enable_tests, ] # didn't find how to add it inside cmake, that is why this is here import multiprocessing + cpu_count = multiprocessing.cpu_count() # possible that jobs count must be +-1 against CPUs count jobs = "-j" + str(cpu_count) diff --git a/utils/command_clean.py b/utils/command_clean.py index 785340aa4023..41042c2b2200 100644 --- a/utils/command_clean.py +++ b/utils/command_clean.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # 
***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -25,10 +25,11 @@ # ***************************************************************************** import os -from setuptools import Command +from distutils import log from fnmatch import fnmatch from shutil import rmtree -from distutils import log + +from setuptools import Command class source_clean(Command): @@ -41,9 +42,16 @@ class source_clean(Command): description = "Clean up the project source tree" - CLEAN_ROOTDIRS = ['build', 'build_cython', 'cython_debug', 'Intel_NumPy.egg-info', 'doc/_build', 'CMakeFiles'] - CLEAN_DIRS = ['__pycache__'] - CLEAN_FILES = ['*.so', '*.pyc', '*.pyd', '*.dll', '*.lib', 'CMakeCache.txt'] + CLEAN_ROOTDIRS = [ + "build", + "build_cython", + "cython_debug", + "Intel_NumPy.egg-info", + "doc/_build", + "CMakeFiles", + ] + CLEAN_DIRS = ["__pycache__"] + CLEAN_FILES = ["*.so", "*.pyc", "*.pyd", "*.dll", "*.lib", "CMakeCache.txt"] user_options = [] @@ -54,7 +62,9 @@ def finalize_options(self): pass def run(self): - root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + root_dir = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..") + ) log.info(f"DPNP: cleaning in {root_dir}") # removing dirs from root_dir @@ -81,4 +91,4 @@ def run(self): log.info(f"rm {rfile}") os.remove(rfile) - log.info(f"DPNP: cleaning finished") + log.info("DPNP: cleaning finished") diff --git a/utils/command_style.py b/utils/command_style.py index 4d5a48ed5683..483da43978fb 100644 --- a/utils/command_style.py +++ b/utils/command_style.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -25,6 +25,7 @@ # ***************************************************************************** import os + from setuptools import Command @@ -38,41 +39,41 @@ class source_style(Command): """ - user_options = [ - ('apply', 'a', 'Apply codestyle changes to sources.') - ] + user_options = [("apply", "a", "Apply codestyle changes to sources.")] description = "Code style check and apply (with -a)" boolean_options = [] _result_marker = "Result:" - _project_directory_excluded = ['build', '.git'] - - _c_formatter = 'clang-format' - _c_formatter_install_msg = 'pip install clang' - _c_formatter_command_line = [_c_formatter, '-style=file'] - _c_file_extensions = ['.h', '.c', '.hpp', '.cpp'] - - _py_checker = 'pycodestyle' - _py_formatter = 'autopep8' - _py_formatter_install_msg = 'pip install --upgrade autopep8\npip install --upgrade pycodestyle' + _project_directory_excluded = ["build", ".git"] + + _c_formatter = "clang-format" + _c_formatter_install_msg = "pip install clang" + _c_formatter_command_line = [_c_formatter, "-style=file"] + _c_file_extensions = [".h", ".c", ".hpp", ".cpp"] + + _py_checker = "pycodestyle" + _py_formatter = "autopep8" + _py_formatter_install_msg = ( + "pip install --upgrade autopep8\npip install --upgrade pycodestyle" + ) _py_checker_command_line = [_py_checker] - _py_formatter_command_line = [ - _py_formatter, - '--in-place'] - _py_file_extensions = ['.py', '.pyx', '.pxd', '.pxi'] + _py_formatter_command_line = [_py_formatter, "--in-place"] + _py_file_extensions = [".py", ".pyx", ".pxd", ".pxi"] def _get_file_list(self, path, search_extentions): - """ Return file list to be adjusted or checked + """Return file list to be adjusted or checked path - is the project base path search_extentions - list of strings with files extension to search recurcivly """ files = [] - exluded_directories_full_path = [os.path.join( - path, excluded_dir) for excluded_dir in self._project_directory_excluded] + exluded_directories_full_path = [ + os.path.join(path, excluded_dir) + for excluded_dir in self._project_directory_excluded + ] - # r=root, d=directories, f = files - for r, d, f in os.walk(path): + # r=root, _=directories, f=files + for r, _, f in os.walk(path): # match exclude pattern in current directory found = False for excluded_dir in exluded_directories_full_path: @@ -83,7 +84,7 @@ def _get_file_list(self, path, search_extentions): continue for file in f: - filename, extention = os.path.splitext(file) + _, extention = os.path.splitext(file) if extention in search_extentions: files.append(os.path.join(r, file)) @@ -100,9 +101,9 @@ def run(self): print("Project directory is: %s" % root_dir) if self.apply: - self._c_formatter_command_line += ['-i'] + self._c_formatter_command_line += ["-i"] else: - self._c_formatter_command_line += ['-output-replacements-xml'] + self._c_formatter_command_line += ["-output-replacements-xml"] import subprocess @@ -113,14 +114,17 @@ def run(self): try: for f in c_files: command_output = subprocess.Popen( - self._c_formatter_command_line + [f], stdout=subprocess.PIPE) + self._c_formatter_command_line + [f], stdout=subprocess.PIPE + ) command_cout, command_cerr = command_output.communicate() if not self.apply: - if command_cout.find(b' 0: + if command_cout.find(b" 0: bad_style_file_names.append(f) except BaseException as original_error: - print("%s is not installed.\nPlease use: %s" % - (self._c_formatter, self._c_formatter_install_msg)) + print( + "%s is not installed.\nPlease use: 
%s" + % (self._c_formatter, self._c_formatter_install_msg) + ) print("Original error message is:\n", original_error) exit(1) @@ -130,17 +134,21 @@ def run(self): for f in py_files: if not self.apply: command_output = subprocess.Popen( - self._py_checker_command_line + [f]) + self._py_checker_command_line + [f] + ) returncode = command_output.wait() if returncode != 0: bad_style_file_names.append(f) else: command_output = subprocess.Popen( - self._py_formatter_command_line + [f]) + self._py_formatter_command_line + [f] + ) command_output.wait() except BaseException as original_error: - print("%s is not installed.\nPlease use: %s" % - (self._py_formatter, self._py_formatter_install_msg)) + print( + "%s is not installed.\nPlease use: %s" + % (self._py_formatter, self._py_formatter_install_msg) + ) print("Original error message is:\n", original_error) exit(1) diff --git a/utils/dpnp_build_utils.py b/utils/dpnp_build_utils.py index 2ccf211587d3..a27c1df1541b 100644 --- a/utils/dpnp_build_utils.py +++ b/utils/dpnp_build_utils.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -28,12 +28,17 @@ import os import sys - IS_CONDA_BUILD = os.environ.get("CONDA_BUILD") == "1" -def find_library(var_name, rel_header_paths, rel_lib_paths, - rel_include_path="include", rel_libdir_path="lib", verbose=False): +def find_library( + var_name, + rel_header_paths, + rel_lib_paths, + rel_include_path="include", + rel_libdir_path="lib", + verbose=False, +): """ Find specified libraries/headers in the directory from the environment variable. 
@@ -63,8 +68,12 @@ def find_library(var_name, rel_header_paths, rel_lib_paths, include_find = os.path.join(root_dir, rel_include_path) libpath_find = os.path.join(root_dir, rel_libdir_path) - required_headers = [os.path.join(include_find, rel_path) for rel_path in rel_header_paths] - required_libs = [os.path.join(libpath_find, rel_path) for rel_path in rel_lib_paths] + required_headers = [ + os.path.join(include_find, rel_path) for rel_path in rel_header_paths + ] + required_libs = [ + os.path.join(libpath_find, rel_path) for rel_path in rel_lib_paths + ] for required_file in required_headers + required_libs: if not os.path.exists(required_file): @@ -95,53 +104,71 @@ def find_cmplr(verbose=False): rel_header_paths = rel_lib_paths = [] # try to find library in specified directory from $DPCPPROOT - if 'linux' in sys.platform: - rel_include_path = os.path.join('linux', 'include') - rel_libdir_path = os.path.join('linux', 'lib') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('windows', 'include') - rel_libdir_path = os.path.join('windows', 'lib') + if "linux" in sys.platform: + rel_include_path = os.path.join("linux", "include") + rel_libdir_path = os.path.join("linux", "lib") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("windows", "include") + rel_libdir_path = os.path.join("windows", "lib") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") - cmplr_include, cmplr_libpath = find_library("DPCPPROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + cmplr_include, cmplr_libpath = find_library( + "DPCPPROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) # try to find library in specified directory from $ONEAPI_ROOT if not cmplr_include or not cmplr_libpath: - if sys.platform in ['linux']: - rel_include_path = os.path.join('compiler', 'latest', 'linux', 'include') - rel_libdir_path = os.path.join('compiler', 'latest', 'linux', 'lib') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('compiler', 'latest', 'windows', 'include') - rel_libdir_path = os.path.join('compiler', 'latest', 'windows', 'lib') + if sys.platform in ["linux"]: + rel_include_path = os.path.join( + "compiler", "latest", "linux", "include" + ) + rel_libdir_path = os.path.join("compiler", "latest", "linux", "lib") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join( + "compiler", "latest", "windows", "include" + ) + rel_libdir_path = os.path.join( + "compiler", "latest", "windows", "lib" + ) else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") - cmplr_include, cmplr_libpath = find_library("ONEAPI_ROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + cmplr_include, cmplr_libpath = find_library( + "ONEAPI_ROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) # try to find in Python environment if not cmplr_include or not cmplr_libpath: - if sys.platform in ['linux']: - rel_include_path = os.path.join('include') - rel_libdir_path = os.path.join('lib') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('Library', 'include') - rel_libdir_path = os.path.join('Library', 'lib') + if sys.platform in ["linux"]: + rel_include_path = 
os.path.join("include") + rel_libdir_path = os.path.join("lib") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("Library", "include") + rel_libdir_path = os.path.join("Library", "lib") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") conda_root_var = "PREFIX" if IS_CONDA_BUILD else "CONDA_PREFIX" - cmplr_include, cmplr_libpath = find_library(conda_root_var, rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + cmplr_include, cmplr_libpath = find_library( + conda_root_var, + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) if not cmplr_include or not cmplr_libpath: raise EnvironmentError("DPNP: Unable to find compiler") @@ -169,56 +196,74 @@ def find_dpl(verbose=False): rel_libdir_path = "" # try to find library in specified directory from $DPLROOT like a repository - rel_include_path = os.path.join('include') + rel_include_path = os.path.join("include") - dpl_include, dpl_libpath = find_library("DPLROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + dpl_include, dpl_libpath = find_library( + "DPLROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) # try to find library in specified directory from $DPLROOT if not dpl_include or not dpl_libpath: - if 'linux' in sys.platform: - rel_include_path = os.path.join('linux', 'include') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('windows', 'include') + if "linux" in sys.platform: + rel_include_path = os.path.join("linux", "include") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("windows", "include") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") - dpl_include, dpl_libpath = find_library("DPLROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + dpl_include, dpl_libpath = find_library( + "DPLROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) # try to find library in specified directory from $ONEAPI_ROOT if not dpl_include or not dpl_libpath: - if sys.platform in ['linux']: - rel_include_path = os.path.join('dpl', 'latest', 'linux', 'include') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('dpl', 'latest', 'windows', 'include') + if sys.platform in ["linux"]: + rel_include_path = os.path.join("dpl", "latest", "linux", "include") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join( + "dpl", "latest", "windows", "include" + ) else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") - dpl_include, dpl_libpath = find_library("ONEAPI_ROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + dpl_include, dpl_libpath = find_library( + "ONEAPI_ROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) # try to find in Python environment if not dpl_include or not dpl_libpath: - if sys.platform in ['linux']: - rel_include_path = os.path.join('include') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = 
os.path.join('Library', 'include') + if sys.platform in ["linux"]: + rel_include_path = os.path.join("include") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("Library", "include") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") conda_root_var = "PREFIX" if IS_CONDA_BUILD else "CONDA_PREFIX" - dpl_include, dpl_libpath = find_library(conda_root_var, rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + dpl_include, dpl_libpath = find_library( + conda_root_var, + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) if not dpl_include or not dpl_libpath: raise EnvironmentError("DPNP: Unable to find DPL") @@ -241,63 +286,77 @@ def find_mathlib(verbose=False): path to include directory, path to library directory """ - if sys.platform in ['linux']: + if sys.platform in ["linux"]: rel_header_paths = [os.path.join("oneapi", "mkl.hpp")] rel_lib_paths = ["libmkl_sycl.so"] - elif sys.platform in ['win32', 'cygwin']: + elif sys.platform in ["win32", "cygwin"]: rel_header_paths = [os.path.join("oneapi", "mkl.hpp")] rel_lib_paths = ["mkl_sycl_dll.lib"] else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") # try to find library in specified directory from $MKLROOT - if sys.platform in ['linux']: - rel_include_path = os.path.join('linux', 'include') - rel_libdir_path = os.path.join('linux', 'lib') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('windows', 'include') - rel_libdir_path = os.path.join('windows', 'lib') + if sys.platform in ["linux"]: + rel_include_path = os.path.join("linux", "include") + rel_libdir_path = os.path.join("linux", "lib") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("windows", "include") + rel_libdir_path = os.path.join("windows", "lib") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") - mathlib_include, mathlib_path = find_library("MKLROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + mathlib_include, mathlib_path = find_library( + "MKLROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) # try to find library in specified directory from $ONEAPI_ROOT if not mathlib_include or not mathlib_path: - if sys.platform in ['linux']: - rel_include_path = os.path.join('mkl', 'latest', 'linux', 'include') - rel_libdir_path = os.path.join('mkl', 'latest', 'linux', 'lib') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('mkl', 'latest', 'windows', 'include') - rel_libdir_path = os.path.join('mkl', 'latest', 'windows', 'lib') + if sys.platform in ["linux"]: + rel_include_path = os.path.join("mkl", "latest", "linux", "include") + rel_libdir_path = os.path.join("mkl", "latest", "linux", "lib") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join( + "mkl", "latest", "windows", "include" + ) + rel_libdir_path = os.path.join("mkl", "latest", "windows", "lib") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") - mathlib_include, mathlib_path = find_library("ONEAPI_ROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + mathlib_include, mathlib_path = find_library( + 
"ONEAPI_ROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) # try to find in Python environment if not mathlib_include or not mathlib_path: - if sys.platform in ['linux']: - rel_include_path = os.path.join('include') - rel_libdir_path = os.path.join('lib') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('Library', 'include') - rel_libdir_path = os.path.join('Library', 'lib') + if sys.platform in ["linux"]: + rel_include_path = os.path.join("include") + rel_libdir_path = os.path.join("lib") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("Library", "include") + rel_libdir_path = os.path.join("Library", "lib") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") conda_root_var = "PREFIX" if IS_CONDA_BUILD else "CONDA_PREFIX" - mathlib_include, mathlib_path = find_library(conda_root_var, rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + mathlib_include, mathlib_path = find_library( + conda_root_var, + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) if not mathlib_include or not mathlib_path: raise EnvironmentError("DPNP: Unable to find math library") @@ -321,17 +380,25 @@ def _find_omp_in_dpcpp_root(verbose=False): """ rel_header_paths = rel_lib_paths = [] - if 'linux' in sys.platform: - rel_include_path = os.path.join('linux', 'compiler', 'include') - rel_libdir_path = os.path.join('linux', 'compiler', 'lib', 'intel64') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('windows', 'compiler', 'include') - rel_libdir_path = os.path.join('windows', 'compiler', 'lib', 'intel64_win') + if "linux" in sys.platform: + rel_include_path = os.path.join("linux", "compiler", "include") + rel_libdir_path = os.path.join("linux", "compiler", "lib", "intel64") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("windows", "compiler", "include") + rel_libdir_path = os.path.join( + "windows", "compiler", "lib", "intel64_win" + ) else: - rel_include_path, rel_libdir_path = 'include', 'lib' + rel_include_path, rel_libdir_path = "include", "lib" - return find_library("DPCPPROOT", rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, rel_libdir_path=rel_libdir_path, verbose=verbose) + return find_library( + "DPCPPROOT", + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) def find_omp(verbose=False): @@ -351,7 +418,9 @@ def find_omp(verbose=False): omp_include, omp_libpath = _find_omp_in_dpcpp_root(verbose=verbose) if not omp_include or not omp_libpath: - raise EnvironmentError(f"DPNP: Unable to find omp. Please install Intel OneAPI environment") + raise EnvironmentError( + "DPNP: Unable to find omp. 
Please install Intel OneAPI environment" + ) return omp_include, omp_libpath @@ -373,25 +442,29 @@ def find_python_env(verbose=False): rel_header_paths = rel_lib_paths = [] - if sys.platform in ['linux']: - rel_include_path = os.path.join('include') - rel_libdir_path = os.path.join('lib') - elif sys.platform in ['win32', 'cygwin']: - rel_include_path = os.path.join('Library', 'include') - rel_libdir_path = os.path.join('Library', 'lib') + if sys.platform in ["linux"]: + rel_include_path = os.path.join("include") + rel_libdir_path = os.path.join("lib") + elif sys.platform in ["win32", "cygwin"]: + rel_include_path = os.path.join("Library", "include") + rel_libdir_path = os.path.join("Library", "lib") else: raise EnvironmentError("DPNP: " + sys.platform + " not supported") conda_root_var = "PREFIX" if IS_CONDA_BUILD else "CONDA_PREFIX" - env_include, env_path = find_library(conda_root_var, rel_header_paths, rel_lib_paths, - rel_include_path=rel_include_path, - rel_libdir_path=rel_libdir_path, - verbose=verbose) + env_include, env_path = find_library( + conda_root_var, + rel_header_paths, + rel_lib_paths, + rel_include_path=rel_include_path, + rel_libdir_path=rel_libdir_path, + verbose=verbose, + ) - env_include += [os.path.join(os.getenv(conda_root_var), 'include')] + env_include += [os.path.join(os.getenv(conda_root_var), "include")] if not env_include or not env_path: - raise EnvironmentError(f"DPNP: Unable to find Python environment paths") + raise EnvironmentError("DPNP: Unable to find Python environment paths") return env_include, env_path diff --git a/utils/dpnp_coverage.py b/utils/dpnp_coverage.py index 5959fe81b863..2639c9be21de 100644 --- a/utils/dpnp_coverage.py +++ b/utils/dpnp_coverage.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # ***************************************************************************** -# Copyright (c) 2016-2020, Intel Corporation +# Copyright (c) 2016-2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -25,8 +25,8 @@ # THE POSSIBILITY OF SUCH DAMAGE. 
# ***************************************************************************** -import os import inspect +import os name_dict = {} module_names_set = dict() @@ -41,7 +41,7 @@ def print_header_line(): print(f"{'='*col0_width}", end=sep) print(f"{'='*col1_width}", end=sep) - for mod_name in module_names_set.keys(): + for _ in module_names_set.keys(): print(f"{'='*col2_width}", end=sep) print() @@ -81,17 +81,19 @@ def add_symbol(item_name, module_name, item_val): module_names_set[module_name] = 0 else: module_names_set[module_name] += 1 + + # else: # print(f"item_name={item_name}, {name_dict[item_name][module_name]} replaced with {str(item_val)}") def fill_data(module_name, module_obj, parent_module_name=""): for item_name_raw, item_val in inspect.getmembers(module_obj): - if (item_name_raw[0] == "_"): + if item_name_raw[0] == "_": continue item_name = os.path.join(parent_module_name, item_name_raw) - if getattr(item_val, '__call__', False): + if callable(item_val): str_item = item_val try: str_item = inspect.signature(item_val) @@ -103,6 +105,8 @@ def fill_data(module_name, module_obj, parent_module_name=""): fill_data(module_name, item_val, parent_module_name=item_name) else: print(f"IGNORED: {module_name}: module: {item_name}") + + # elif isinstance(item_val, (tuple, list, float, int)): # add_symbol(item_name, module_name, item_val) # elif isinstance(item_val, str): @@ -123,7 +127,7 @@ def print_data(): for mod_name in module_names_set.keys(): val = symbol_values.get(mod_name, "") - val_prn = str(val)[0:col2_width - 1] + val_prn = str(val)[0 : col2_width - 1] print(f"{val_prn:{col2_width}}", end=sep) print() @@ -131,22 +135,25 @@ def print_data(): print_footer() -if __name__ == '__main__': +if __name__ == "__main__": try: import dpnp + fill_data("DPNP", dpnp) except ImportError: print("No DPNP module loaded") try: import numpy + fill_data("NumPy", numpy) except ImportError: print("No NumPy module loaded") try: import cupy + fill_data("cuPy", cupy) except ImportError: print("No cuPy module loaded")
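
Note (not part of the patch): the reformatted find_dpl/find_mathlib/find_omp/find_python_env helpers all share one lookup pattern — resolve a root directory from an environment variable and probe platform-specific include/lib subdirectories for the required headers and libraries, falling back from MKLROOT to ONEAPI_ROOT to the conda prefix (conda-build exposes PREFIX, an activated environment exposes CONDA_PREFIX). The sketch below is a hypothetical, simplified stand-in for the repository's find_library() helper, written only to illustrate that shape; the name _probe_root and the hard-coded Linux fallback chain are inventions of this note, not code from dpnp.

import os


def _probe_root(env_var, rel_header_paths, rel_lib_paths,
                rel_include_path="include", rel_libdir_path="lib",
                verbose=False):
    # Hypothetical stand-in for the utils' find_library(): return
    # ([include_dir], [lib_dir]) when every requested header and library
    # exists under the root pointed to by ``env_var``, else ([], []).
    root = os.getenv(env_var)
    if not root:
        return [], []

    include_dir = os.path.join(root, rel_include_path)
    libdir = os.path.join(root, rel_libdir_path)

    headers_ok = all(os.path.isfile(os.path.join(include_dir, p))
                     for p in rel_header_paths)
    libs_ok = all(os.path.isfile(os.path.join(libdir, p))
                  for p in rel_lib_paths)

    if headers_ok and libs_ok:
        if verbose:
            print(f"found include={include_dir}, libdir={libdir} via {env_var}")
        return [include_dir], [libdir]
    return [], []


# Fallback chain in the spirit of find_mathlib() on Linux: MKLROOT first,
# then ONEAPI_ROOT, then the active conda environment.
candidates = [
    ("MKLROOT", os.path.join("linux", "include"), os.path.join("linux", "lib")),
    ("ONEAPI_ROOT",
     os.path.join("mkl", "latest", "linux", "include"),
     os.path.join("mkl", "latest", "linux", "lib")),
    ("CONDA_PREFIX", "include", "lib"),
]
mathlib_include = mathlib_path = []
for env_var, inc, lib in candidates:
    mathlib_include, mathlib_path = _probe_root(
        env_var,
        [os.path.join("oneapi", "mkl.hpp")],
        ["libmkl_sycl.so"],
        rel_include_path=inc,
        rel_libdir_path=lib,
    )
    if mathlib_include and mathlib_path:
        break
if not mathlib_include or not mathlib_path:
    raise EnvironmentError("DPNP: Unable to find math library")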
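
A second note, on the utils/dpnp_coverage.py hunk: swapping getattr(item_val, '__call__', False) for callable(item_val) is behaviour-preserving but more idiomatic, and the coverage table is built by walking module members with inspect.getmembers() and recording inspect.signature() where one is available. The condensed illustration below is hypothetical — collect_public_callables is not a function in the script — and omits the script's recursion into submodules and its per-module counters.

import inspect


def collect_public_callables(module):
    # Record a printable signature for every public callable in ``module``,
    # mirroring the shape of fill_data()/add_symbol() in dpnp_coverage.py.
    symbols = {}
    for name, value in inspect.getmembers(module):
        if name.startswith("_"):
            continue  # private and dunder members are skipped, as in the script
        if callable(value):  # idiomatic form of getattr(value, "__call__", False)
            try:
                symbols[name] = str(inspect.signature(value))
            except (TypeError, ValueError):
                # builtins and some extension callables expose no signature
                symbols[name] = "(...)"
    return symbols


if __name__ == "__main__":
    import math

    for sym, sig in sorted(collect_public_callables(math).items()):
        print(f"{sym}{sig}")

The optional-import pattern at the bottom of the script (try each of dpnp, numpy and cupy in turn and report when an import fails) is otherwise unchanged — the hunk only inserts a blank line after each import.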