diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index b78bd12af2..a5c34c29df 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -2,6 +2,9 @@ ### New features since last release +* Complete overhaul of repository structure to facilitate integration of multiple backends. Refactoring efforts were directed to improve development performance, code reuse and decrease overall overhead to propagate changes through backends. New C++ modular build strategy allows for faster test builds restricted to a module. Update CI/CD actions concurrency strategy. Change minimal Python version to 3.9. + [(#472)](https://github.com/PennyLaneAI/pennylane-lightning/pull/472) + * Wheels are built with native support for sparse Hamiltonians. [(#470)] (https://github.com/PennyLaneAI/pennylane-lightning/pull/470) @@ -11,13 +14,15 @@ ### Breaking changes ### Improvements +* Merge Lightning Qubit and Lightning Kokkos backends in the new repository. + [(#472)](https://github.com/PennyLaneAI/pennylane-lightning/pull/472) ### Documentation ### Bug fixes * `apply` no longer mutates the inputted list of operations. 
- [(#475)](https://github.com/PennyLaneAI/pennylane-lightning/pull/475) + [(#474)](https://github.com/PennyLaneAI/pennylane-lightning/pull/474) ### Contributors diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml deleted file mode 100644 index fd053e90fc..0000000000 --- a/.github/workflows/benchmarks.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Benchmarking -on: - pull_request: - push: - branches: - - master - -env: - GCC_VERSION: 11 - -jobs: - benchmarks: - strategy: - matrix: - os: [ubuntu-22.04] - - name: Gate benchmarks - runs-on: ${{ matrix.os }} - - steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - - name: Checkout PennyLane-Lightning - uses: actions/checkout@v3 - - - uses: actions/setup-python@v4 - name: Install Python - with: - python-version: '3.8' - - - name: Install dependencies - run: sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION ninja-build libopenblas-dev - - - name: Build GBenchmark - run: | - cmake pennylane_lightning/src/ -BBuildGBench -DBUILD_BENCHMARKS=ON -DENABLE_BLAS=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja - cmake --build ./BuildGBench --parallel 2 - - - name: Run GBenchmark - run: | - ./BuildGBench/benchmarks/utils --benchmark_filter="^[a-z]+_innerProd_cmplx/[0-9]+$" - ./BuildGBench/benchmarks/pennylane_lightning_bench_operations --benchmark_filter="^applyOperations_RandOps/LM_all/32/[0-9]+$" diff --git a/.github/workflows/build_and_cache_Kokkos_linux.yml b/.github/workflows/build_and_cache_Kokkos_linux.yml index 55b7c6a3c8..3129bcb0d8 100644 --- a/.github/workflows/build_and_cache_Kokkos_linux.yml +++ b/.github/workflows/build_and_cache_Kokkos_linux.yml @@ -1,4 +1,4 @@ -name: Build and Cache Kokkos and Kokkos Kernels +name: Build and Cache Kokkos env: GCC_VERSION: 11 @@ -17,20 +17,19 @@ on: description: "Kokkos version" value: ${{ 
jobs.linux-set-builder-matrix.outputs.kokkos_version }} +concurrency: + group: build_and_cache_Kokkos_linux-${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true + jobs: linux-set-builder-matrix: name: Set Kokkos builder matrix runs-on: ubuntu-22.04 steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Kokkos execution strategy id: exec_model - run: echo "exec_model=[\"SERIAL\"]" >> $GITHUB_OUTPUT # We may also adopt [OPENMP, THREADS] in later iterations + run: echo "exec_model=[\"SERIAL\", \"OPENMP\"]" >> $GITHUB_OUTPUT - name: Kokkos version id: kokkos_version @@ -47,15 +46,10 @@ jobs: exec_model: ${{ fromJson(needs.linux-set-builder-matrix.outputs.exec_model) }} kokkos_version: ${{ fromJson(needs.linux-set-builder-matrix.outputs.kokkos_version) }} - name: Kokkos core & kernels (${{ matrix.exec_model }}) + name: Kokkos core (${{ matrix.exec_model }}) runs-on: ${{ inputs.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Cache installation directories id: kokkos-cache uses: actions/cache@v3 @@ -71,10 +65,6 @@ jobs: git checkout ${{ matrix.kokkos_version }} cd - pushd . &> /dev/null - git clone https://github.com/kokkos/kokkos-kernels.git - cd kokkos-kernels - git checkout ${{ matrix.kokkos_version }} - cd - - name: Install dependencies (Ubuntu) if: ${{ (((inputs.os == 'ubuntu-latest') || (inputs.os == 'ubuntu-20.04') || (inputs.os == 'ubuntu-22.04')) && (steps.kokkos-cache.outputs.cache-hit != 'true')) }} @@ -88,6 +78,7 @@ jobs: cd kokkos cmake -BBuild . 
-DCMAKE_INSTALL_PREFIX=${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} \ -DKokkos_ENABLE_COMPLEX_ALIGN=OFF \ + -DKokkos_ENABLE_SERIAL=ON \ -DKokkos_ENABLE_${{ matrix.exec_model }}=ON \ -DKokkos_ENABLE_DEPRECATION_WARNINGS=OFF \ -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" \ @@ -96,19 +87,4 @@ jobs: -G Ninja cmake --build ./Build --verbose cmake --install ./Build - cd - - - - name: Build Kokkos kernels library (Ubuntu) - if: ${{ (((inputs.os == 'ubuntu-latest') || (inputs.os == 'ubuntu-20.04') || (inputs.os == 'ubuntu-22.04')) && (steps.kokkos-cache.outputs.cache-hit != 'true')) }} - run: | - mkdir -p ${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} - cd kokkos-kernels - cmake -BBuild . -DCMAKE_INSTALL_PREFIX=${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} \ - -DKokkos_ENABLE_${{ matrix.exec_model }}=ON \ - -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" \ - -DCMAKE_CXX_STANDARD=20 \ - -DCMAKE_PREFIX_PATH=${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -G Ninja - cmake --build ./Build --verbose - cmake --install ./Build + cd - \ No newline at end of file diff --git a/.github/workflows/dev_version_script.py b/.github/workflows/dev_version_script.py index f7ab794b26..14acb3bd6f 100644 --- a/.github/workflows/dev_version_script.py +++ b/.github/workflows/dev_version_script.py @@ -17,7 +17,7 @@ import re -VERSION_FILE_PATH = "pennylane_lightning/_version.py" +VERSION_FILE_PATH = "pennylane_lightning/core/_version.py" rgx_ver = re.compile('^__version__ = "(.*?)"$') diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 5044fc983a..80427b35fd 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -5,26 +5,25 @@ on: branches: - master +concurrency: + group: format-${{ github.ref }} + cancel-in-progress: true + jobs: black: runs-on: ubuntu-22.04 steps: - - name: Cancel Previous Runs - uses: 
styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Set up Python uses: actions/setup-python@v4 with: - python-version: 3.8 + python-version: '3.9' - name: Install dependencies run: - python -m pip install --upgrade pip - pip install click==8.0.4 black + python -m pip install click==8.0.4 black==23.7.0 - - uses: actions/checkout@v3 + - name: Checkout PennyLane-Lightning + uses: actions/checkout@v3 - name: Run Black run: black -l 100 pennylane_lightning/ tests/ --check @@ -34,11 +33,6 @@ jobs: runs-on: ubuntu-22.04 steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Install dependencies run: sudo apt update && sudo apt -y install clang-format-14 python3 @@ -49,7 +43,7 @@ jobs: run: ./bin/format --check --cfversion 14 ./pennylane_lightning/src build_and_cache_Kokkos: - name: "Build and cache Kokkos and Kokkos Kernels" + name: "Build and cache Kokkos" uses: ./.github/workflows/build_and_cache_Kokkos_linux.yml with: os: ubuntu-22.04 @@ -59,25 +53,23 @@ jobs: strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_qubit"] exec_model: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.exec_model) }} kokkos_version: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.kokkos_version) }} + exclude: + - exec_model: OPENMP name: Tidy (C++) runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.8' + python-version: '3.9' - name: Restoring cached dependencies id: kokkos-cache @@ -98,5 +90,11 @@ jobs: - name: Run clang-tidy compilation run: | - cmake -BBuild -DENABLE_CLANG_TIDY=ON -DCLANG_TIDY_BINARY=clang-tidy-14 -DBUILD_TESTS=ON -DENABLE_WARNINGS=ON -DCMAKE_CXX_COMPILER="$(which g++-10)" -G Ninja . 
- cmake --build ./Build + cmake -BBuild -G Ninja . \ + -DENABLE_CLANG_TIDY=ON \ + -DCLANG_TIDY_BINARY=clang-tidy-14 \ + -DBUILD_TESTS=ON \ + -DENABLE_WARNINGS=ON \ + -DPL_BACKEND=${{ matrix.pl_backend }} \ + -DCMAKE_CXX_COMPILER="$(which g++-10)" + cmake --build ./Build \ No newline at end of file diff --git a/.github/workflows/post_release_version_bump.yml b/.github/workflows/post_release_version_bump.yml index 4b85edebaf..312c746313 100644 --- a/.github/workflows/post_release_version_bump.yml +++ b/.github/workflows/post_release_version_bump.yml @@ -25,7 +25,7 @@ jobs: - name: Run version bump run: > python .github/workflows/vb_script.py - --version_path "./pennylane_lightning/_version.py" + --version_path "./pennylane_lightning/core/_version.py" --changelog_path "./.github/CHANGELOG.md" --post_release - name: Create Pull Request diff --git a/.github/workflows/pre_release_version_bump.yml b/.github/workflows/pre_release_version_bump.yml index 4d7f383713..8be3604bc4 100644 --- a/.github/workflows/pre_release_version_bump.yml +++ b/.github/workflows/pre_release_version_bump.yml @@ -24,7 +24,7 @@ jobs: - name: Run version bump run: > python .github/workflows/vb_script.py - --version_path "./pennylane_lightning/_version.py" + --version_path "./pennylane_lightning/core/_version.py" --changelog_path "./.github/CHANGELOG.md" --pre_release - name: Create Pull Request diff --git a/.github/workflows/set_wheel_build_matrix.yml b/.github/workflows/set_wheel_build_matrix.yml index 6dc2e4d344..2cb9d820e7 100644 --- a/.github/workflows/set_wheel_build_matrix.yml +++ b/.github/workflows/set_wheel_build_matrix.yml @@ -1,7 +1,7 @@ name: Set wheel build matrix env: - PYTHON3_MIN_VERSION: "8" + PYTHON3_MIN_VERSION: "9" PYTHON3_MAX_VERSION: "11" on: @@ -14,17 +14,22 @@ on: python_version: description: "Python versions." value: ${{ jobs.set-builder-matrix.outputs.python_version }} + exec_model: + description: "The execution model for Kokkos." 
+ value: ${{ jobs.set-builder-matrix.outputs.exec_model }} + kokkos_version: + description: "Kokkos version" + value: ${{ jobs.set-builder-matrix.outputs.kokkos_version }} + +concurrency: + group: set_wheel_build_matrix-${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true jobs: set-builder-matrix: runs-on: ubuntu-22.04 steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 @@ -41,5 +46,15 @@ jobs: --max-version=3.${{ env.PYTHON3_MAX_VERSION }} --range)" >> $GITHUB_OUTPUT fi + - name: Kokkos execution strategy + id: exec_model + run: echo "exec_model=[\"OPENMP\"]" >> $GITHUB_OUTPUT # We may also adopt [THREADS] in later iterations + + - name: Kokkos version + id: kokkos_version + run: echo "kokkos_version=[\"4.0.01\"]" >> $GITHUB_OUTPUT + outputs: python_version: ${{ steps.pyver.outputs.python_version }} + exec_model: ${{ steps.exec_model.outputs.exec_model }} + kokkos_version: ${{ steps.kokkos_version.outputs.kokkos_version }} \ No newline at end of file diff --git a/.github/workflows/tests_linux.yml b/.github/workflows/tests_linux.yml index 5c05760f9f..ac9057fbef 100644 --- a/.github/workflows/tests_linux.yml +++ b/.github/workflows/tests_linux.yml @@ -12,25 +12,25 @@ env: GCC_VERSION: 11 OMP_NUM_THREADS: "2" +concurrency: + group: tests_linux-${{ github.ref }} + cancel-in-progress: true + jobs: cpptests: strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_qubit"] - name: C++ tests (Linux) + name: C++ tests runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 @@ -42,39 +42,52 @@ jobs: - name: Build and run unit tests run: | - cmake . 
-BBuild -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTS=ON -DENABLE_KOKKOS=OFF -DENABLE_PYTHON=OFF -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja + cmake . -BBuild -G Ninja \ + -DCMAKE_BUILD_TYPE=RelWithDebInfo \ + -DBUILD_TESTS=ON \ + -DENABLE_PYTHON=OFF \ + -DPL_BACKEND=${{ matrix.pl_backend }} \ + -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) cmake --build ./Build cd ./Build mkdir -p ./tests/results - ./pennylane_lightning_test_runner --order lex --reporter junit --out ./tests/results/report_${{ github.job }}.xml + for file in *runner ; do ./$file --order lex --reporter junit --out ./tests/results/report_$file.xml; done; - name: Upload test results uses: actions/upload-artifact@v3 if: always() with: - name: ubuntu-tests-reports - path: ./Build/tests/results/report_${{ github.job }}.xml + name: ubuntu-tests-reports-${{ github.job }}-${{ matrix.pl_backend }} + path: ./Build/tests/results/ - name: Build and run unit tests for code coverage run: | - cmake . -BBuildCov -DCMAKE_BUILD_TYPE=Debug -DENABLE_KOKKOS=OFF -DENABLE_PYTHON=OFF -DBUILD_TESTS=ON -DENABLE_COVERAGE=ON -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja + cmake . -BBuildCov -G Ninja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DBUILD_TESTS=ON \ + -DENABLE_PYTHON=OFF \ + -DPL_BACKEND=${{ matrix.pl_backend }} \ + -DENABLE_COVERAGE=ON \ + -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) cmake --build ./BuildCov cd ./BuildCov - ./pennylane_lightning_test_runner - lcov --directory . -b ../pennylane_lightning/src --capture --output-file coverage.info + for file in *runner ; do ./$file; done; + lcov --directory . 
-b ../pennylane_lightning/core/src --capture --output-file coverage.info lcov --remove coverage.info '/usr/*' --output-file coverage.info - mv coverage.info coverage-${{ github.job }}.info + mv coverage.info coverage-${{ github.job }}-${{ matrix.pl_backend }}.info - name: Upload code coverage results uses: actions/upload-artifact@v3 with: name: ubuntu-codecov-results-cpp - path: ./BuildCov/coverage-${{ github.job }}.info + path: ./BuildCov/coverage-${{ github.job }}-${{ matrix.pl_backend }}.info + pythontests: strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_qubit"] name: Python tests runs-on: ${{ matrix.os }} @@ -89,7 +102,7 @@ jobs: - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Install dependencies run: sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION @@ -97,52 +110,49 @@ jobs: - name: Get required Python packages run: | cd main - python -m pip install --upgrade pip - pip install -r requirements-dev.txt + python -m pip install -r requirements-dev.txt - name: Install ML libraries for interfaces run: | - pip install --upgrade torch==$TORCH_VERSION -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install --upgrade "jax[cpu]" # This also installs jaxlib - pip install --upgrade tensorflow~=$TF_VERSION keras~=$TF_VERSION + python -m pip install --upgrade torch==$TORCH_VERSION -f https://download.pytorch.org/whl/cpu/torch_stable.html + python -m pip install --upgrade "jax[cpu]" # This also installs jaxlib + python -m pip install --upgrade tensorflow~=$TF_VERSION keras~=$TF_VERSION - - name: Install lightning.qubit device + - name: Install backend device run: | cd main - CMAKE_ARGS="-DENABLE_KOKKOS=OFF -DENABLE_PYTHON=ON -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" pip install -e . 
-vv + CMAKE_ARGS="-DPL_BACKEND=${{ matrix.pl_backend }} -DENABLE_PYTHON=ON -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \ + python -m pip install -e . -vv - name: Run PennyLane-Lightning unit tests run: | cd main/ - pytest tests/ $COVERAGE_FLAGS - pl-device-test --device lightning.qubit --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append - pl-device-test --device lightning.qubit --shots=None --skip-ops $COVERAGE_FLAGS --cov-append - mv coverage.xml coverage-${{ github.job }}.xml + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + PL_DEVICE=${DEVICENAME} python -m pytest tests/ $COVERAGE_FLAGS + pl-device-test --device ${DEVICENAME} --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append + pl-device-test --device ${DEVICENAME} --shots=None --skip-ops $COVERAGE_FLAGS --cov-append + mv coverage.xml coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml - name: Upload code coverage results uses: actions/upload-artifact@v3 with: name: ubuntu-codecov-results-python - path: ./main/coverage-${{ github.job }}.xml + path: ./main/coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml cpptestswithOpenBLAS: strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_qubit"] - name: C++ tests (Linux, OpenBLAS) + name: C++ tests (OpenBLAS) runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 @@ -154,49 +164,58 @@ jobs: - name: Build and run unit tests run: | - cmake . -BBuild -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_PYTHON=OFF -DENABLE_BLAS=ON -DENABLE_KOKKOS=OFF -DBUILD_TESTS=ON -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja + cmake . 
-BBuild -G Ninja \ + -DCMAKE_BUILD_TYPE=RelWithDebInfo \ + -DENABLE_PYTHON=OFF \ + -DENABLE_BLAS=ON \ + -DPL_BACKEND=${{ matrix.pl_backend }} \ + -DBUILD_TESTS=ON \ + -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) cmake --build ./Build cd ./Build mkdir -p ./tests/results - ./pennylane_lightning_test_runner --order lex --reporter junit --out ./tests/results/report_${{ github.job }}.xml + for file in *runner ; do ./$file --order lex --reporter junit --out ./tests/results/report_$file.xml; done; - name: Upload test results uses: actions/upload-artifact@v3 if: always() with: - name: ubuntu-tests-reports - path: ./Build/tests/results/report_${{ github.job }}.xml + name: ubuntu-tests-reports-${{ github.job }}-${{ matrix.pl_backend }} + path: ./Build/tests/results/ - name: Build and run unit tests for code coverage run: | - cmake . -BBuildCov -DCMAKE_BUILD_TYPE=Debug -DENABLE_PYTHON=OFF -DENABLE_BLAS=ON -DENABLE_KOKKOS=OFF -DBUILD_TESTS=ON -DENABLE_COVERAGE=ON -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja + cmake . -BBuildCov -G Ninja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DBUILD_TESTS=ON \ + -DENABLE_PYTHON=OFF \ + -DENABLE_BLAS=ON \ + -DPL_BACKEND=${{ matrix.pl_backend }} \ + -DENABLE_COVERAGE=ON \ + -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) cmake --build ./BuildCov cd ./BuildCov - ./pennylane_lightning_test_runner - lcov --directory . -b ../pennylane_lightning/src --capture --output-file coverage.info + for file in *runner ; do ./$file; done; + lcov --directory . 
-b ../pennylane_lightning/core/src --capture --output-file coverage.info lcov --remove coverage.info '/usr/*' --output-file coverage.info - mv coverage.info coverage-${{ github.job }}.info + mv coverage.info coverage-${{ github.job }}-${{ matrix.pl_backend }}.info - name: Upload code coverage results uses: actions/upload-artifact@v3 with: name: ubuntu-codecov-results-cpp - path: ./BuildCov/coverage-${{ github.job }}.info + path: ./BuildCov/coverage-${{ github.job }}-${{ matrix.pl_backend }}.info - pythontestswithBLAS: + pythontestswithOpenBLAS: strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_qubit"] name: Python tests with OpenBLAS runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 with: @@ -206,7 +225,7 @@ jobs: - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Install dependencies run: sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION libopenblas-dev @@ -214,36 +233,37 @@ jobs: - name: Get required Python packages run: | cd main - python -m pip install --upgrade pip - pip install -r requirements-dev.txt + python -m pip install -r requirements-dev.txt - name: Install ML libraries for interfaces run: | - pip install --upgrade torch==$TORCH_VERSION -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install --upgrade "jax[cpu]" # This also installs jaxlib - pip install --upgrade tensorflow~=$TF_VERSION keras~=$TF_VERSION + python -m pip install --upgrade torch==$TORCH_VERSION -f https://download.pytorch.org/whl/cpu/torch_stable.html + python -m pip install --upgrade "jax[cpu]" # This also installs jaxlib + python -m pip install --upgrade tensorflow~=$TF_VERSION keras~=$TF_VERSION - - name: Install lightning.qubit device + - name: Install backend device run: | cd main - 
CMAKE_ARGS="-DENABLE_BLAS=ON -DENABLE_KOKKOS=OFF -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" pip install -e . -vv + CMAKE_ARGS="-DPL_BACKEND=${{ matrix.pl_backend }} -DENABLE_BLAS=ON -DENABLE_PYTHON=ON -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \ + python -m pip install -e . -vv - name: Run PennyLane-Lightning unit tests run: | cd main/ - pytest tests/ $COVERAGE_FLAGS - pl-device-test --device lightning.qubit --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append - pl-device-test --device lightning.qubit --shots=None --skip-ops $COVERAGE_FLAGS --cov-append - mv coverage.xml coverage-${{ github.job }}.xml + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + PL_DEVICE=${DEVICENAME} python -m pytest tests/ $COVERAGE_FLAGS + pl-device-test --device ${DEVICENAME} --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append + pl-device-test --device ${DEVICENAME} --shots=None --skip-ops $COVERAGE_FLAGS --cov-append + mv coverage.xml coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml - name: Upload code coverage results uses: actions/upload-artifact@v3 with: name: ubuntu-codecov-results-python - path: ./main/coverage-${{ github.job }}.xml + path: ./main/coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml build_and_cache_Kokkos: - name: "Build and cache Kokkos and Kokkos Kernels" + name: "Build and cache Kokkos" uses: ./.github/workflows/build_and_cache_Kokkos_linux.yml with: os: ubuntu-22.04 @@ -253,22 +273,18 @@ jobs: strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_kokkos"] exec_model: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.exec_model) }} kokkos_version: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.kokkos_version) }} - name: C++ tests (Linux, Kokkos and Kokkos Kernels) + name: C++ tests (Kokkos) runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - uses: actions/setup-python@v4 name: Install Python with: - 
python-version: '3.8' + python-version: '3.9' - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 @@ -292,52 +308,64 @@ jobs: - name: Build and run unit tests run: | - cmake . -BBuild -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_PYTHON=OFF -DENABLE_KOKKOS=ON -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DBUILD_TESTS=ON -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja + cmake . -BBuild -G Ninja \ + -DCMAKE_BUILD_TYPE=RelWithDebInfo \ + -DBUILD_TESTS=ON \ + -DENABLE_PYTHON=OFF \ + -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos \ + -DPL_BACKEND=${{ matrix.pl_backend }} \ + -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) cmake --build ./Build cd ./Build - mkdir -p ./tests/results - ./pennylane_lightning_test_runner --order lex --reporter junit --out ./tests/results/report_${{ github.job }}.xml + mkdir -p ./tests/results_${{ github.job }}_${{ matrix.pl_backend }} + for file in *runner ; do ./$file --order lex --reporter junit --out ./tests/results_${{ github.job }}_${{ matrix.pl_backend }}/report_$file.xml; done; - name: Upload test results uses: actions/upload-artifact@v3 if: always() with: - name: ubuntu-tests-reports - path: ./Build/tests/results/report_${{ github.job }}.xml + name: ubuntu-tests-reports-${{ github.job }}_${{ matrix.pl_backend }} + path: ./Build/tests/results_${{ github.job }}_${{ matrix.pl_backend }} - name: Build and run unit tests for code coverage run: | - cmake . -BBuildCov -DCMAKE_BUILD_TYPE=Debug -DENABLE_PYTHON=OFF -DENABLE_KOKKOS=ON -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DBUILD_TESTS=ON -DENABLE_COVERAGE=ON -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja + cmake . 
-BBuildCov -G Ninja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DBUILD_TESTS=ON \ + -DENABLE_PYTHON=OFF \ + -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos \ + -DPL_BACKEND=${{ matrix.pl_backend }} \ + -DENABLE_COVERAGE=ON \ + -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) cmake --build ./BuildCov cd ./BuildCov - ./pennylane_lightning_test_runner - lcov --directory . -b ../pennylane_lightning/src --capture --output-file coverage.info + for file in *runner ; do ./$file; done; + lcov --directory . -b ../pennylane_lightning/core/src --capture --output-file coverage.info lcov --remove coverage.info '/usr/*' --output-file coverage.info - mv coverage.info coverage-${{ github.job }}.info + mv coverage.info coverage-${{ github.job }}-${{ matrix.pl_backend }}.info - name: Upload code coverage results uses: actions/upload-artifact@v3 with: name: ubuntu-codecov-results-cpp - path: ./BuildCov/coverage-${{ github.job }}.info + path: ./BuildCov/coverage-${{ github.job }}-${{ matrix.pl_backend }}.info pythontestswithKokkos: needs: [build_and_cache_Kokkos] strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_kokkos", "all"] exec_model: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.exec_model) }} kokkos_version: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.kokkos_version) }} + exclude: + - pl_backend: ["all"] + exec_model: OPENMP - name: Python tests with Kokkos and Kokkos Kernels + name: Python tests with Kokkos runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 with: @@ -347,7 +375,7 @@ jobs: - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Restoring cached dependencies id: kokkos-cache @@ -368,184 +396,62 @@ jobs: - name: Get required Python packages run: | cd main - python -m pip install --upgrade pip - pip install -r 
requirements-dev.txt + python -m pip install -r requirements-dev.txt - name: Install ML libraries for interfaces run: | - pip install --upgrade torch==$TORCH_VERSION -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install --upgrade "jax[cpu]" # This also installs jaxlib - pip install --upgrade tensorflow~=$TF_VERSION keras~=$TF_VERSION + python -m pip install --upgrade torch==$TORCH_VERSION -f https://download.pytorch.org/whl/cpu/torch_stable.html + python -m pip install --upgrade "jax[cpu]" # This also installs jaxlib + python -m pip install --upgrade tensorflow~=$TF_VERSION keras~=$TF_VERSION - - name: Install lightning.qubit device + - name: Install backend device + if: ${{ matrix.pl_backend != 'all'}} run: | cd main - CMAKE_ARGS="-DENABLE_KOKKOS=ON -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" pip install -e . -vv + SKIP_COMPILATION=True PL_BACKEND="lightning_qubit" pip install -e . -vv + CMAKE_ARGS="-DPL_BACKEND=${{ matrix.pl_backend }} -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DENABLE_PYTHON=ON -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \ + python -m pip install -e . 
-vv - name: Run PennyLane-Lightning unit tests + if: ${{ matrix.pl_backend != 'all'}} run: | cd main/ - pytest tests/ $COVERAGE_FLAGS - pl-device-test --device lightning.qubit --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append - pl-device-test --device lightning.qubit --shots=None --skip-ops $COVERAGE_FLAGS --cov-append + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + PL_DEVICE=${DEVICENAME} python -m pytest tests/ $COVERAGE_FLAGS + pl-device-test --device ${DEVICENAME} --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append + pl-device-test --device ${DEVICENAME} --shots=None --skip-ops $COVERAGE_FLAGS --cov-append mv coverage.xml coverage-${{ github.job }}.xml - - name: Upload code coverage results - uses: actions/upload-artifact@v3 - with: - name: ubuntu-codecov-results-python - path: ./main/coverage-${{ github.job }}.xml - - cpptestswithKokkosAndOpenBLAS: - needs: [build_and_cache_Kokkos] - strategy: - matrix: - os: [ubuntu-22.04] - exec_model: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.exec_model) }} - kokkos_version: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.kokkos_version) }} - - name: C++ tests (Linux, OpenBLAS, Kokkos and Kokkos Kernels) - runs-on: ${{ matrix.os }} - - steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - - uses: actions/setup-python@v4 - name: Install Python - with: - python-version: '3.8' - - - name: Checkout PennyLane-Lightning - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Restoring cached dependencies - id: kokkos-cache - uses: actions/cache@v3 - with: - path: ${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} - key: ${{ matrix.os }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} - - - name: Copy cached libraries - run: | - mkdir Kokkos/ - cp -rf ${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }}/* Kokkos/ - - - name: Install dependencies - run: sudo apt-get update && sudo 
apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION libopenblas-dev ninja-build gcovr lcov - - - name: Build and run unit tests - run: | - cmake . -BBuild -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_BLAS=ON -DENABLE_PYTHON=OFF -DENABLE_KOKKOS=ON -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DBUILD_TESTS=ON -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja - cmake --build ./Build - cd ./Build - mkdir -p ./tests/results - ./pennylane_lightning_test_runner --order lex --reporter junit --out ./tests/results/report_${{ github.job }}.xml - - - name: Upload test results - uses: actions/upload-artifact@v3 - if: always() - with: - name: ubuntu-tests-reports - path: ./Build/tests/results/report_${{ github.job }}.xml - - - name: Build and run unit tests for code coverage - run: | - cmake . -BBuildCov -DCMAKE_BUILD_TYPE=Debug -DENABLE_BLAS=ON -DENABLE_PYTHON=OFF -DENABLE_KOKKOS=ON -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DBUILD_TESTS=ON -DENABLE_COVERAGE=ON -DCMAKE_CXX_COMPILER="$(which g++-$GCC_VERSION)" -G Ninja - cmake --build ./BuildCov - cd ./BuildCov - ./pennylane_lightning_test_runner - lcov --directory . 
-b ../pennylane_lightning/src --capture --output-file coverage.info - lcov --remove coverage.info '/usr/*' --output-file coverage.info - mv coverage.info coverage-${{ github.job }}.info - - - name: Upload code coverage results - uses: actions/upload-artifact@v3 - with: - name: ubuntu-codecov-results-cpp - path: ./BuildCov/coverage-${{ github.job }}.info - - pythontestswithKokkosAndOpenBLAS: - needs: [build_and_cache_Kokkos] - strategy: - matrix: - os: [ubuntu-22.04] - exec_model: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.exec_model) }} - kokkos_version: ${{ fromJson(needs.build_and_cache_Kokkos.outputs.kokkos_version) }} - - name: Python tests with OpenBLAS, Kokkos and Kokkos Kernels - runs-on: ${{ matrix.os }} - - steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - - name: Checkout PennyLane-Lightning - uses: actions/checkout@v3 - with: - path: main - fetch-depth: 2 - - - uses: actions/setup-python@v4 - name: Install Python - with: - python-version: '3.8' - - - name: Restoring cached dependencies - id: kokkos-cache - uses: actions/cache@v3 - with: - path: ${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} - key: ${{ matrix.os }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} - - - name: Copy cached libraries - run: | - mkdir Kokkos/ - cp -rf ${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }}/* Kokkos/ - pwd - - - name: Install dependencies - run: sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION libopenblas-dev - - - name: Get required Python packages - run: | - cd main - python -m pip install --upgrade pip - pip install -r requirements-dev.txt - - - name: Install ML libraries for interfaces - run: | - pip install --upgrade torch==$TORCH_VERSION -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install --upgrade "jax[cpu]" # This also installs jaxlib - pip install --upgrade 
tensorflow~=$TF_VERSION keras~=$TF_VERSION - - - name: Install lightning.qubit device + - name: Install all backend devices + if: ${{ matrix.pl_backend == 'all' }} run: | cd main - CMAKE_ARGS="-DENABLE_BLAS=ON -DENABLE_KOKKOS=ON -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" pip install -e . -vv - - - name: Run PennyLane-Lightning unit tests + CMAKE_ARGS="-DPL_BACKEND="lightning_qubit" -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DENABLE_KOKKOS=ON -DENABLE_PYTHON=ON -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \ + python -m pip install -e . -vv + rm -rf build + CMAKE_ARGS="-DPL_BACKEND="lightning_kokkos" -DCMAKE_PREFIX_PATH=${{ github.workspace }}/Kokkos -DENABLE_KOKKOS=ON -DENABLE_PYTHON=ON -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \ + python -m pip install -e . -vv + + - name: Run PennyLane-Lightning unit tests for lightning.qubit with all devices installed + if: ${{ matrix.pl_backend == 'all' }} run: | cd main/ - pytest tests/ $COVERAGE_FLAGS + PL_DEVICE=lightning.qubit python -m pytest tests/ $COVERAGE_FLAGS pl-device-test --device lightning.qubit --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append pl-device-test --device lightning.qubit --shots=None --skip-ops $COVERAGE_FLAGS --cov-append - mv coverage.xml coverage-${{ github.job }}.xml + PL_DEVICE=lightning.kokkos python -m pytest tests/ $COVERAGE_FLAGS + pl-device-test --device lightning.kokkos --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append + pl-device-test --device lightning.kokkos --shots=None --skip-ops $COVERAGE_FLAGS --cov-append + mv coverage.xml coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml - name: Upload code coverage results uses: actions/upload-artifact@v3 with: name: ubuntu-codecov-results-python - path: ./main/coverage-${{ github.job }}.xml + path: ./main/coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml upload-to-codecov-linux-python: - needs: [pythontests, pythontestswithBLAS, pythontestswithKokkos, 
pythontestswithKokkosAndOpenBLAS] + needs: [pythontests, pythontestswithOpenBLAS, pythontestswithKokkos] name: Upload coverage data to codecov runs-on: ubuntu-latest steps: @@ -564,7 +470,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} upload-to-codecov-linux-cpp: - needs: [cpptests, cpptestswithOpenBLAS, cpptestswithKokkos, cpptestswithKokkosAndOpenBLAS] + needs: [cpptests, cpptestswithOpenBLAS, cpptestswithKokkos] name: Upload coverage data to codecov runs-on: ubuntu-latest steps: @@ -580,4 +486,4 @@ jobs: uses: codecov/codecov-action@v3 with: fail_ci_if_error: true - token: ${{ secrets.CODECOV_TOKEN }} + token: ${{ secrets.CODECOV_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index ae30f02e38..6ccc95f0c8 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -5,6 +5,10 @@ on: - master pull_request: +concurrency: + group: tests_windows-${{ github.ref }} + cancel-in-progress: true + jobs: cpptests: name: C++ tests (Windows) @@ -12,12 +16,10 @@ jobs: strategy: matrix: os: [windows-latest] + pl_backend: ["lightning_qubit"] steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - uses: actions/checkout@v3 + - name: Checkout PennyLane-Lightning + uses: actions/checkout@v3 with: fetch-depth: 2 @@ -33,25 +35,39 @@ jobs: - name: Build and run unit tests for code coverage run: | - cmake ./pennylane_lightning/src -BBuild -DBUILD_TESTS=ON -DENABLE_OPENMP=OFF -DENABLE_WARNINGS=OFF - cmake --build ./Build --config Debug - mkdir -p ./Build/tests/results - .\Build\tests\Debug\pennylane_lightning_test_runner.exe --order lex --reporter junit --out .\Build\tests\results\report_cpptests.xml - OpenCppCoverage --sources pennylane_lightning\src --export_type cobertura:coverage.xml Build\tests\Debug\pennylane_lightning_test_runner.exe - Move-Item -Path .\coverage.xml -Destination .\coverage-${{ 
github.job }}.xml + cmake -BBuild ` + -DBUILD_TESTS=ON ` + -DENABLE_OPENMP=OFF ` + -DENABLE_PYTHON=OFF ` + -DENABLE_GATE_DISPATCHER=OFF ` + -DPL_BACKEND=${{ matrix.pl_backend }} ` + -DENABLE_WARNINGS=OFF + cmake --build .\Build --config Debug + mkdir -p .\Build\tests\results + $test_bins = Get-ChildItem -Include *.exe -Recurse -Path ./Build/Debug + foreach ($file in $test_bins) + { + $filename = $file.ToString() -replace '.{4}$' + $filename = $filename.Substring($filename.LastIndexOf("\")+1) + $test_call = $file.ToString() + " --order lex --reporter junit --out .\Build\tests\results\report_" + $filename + ".xml" + Invoke-Expression $test_call + $cov_call = "OpenCppCoverage --sources pennylane_lightning\core\src --export_type cobertura:coverage.xml " + $file.ToString() + Invoke-Expression $cov_call + } + Move-Item -Path .\coverage.xml -Destination .\coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml - name: Upload test results uses: actions/upload-artifact@v3 if: always() with: - name: windows-test-report - path: .\Build\tests\results\report_${{ github.job }}.xml + name: windows-test-report-${{ github.job }}-${{ matrix.pl_backend }} + path: .\Build\tests\results\ - name: Upload coverage results uses: actions/upload-artifact@v3 with: name: windows-coverage-report - path: .\coverage-${{ github.job }}.xml + path: .\coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml win-set-matrix-x86: @@ -59,11 +75,6 @@ jobs: runs-on: ubuntu-latest steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 @@ -84,19 +95,14 @@ jobs: strategy: fail-fast: false matrix: - os: [windows-2019] + os: [windows-latest] exec_model: ${{ fromJson(needs.win-set-matrix-x86.outputs.exec_model) }} kokkos_version: ${{ fromJson(needs.win-set-matrix-x86.outputs.kokkos_version) }} - name: Kokkos core & kernels (${{ matrix.exec_model }}) + name: Kokkos core (${{ 
matrix.exec_model }}) runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Cache installation directories id: kokkos-cache uses: actions/cache@v3 @@ -111,10 +117,6 @@ jobs: git clone https://github.com/kokkos/kokkos.git cd D:\a\kokkos git checkout ${{ matrix.kokkos_version }} - cd D:\a\ - git clone https://github.com/kokkos/kokkos-kernels.git - cd D:\a\kokkos-kernels - git checkout ${{ matrix.kokkos_version }} cd .. - name: Create installation directory @@ -134,6 +136,7 @@ jobs: cd D:\a\kokkos cmake -BBuild . -DCMAKE_INSTALL_PREFIX=D:\a\install_dir\${{ matrix.exec_model }} ` -DKokkos_ENABLE_COMPLEX_ALIGN=OFF ` + -DKokkos_ENABLE_SERIAL=ON ` -DKokkos_ENABLE_${{ matrix.exec_model }}=ON ` -DKokkos_ENABLE_DEPRECATION_WARNINGS=OFF ` -DCMAKE_CXX_STANDARD=20 ` @@ -143,37 +146,19 @@ jobs: cmake --build ./Build --config Debug --verbose cmake --install ./Build --config Debug --verbose - - name: Build Kokkos kernels library - if: steps.kokkos-cache.outputs.cache-hit != 'true' - run: | - cd D:\a\kokkos-kernels - cmake -BBuild . 
-DCMAKE_INSTALL_PREFIX=D:\a\install_dir\${{ matrix.exec_model }} ` - -DKokkos_ENABLE_${{ matrix.exec_model }}=ON ` - -DCMAKE_CXX_STANDARD=20 ` - -DCMAKE_PREFIX_PATH=D:\a\install_dir\${{ matrix.exec_model }} ` - -DCMAKE_POSITION_INDEPENDENT_CODE=ON ` - -DCMAKE_BUILD_TYPE=Debug ` - -T clangcl - cmake --build ./Build --config Debug --verbose - cmake --install ./Build --config Debug --verbose - cpptestswithkokkos: needs: [build_dependencies, win-set-matrix-x86] strategy: matrix: - os: [windows-2019] + os: [windows-latest] + pl_backend: ["lightning_qubit"] exec_model: ${{ fromJson(needs.win-set-matrix-x86.outputs.exec_model) }} kokkos_version: ${{ fromJson(needs.win-set-matrix-x86.outputs.kokkos_version) }} - name: C++ tests (Windows, Kokkos and Kokkos Kernels) + name: C++ tests (Windows, Kokkos) runs-on: ${{ matrix.os }} steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Restoring cached dependencies id: kokkos-cache uses: actions/cache@v3 @@ -181,7 +166,8 @@ jobs: path: D:\a\install_dir\${{ matrix.exec_model }} key: ${{ matrix.os }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }}-Debug - - uses: actions/checkout@v3 + - name: Checkout PennyLane-Lightning + uses: actions/checkout@v3 - name: Copy cached libraries if: steps.kokkos-cache.outputs.cache-hit == 'true' @@ -207,25 +193,40 @@ jobs: run: | Subst Z: (pwd) Set-Location -Path "Z:\" - cmake ./pennylane_lightning/src -BBuild -DBUILD_TESTS=ON -DENABLE_KOKKOS=ON -DCMAKE_PREFIX_PATH=D:\a\pennylane-lightning\pennylane-lightning\Kokkos -DENABLE_OPENMP=OFF -DENABLE_WARNINGS=OFF -T clangcl - cmake --build ./Build --config Debug -- /p:UseMultiToolTask=true /p:EnforceProcessCountAcrossBuilds=true /p:MultiProcMaxCount=2 - mkdir -p ./Build/tests/results - .\Build\tests\Debug\pennylane_lightning_test_runner.exe --order lex --reporter junit --out .\Build\tests\results\report_${{ github.job }}.xml - OpenCppCoverage --sources 
pennylane_lightning\src --export_type cobertura:coverage.xml Build\tests\Debug\pennylane_lightning_test_runner.exe - Move-Item -Path .\coverage.xml -Destination .\coverage-${{ github.job }}.xml + cmake -BBuild ` + -DBUILD_TESTS=ON ` + -DENABLE_PYTHON=OFF ` + -DENABLE_GATE_DISPATCHER=OFF ` + -DCMAKE_PREFIX_PATH=D:\a\pennylane-lightning\pennylane-lightning\Kokkos ` + -DENABLE_OPENMP=OFF ` + -DPL_BACKEND=${{ matrix.pl_backend }} ` + -DENABLE_WARNINGS=OFF -T clangcl + cmake --build .\Build --config Debug -- /p:UseMultiToolTask=true /p:EnforceProcessCountAcrossBuilds=true /p:MultiProcMaxCount=2 + mkdir -p .\Build\tests\results + $test_bins = Get-ChildItem -Include *.exe -Recurse -Path ./Build/Debug + foreach ($file in $test_bins) + { + $filename = $file.ToString() -replace '.{4}$' + $filename = $filename.Substring($filename.LastIndexOf("\")+1) + $test_call = $file.ToString() + " --order lex --reporter junit --out .\Build\tests\results\report_" + $filename + ".xml" + Invoke-Expression $test_call + $cov_call = "OpenCppCoverage --sources pennylane_lightning\core\src --export_type cobertura:coverage.xml " + $file.ToString() + Invoke-Expression $cov_call + } + Move-Item -Path .\coverage.xml -Destination .\coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml - name: Upload test results uses: actions/upload-artifact@v3 if: always() with: - name: windows-test-report - path: .\Build\tests\results\report_${{ github.job }}.xml + name: windows-test-report-${{ github.job }}-${{ matrix.pl_backend }} + path: .\Build\tests\results\ - name: Upload coverage results uses: actions/upload-artifact@v3 with: name: windows-coverage-report - path: .\coverage-${{ github.job }}.xml + path: .\coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml upload-to-codecov-windows: needs: [cpptests, cpptestswithKokkos] @@ -244,4 +245,4 @@ jobs: uses: codecov/codecov-action@v3 with: fail_ci_if_error: true - token: ${{ secrets.CODECOV_TOKEN }} + token: ${{ secrets.CODECOV_TOKEN }} \ No newline at end of 
file diff --git a/.github/workflows/tests_without_binary.yml b/.github/workflows/tests_without_binary.yml index 10c6ed9e0d..e674728e95 100644 --- a/.github/workflows/tests_without_binary.yml +++ b/.github/workflows/tests_without_binary.yml @@ -8,6 +8,10 @@ on: env: COVERAGE_FLAGS: "--cov=pennylane_lightning --cov-report=term-missing --cov-report=xml:./coverage.xml --no-flaky-report -p no:warnings --tb=native" +concurrency: + group: tests_without_binary-${{ github.ref }} + cancel-in-progress: true + jobs: pythontests: name: Python tests @@ -15,13 +19,9 @@ jobs: strategy: matrix: os: [ubuntu-22.04] + pl_backend: ["lightning_qubit", "lightning_kokkos"] steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 with: @@ -30,31 +30,38 @@ jobs: - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Get required Python packages run: | cd main - python -m pip install --upgrade pip - pip install -r requirements-dev.txt + python -m pip install -r requirements-dev.txt - name: Install lightning.qubit device - run: | - cd main - pip install -e . env: SKIP_COMPILATION: True + PL_BACKEND: ${{ matrix.pl_backend }} + run: | + cd main + python -m pip install -e . -vv + + - name: Install the new pennylane_lightning package + if: ${{ matrix.pl_backend == 'lightning_kokkos'}} + run: | + cd main + SKIP_COMPILATION=True PL_BACKEND="lightning_qubit" pip install -e . 
-vv - name: Run PennyLane-Lightning unit tests run: | cd main/ - pytest tests/ $COVERAGE_FLAGS - pl-device-test --device lightning.qubit --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append - pl-device-test --device lightning.qubit --shots=None --skip-ops $COVERAGE_FLAGS --cov-append + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + PL_DEVICE=${DEVICENAME} python -m pytest tests/ $COVERAGE_FLAGS + pl-device-test --device ${DEVICENAME} --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append + pl-device-test --device ${DEVICENAME} --shots=None --skip-ops $COVERAGE_FLAGS --cov-append - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 with: files: ./main/coverage.xml fail_ci_if_error: true - token: ${{ secrets.CODECOV_TOKEN }} + token: ${{ secrets.CODECOV_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/update_dev_version.yml b/.github/workflows/update_dev_version.yml index 899bb66b3c..bfa66e1e8b 100644 --- a/.github/workflows/update_dev_version.yml +++ b/.github/workflows/update_dev_version.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Checkout PennyLane-Lightning PR uses: actions/checkout@v3 diff --git a/.github/workflows/wheel_linux_aarch64.yml b/.github/workflows/wheel_linux_aarch64.yml index 745e26cf5c..972b3661db 100644 --- a/.github/workflows/wheel_linux_aarch64.yml +++ b/.github/workflows/wheel_linux_aarch64.yml @@ -1,7 +1,7 @@ name: Wheel::Linux::ARM # **What it does**: Builds python wheels for Linux (ubuntu-latest) architecture ARM 64 and store it as artifacts. -# Python versions: 3.8, 3.9, 3.10, 3.11. +# Python versions: 3.9, 3.10, 3.11. # **Why we have it**: To build wheels for pennylane-lightning installation. # **Who does it impact**: Wheels to be uploaded to PyPI. 
@@ -12,6 +12,10 @@ on: release: types: [published] +concurrency: + group: wheel_linux_aarch64-${{ github.ref }} + cancel-in-progress: true + jobs: set_wheel_build_matrix: name: "Set wheel build matrix" @@ -19,29 +23,99 @@ jobs: with: event_name: ${{ github.event_name }} - linux-wheels-aarch64: + build_dependencies: needs: [set_wheel_build_matrix] + strategy: + matrix: + os: [ubuntu-latest] + arch: [aarch64] + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} + container_img: ["quay.io/pypa/manylinux2014_aarch64"] + + name: Kokkos core (${{ matrix.exec_model }}::${{ matrix.arch }}) + runs-on: ${{ matrix.os }} + + steps: + - name: Cache installation directories + id: kokkos-cache + uses: actions/cache@v3 + with: + path: ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.container_img }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} + + - name: Clone Kokkos libs + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + git clone https://github.com/kokkos/kokkos.git + cd kokkos + git checkout ${{ matrix.kokkos_version }} + cd - + pushd . &> /dev/null + + - uses: docker/setup-qemu-action@v2 + name: Set up QEMU + + - name: Build Kokkos core library + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + mkdir -p ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }} + cd kokkos + docker run --platform linux/aarch64 \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v `pwd`:/io \ + -v ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }}:/install \ + -i ${{ matrix.container_img }} \ + bash -c "git config --global --add safe.directory /io && \ + cd /io && \ + python3.9 -m pip install ninja && \ + ln -s /opt/python/cp39-cp39/bin/ninja /usr/bin/ninja && \ + cmake -BBuild . 
-DCMAKE_INSTALL_PREFIX=/install \ + -DKokkos_ENABLE_COMPLEX_ALIGN=OFF \ + -DKokkos_ENABLE_SERIAL=ON \ + -DKokkos_ENABLE_${{ matrix.exec_model }}=ON \ + -DKokkos_ENABLE_DEPRECATION_WARNINGS=OFF \ + -DCMAKE_CXX_STANDARD=20 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -G Ninja && \ + cmake --build ./Build --verbose && \ + cmake --install ./Build; " + cd - + + linux-wheels-aarch64: + needs: [set_wheel_build_matrix, build_dependencies] strategy: fail-fast: false matrix: os: [ubuntu-latest] arch: [aarch64] + pl_backend: ["lightning_kokkos", "lightning_qubit"] cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} container_img: ["quay.io/pypa/manylinux2014_aarch64"] - name: ubuntu-latest::aarch64 (Python ${{ fromJson('{ "cp38-*":"3.8","cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) + name: ${{ matrix.os }}::${{ matrix.arch }} - ${{ matrix.pl_backend }} (Python ${{ fromJson('{ "cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) runs-on: ${{ matrix.os }} steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 + - name: Checkout PennyLane-Lightning + uses: actions/checkout@v3 + + - name: Restoring cached dependencies + id: kokkos-cache + uses: actions/cache@v3 with: - access_token: ${{ github.token }} + path: ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.container_img }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} - - uses: actions/checkout@v3 + - name: Copy cached libraries + run: | + mkdir Kokkos + cp -rf ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }}/* Kokkos/ - name: Install cibuildwheel - run: python3 -m pip install cibuildwheel~=2.11.0 + run: python -m pip install cibuildwheel~=2.11.0 - uses: docker/setup-qemu-action@v2 name: Set up 
QEMU @@ -57,12 +131,23 @@ jobs: # Python build settings CIBW_BEFORE_BUILD: | cat /etc/yum.conf | sed "s/\[main\]/\[main\]\ntimeout=5/g" > /etc/yum.conf - pip install ninja cmake~=3.24.0 + python -m pip install ninja cmake~=3.24.0 + + CIBW_ENVIRONMENT: | + PL_BACKEND="${{ matrix.pl_backend }}" CIBW_MANYLINUX_AARCH64_IMAGE: manylinux2014 CIBW_BUILD_VERBOSITY: 3 + CIBW_TEST_REQUIRES: pytest pytest-cov pytest-mock flaky + + CIBW_BEFORE_TEST: if ${{ matrix.pl_backend == 'lightning_kokkos'}}; then SKIP_COMPILATION=True PL_BACKEND="lightning_qubit" pip install -e . -vv; fi + + CIBW_TEST_COMMAND: | + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + pl-device-test --device=${DEVICENAME} --skip-ops -x --tb=short --no-flaky-report + run: python3 -m cibuildwheel --output-dir wheelhouse - name: Validate wheels @@ -70,26 +155,32 @@ jobs: python3 -m pip install twine python3 -m twine check ./wheelhouse/*.whl - - uses: actions-ecosystem/action-regex-match@v2 + - uses: actions-ecosystem/action-regex-match@main id: rc_build with: text: ${{ github.event.pull_request.head.ref }} regex: '.*[0-9]+.[0-9]+.[0-9]+[-_]?rc[0-9]+' - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' }} with: - name: ${{ runner.os }}-wheels-${{ matrix.arch }}.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: ./wheelhouse/*.whl upload-pypi: needs: linux-wheels-aarch64 - runs-on: ubuntu-latest + strategy: + matrix: + os: [ubuntu-latest] + arch: [aarch64] + pl_backend: ["lightning_qubit"] + runs-on: ${{ matrix.os }} + if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master'}} steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: Linux-wheels-aarch64.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: dist - name: Upload wheels to PyPI @@ -97,4 +188,4 @@ with: user:
__token__ password: ${{ secrets.TEST_PYPI_API_TOKEN }} - repository_url: https://test.pypi.org/legacy/ + repository_url: https://test.pypi.org/legacy/ \ No newline at end of file diff --git a/.github/workflows/wheel_linux_ppc64le.yml b/.github/workflows/wheel_linux_ppc64le.yml index 02338bff0a..262cf7b86f 100644 --- a/.github/workflows/wheel_linux_ppc64le.yml +++ b/.github/workflows/wheel_linux_ppc64le.yml @@ -1,7 +1,7 @@ name: Wheel::Linux::PowerPC # **What it does**: Builds python wheels for Linux (ubuntu-latest) architecture PowerPC 64 and store it as artifacts. -# Python versions: 3.8, 3.9, 3.10, 3.11. +# Python versions: 3.9, 3.10, 3.11. # **Why we have it**: To build wheels for pennylane-lightning installation. # **Who does it impact**: Wheels to be uploaded to PyPI. @@ -12,6 +12,10 @@ on: release: types: [published] +concurrency: + group: wheel_linux_ppc64le-${{ github.ref }} + cancel-in-progress: true + jobs: set_wheel_build_matrix: name: "Set wheel build matrix" @@ -19,29 +23,99 @@ jobs: with: event_name: ${{ github.event_name }} - linux-wheels-ppc64le: + build_dependencies: needs: [set_wheel_build_matrix] + strategy: + matrix: + os: [ubuntu-latest] + arch: [ppc64le] + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} + container_img: ["quay.io/pypa/manylinux2014_ppc64le"] + + name: Kokkos core (${{ matrix.exec_model }}::${{ matrix.arch }}) + runs-on: ${{ matrix.os }} + + steps: + - name: Cache installation directories + id: kokkos-cache + uses: actions/cache@v3 + with: + path: ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.container_img }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} + + - name: Clone Kokkos libs + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + git clone https://github.com/kokkos/kokkos.git + cd kokkos + git checkout ${{ matrix.kokkos_version }} + cd - + pushd . 
&> /dev/null + + - uses: docker/setup-qemu-action@v2 + name: Set up QEMU + + - name: Build Kokkos core library + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + mkdir -p ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }} + cd kokkos + docker run --platform linux/ppc64le \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v `pwd`:/io \ + -v ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }}:/install \ + -i ${{ matrix.container_img }} \ + bash -c "git config --global --add safe.directory /io && \ + cd /io && \ + python3.9 -m pip install ninja && \ + ln -s /opt/python/cp39-cp39/bin/ninja /usr/bin/ninja && \ + cmake -BBuild . -DCMAKE_INSTALL_PREFIX=/install \ + -DKokkos_ENABLE_COMPLEX_ALIGN=OFF \ + -DKokkos_ENABLE_SERIAL=ON \ + -DKokkos_ENABLE_${{ matrix.exec_model }}=ON \ + -DKokkos_ENABLE_DEPRECATION_WARNINGS=OFF \ + -DCMAKE_CXX_STANDARD=20 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -G Ninja && \ + cmake --build ./Build --verbose && \ + cmake --install ./Build; " + cd - + + linux-wheels-ppc64le: + needs: [set_wheel_build_matrix, build_dependencies] strategy: fail-fast: false matrix: os: [ubuntu-latest] arch: [ppc64le] + pl_backend: ["lightning_kokkos", "lightning_qubit"] cibw_build: ${{fromJson(needs.set_wheel_build_matrix.outputs.python_version)}} + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} container_img: ["quay.io/pypa/manylinux2014_ppc64le"] - name: ubuntu-latest::ppc64le (Python ${{ fromJson('{ "cp38-*":"3.8","cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) + name: ${{ matrix.os }}::${{ matrix.arch }} - ${{ matrix.pl_backend }} (Python ${{ fromJson('{"cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) runs-on: ${{ matrix.os }} steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 + - name: Checkout PennyLane-Lightning + 
uses: actions/checkout@v3 + + - name: Restoring cached dependencies + id: kokkos-cache + uses: actions/cache@v3 with: - access_token: ${{ github.token }} + path: ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.container_img }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} - - uses: actions/checkout@v3 + - name: Copy cached libraries + run: | + mkdir Kokkos + cp -rf ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }}/* Kokkos/ - name: Install cibuildwheel - run: python3 -m pip install cibuildwheel~=2.11.0 + run: python -m pip install cibuildwheel~=2.11.0 - uses: docker/setup-qemu-action@v2 name: Set up QEMU @@ -57,7 +131,10 @@ jobs: # Python build settings CIBW_BEFORE_BUILD: | cat /etc/yum.conf | sed "s/\[main\]/\[main\]\ntimeout=5/g" > /etc/yum.conf - pip install ninja cmake~=3.24.0 + python -m pip install ninja cmake~=3.24.0 + + CIBW_ENVIRONMENT: | + PL_BACKEND="${{ matrix.pl_backend }}" CIBW_MANYLINUX_PPC64LE_IMAGE: manylinux2014 @@ -70,26 +147,32 @@ jobs: python3 -m pip install twine python3 -m twine check ./wheelhouse/*.whl - - uses: actions-ecosystem/action-regex-match@v2 + - uses: actions-ecosystem/action-regex-match@main id: rc_build with: text: ${{ github.event.pull_request.head.ref }} regex: '.*[0-9]+.[0-9]+.[0-9]+[-_]?rc[0-9]+' - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != ''}} with: - name: ${{ runner.os }}-wheels-${{ matrix.arch }}.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: ./wheelhouse/*.whl upload-pypi: needs: linux-wheels-ppc64le - runs-on: ubuntu-latest + strategy: + matrix: + os: [ubuntu-latest] + arch: [ppc64le] + pl_backend: ["lightning_qubit"] + runs-on: ${{ matrix.os }} + if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master'}} steps: - - uses: actions/download-artifact@v2 + - uses:
actions/download-artifact@v3 with: - name: Linux-wheels-ppc64le.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: dist - name: Upload wheels to PyPI @@ -97,4 +179,4 @@ jobs: with: user: __token__ password: ${{ secrets.TEST_PYPI_API_TOKEN }} - repository_url: https://test.pypi.org/legacy/ + repository_url: https://test.pypi.org/legacy/ \ No newline at end of file diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index 1c4437cada..fb5736d5a6 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -1,7 +1,7 @@ name: Wheel::Linux::x86_64 # **What it does**: Builds python wheels for Linux (ubuntu-latest) architecture x86_64 and store it as artifacts. -# Python versions: 3.8, 3.9, 3.10, 3.11. +# Python versions: 3.9, 3.10, 3.11. # **Why we have it**: To build wheels for pennylane-lightning installation. # **Who does it impact**: Wheels to be uploaded to PyPI. @@ -16,6 +16,10 @@ on: release: types: [published] +concurrency: + group: wheel_linux_x86_64-${{ github.ref }} + cancel-in-progress: true + jobs: set_wheel_build_matrix: name: "Set wheel build matrix" @@ -23,29 +27,99 @@ jobs: with: event_name: ${{ github.event_name }} - linux-wheels-x86-64: + build_dependencies: needs: [set_wheel_build_matrix] + strategy: + matrix: + os: [ubuntu-latest] + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} + container_img: ["quay.io/pypa/manylinux2014_x86_64"] + + name: Kokkos core (${{ matrix.exec_model }}) + runs-on: ${{ matrix.os }} + container: ${{ matrix.container_img }} + + steps: + - name: Cache installation directories + id: kokkos-cache + uses: actions/cache@v3 + with: + path: /root/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.container_img }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} + + - name: 
Install dependencies (Ubuntu) + if: ${{ (matrix.container_img == 'ubuntu-latest') && (steps.kokkos-cache.outputs.cache-hit != 'true') }} + run: | + apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION ninja-build git + echo "COMPILER=g++-11" >> $GITHUB_ENV + + - name: Install dependencies (CentOS) + if: ${{ (matrix.container_img == 'quay.io/pypa/manylinux2014_x86_64') && (steps.kokkos-cache.outputs.cache-hit != 'true') }} + run: | + yum update -y && yum install -y cmake ninja-build + echo "COMPILER=g++" >> $GITHUB_ENV + + - name: Clone Kokkos libs + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + git clone https://github.com/kokkos/kokkos.git + cd kokkos + git checkout ${{ matrix.kokkos_version }} + cd - + pushd . &> /dev/null + + - name: Build Kokkos core library + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + mkdir -p /root/Kokkos_install/${{ matrix.exec_model }} + cd kokkos + cmake -BBuild . 
-DCMAKE_INSTALL_PREFIX=/root/Kokkos_install/${{ matrix.exec_model }} \ + -DKokkos_ENABLE_COMPLEX_ALIGN=OFF \ + -DKokkos_ENABLE_SERIAL=ON \ + -DKokkos_ENABLE_${{ matrix.exec_model }}=ON \ + -DKokkos_ENABLE_DEPRECATION_WARNINGS=OFF \ + -DCMAKE_CXX_COMPILER=${{ env.COMPILER }} \ + -DCMAKE_CXX_STANDARD=20 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -G Ninja + cmake --build ./Build --verbose + cmake --install ./Build + cd - + + linux-wheels-x86-64: + needs: [set_wheel_build_matrix, build_dependencies] strategy: fail-fast: false matrix: os: [ubuntu-latest] arch: [x86_64] + pl_backend: ["lightning_kokkos", "lightning_qubit"] cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} container_img: ["quay.io/pypa/manylinux2014_x86_64"] - name: ${{ matrix.os }} (Python ${{ fromJson('{ "cp38-*":"3.8","cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) + name: ${{ matrix.os }}::${{ matrix.arch }} - ${{ matrix.pl_backend }} (Python ${{ fromJson('{ "cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) runs-on: ${{ matrix.os }} container: ${{ matrix.container_img }} steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 + - name: Restoring cached dependencies + id: kokkos-cache + uses: actions/cache@v3 with: - access_token: ${{ github.token }} + path: /root/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.container_img }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 + - name: Copy cached libraries + run: | + mkdir Kokkos + cp -rf /root/Kokkos_install/${{ matrix.exec_model }}/* Kokkos/ + - name: Install dependencies (CentOS) if: ${{ (matrix.container_img == 'quay.io/pypa/manylinux2014_x86_64') }} run: | @@ -54,7 +128,7 @@ jobs: yum 
update -y && yum install -y docker - name: Install cibuildwheel - run: python3.8 -m pip install cibuildwheel~=2.11.0 + run: python3.9 -m pip install cibuildwheel~=2.11.0 - name: Build wheels env: @@ -67,54 +141,64 @@ jobs: # Python build settings CIBW_BEFORE_BUILD: | cat /etc/yum.conf | sed "s/\[main\]/\[main\]\ntimeout=5/g" > /etc/yum.conf - pip install ninja cmake~=3.24.0 + python -m pip install ninja cmake~=3.24.0 yum clean all -y yum install centos-release-scl-rh -y yum install devtoolset-11-gcc-c++ -y source /opt/rh/devtoolset-11/enable -y CIBW_ENVIRONMENT: | - PATH=/opt/rh/devtoolset-11/root/usr/bin:$PATH + PATH="/opt/rh/devtoolset-11/root/usr/bin:$PATH" \ + PL_BACKEND="${{ matrix.pl_backend }}" + # Testing of built wheels CIBW_TEST_REQUIRES: pytest pytest-cov pytest-mock flaky CIBW_BEFORE_TEST: | - pip install git+https://github.com/PennyLaneAI/pennylane.git@master + python -m pip install git+https://github.com/PennyLaneAI/pennylane.git@master + if ${{ matrix.pl_backend == 'lightning_kokkos'}}; then SKIP_COMPILATION=True PL_BACKEND="lightning_qubit" pip install -e . 
-vv; fi CIBW_TEST_COMMAND: | - pl-device-test --device=lightning.qubit --skip-ops -x --tb=short --no-flaky-report + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + pl-device-test --device=${DEVICENAME} --skip-ops -x --tb=short --no-flaky-report CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 CIBW_BUILD_VERBOSITY: 3 - run: python3.8 -m cibuildwheel --output-dir wheelhouse + run: python3.9 -m cibuildwheel --output-dir wheelhouse - name: Validate wheels run: | - python3.8 -m pip install twine - python3.8 -m twine check ./wheelhouse/*.whl + python3.9 -m pip install twine + python3.9 -m twine check ./wheelhouse/*.whl - - uses: actions-ecosystem/action-regex-match@v2 + - uses: actions-ecosystem/action-regex-match@main id: rc_build with: text: ${{ github.event.pull_request.head.ref }} regex: '.*[0-9]+.[0-9]+.[0-9]+[-_]?rc[0-9]+' - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != ''}} with: - name: ${{ runner.os }}-wheels-${{ matrix.arch }}.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: ./wheelhouse/*.whl upload-pypi: needs: linux-wheels-x86-64 - runs-on: ubuntu-latest + strategy: + matrix: + os: [ubuntu-latest] + arch: [x86_64] + pl_backend: ["lightning_qubit"] + runs-on: ${{ matrix.os }} + if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master'}} steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: Linux-wheels-x86_64.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: dist - name: Upload wheels to PyPI @@ -122,4 +206,4 @@ jobs: with: user: __token__ password: ${{ secrets.TEST_PYPI_API_TOKEN }} - repository_url: https://test.pypi.org/legacy/ + repository_url: https://test.pypi.org/legacy/ \ No newline at end of file diff --git a/.github/workflows/wheel_macos_arm64.yml 
b/.github/workflows/wheel_macos_arm64.yml index 2d7042d3e9..7c3bcd9fba 100644 --- a/.github/workflows/wheel_macos_arm64.yml +++ b/.github/workflows/wheel_macos_arm64.yml @@ -1,7 +1,7 @@ name: Wheel::MacOS::ARM # **What it does**: Builds python wheels for MacOS (11) architecture ARM 64 and store it as artifacts. -# Python versions: 3.8, 3.9, 3.10, 3.11. +# Python versions: 3.9, 3.10, 3.11. # **Why we have it**: To build wheels for pennylane-lightning installation. # **Who does it impact**: Wheels to be uploaded to PyPI. @@ -15,20 +15,19 @@ on: env: ARCHS: 'arm64' - PYTHON3_MIN_VERSION: "8" + PYTHON3_MIN_VERSION: "9" PYTHON3_MAX_VERSION: "11" +concurrency: + group: wheel_macos_arm64-${{ github.ref }} + cancel-in-progress: true + jobs: mac-set-matrix-arm: name: Set builder matrix runs-on: ubuntu-latest steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 @@ -55,24 +54,20 @@ jobs: matrix: os: [macos-11] arch: [arm64] + pl_backend: ["lightning_kokkos", "lightning_qubit"] cibw_build: ${{fromJson(needs.mac-set-matrix-arm.outputs.python_version)}} - name: macos-latest::arm64 (Python ${{ fromJson('{ "cp38-*":"3.8","cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) + name: macos-latest::arm64 - ${{ matrix.pl_backend }} (Python ${{ fromJson('{ "cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) runs-on: ${{ matrix.os }} steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Install cibuildwheel run: python -m pip install cibuildwheel~=2.11.0 @@ -87,20 +82,24 @@ jobs: # Python build settings CIBW_BEFORE_BUILD: | - pip install pybind11 ninja 
cmake~=3.24.0 setuptools + python -m pip install pybind11 ninja cmake~=3.24.0 setuptools CIBW_ENVIRONMENT: | - CMAKE_ARGS="-DCMAKE_CXX_COMPILER_TARGET=arm64-apple-macos11 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=ARM64 -DENABLE_OPENMP=OFF" + CMAKE_ARGS="-DCMAKE_CXX_COMPILER_TARGET=arm64-apple-macos11 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=ARM64 -DENABLE_OPENMP=OFF" \ + PL_BACKEND="${{ matrix.pl_backend }}" # Testing of built wheels CIBW_TEST_REQUIRES: pytest pytest-cov pytest-mock flaky - CIBW_BEFORE_TEST: pip install git+https://github.com/PennyLaneAI/pennylane.git@master + CIBW_BEFORE_TEST: | + python -m pip install git+https://github.com/PennyLaneAI/pennylane.git@master + if ${{ matrix.pl_backend == 'lightning_kokkos'}}; then SKIP_COMPILATION=True PL_BACKEND="lightning_qubit" pip install -e . -vv; fi CIBW_TEST_COMMAND: | - pl-device-test --device=lightning.qubit --skip-ops -x --tb=short --no-flaky-report + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + pl-device-test --device=${DEVICENAME} --skip-ops -x --tb=short --no-flaky-report - CIBW_BUILD_VERBOSITY: 1 + CIBW_BUILD_VERBOSITY: 3 CIBW_ARCHS_MACOS: ${{ matrix.arch }} @@ -112,20 +111,26 @@ jobs: python -m pip install twine python -m twine check ./wheelhouse/*.whl - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' }} with: - name: ${{ runner.os }}-wheels-${{ matrix.arch }}.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: ./wheelhouse/*.whl upload-pypi: needs: mac-wheels-arm64 - runs-on: ubuntu-latest + strategy: + matrix: + os: [macos-11] + arch: [arm64] + pl_backend: ["lightning_qubit"] + runs-on: ${{ matrix.os }} + if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master'}} steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: macOS-wheels-arm64.zip + name: ${{ runner.os 
}}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: dist - name: Upload wheels to PyPI @@ -133,4 +138,4 @@ jobs: with: user: __token__ password: ${{ secrets.TEST_PYPI_API_TOKEN }} - repository_url: https://test.pypi.org/legacy/ + repository_url: https://test.pypi.org/legacy/ \ No newline at end of file diff --git a/.github/workflows/wheel_macos_x86_64.yml b/.github/workflows/wheel_macos_x86_64.yml index 4a64c81efa..991d1590cc 100644 --- a/.github/workflows/wheel_macos_x86_64.yml +++ b/.github/workflows/wheel_macos_x86_64.yml @@ -1,7 +1,7 @@ name: Wheel::MacOS::Intel # **What it does**: Builds python wheels for MacOS (10.15) architecture x86_64 and store it as artifacts. -# Python versions: 3.8, 3.9, 3.10, 3.11. +# Python versions: 3.9, 3.10, 3.11. # **Why we have it**: To build wheels for pennylane-lightning installation. # **Who does it impact**: Wheels to be uploaded to PyPI. @@ -16,6 +16,10 @@ on: env: MACOSX_DEPLOYMENT_TARGET: 10.15 +concurrency: + group: wheel_macos_x86_64-${{ github.ref }} + cancel-in-progress: true + jobs: set_wheel_build_matrix: name: "Set wheel build matrix" @@ -23,30 +27,100 @@ jobs: with: event_name: ${{ github.event_name }} - mac-wheels-x86: + build_dependencies: needs: [set_wheel_build_matrix] + strategy: + matrix: + os: [macos-12] + arch: [x86_64] + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} + + name: Kokkos (${{ matrix.exec_model }}::${{ matrix.arch }}) + runs-on: ${{ matrix.os }} + + steps: + - name: Cache installation directories + id: kokkos-cache + uses: actions/cache@v3 + with: + path: ${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.os }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} + + - name: Install clang + run: | + brew install libomp + + - name: Clone Kokkos libs + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + git clone 
https://github.com/kokkos/kokkos.git + cd kokkos + git checkout ${{ matrix.kokkos_version }} + cd - + pushd . &> /dev/null + + - uses: actions/setup-python@v4 + name: Install Python + with: + python-version: '3.9' + + - name: Build Kokkos core library + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + mkdir -p ${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} + cd kokkos + python -m pip install cmake ninja + + cmake -BBuild . -DCMAKE_INSTALL_PREFIX=${{ github.workspace}}/Kokkos_install/${{ matrix.exec_model }} \ + -DKokkos_ENABLE_COMPLEX_ALIGN=OFF \ + -DKokkos_ENABLE_SERIAL=ON \ + -DKokkos_ENABLE_${{ matrix.exec_model }}=ON \ + -DKokkos_ENABLE_DEPRECATION_WARNINGS=OFF \ + -DCMAKE_CXX_STANDARD=20 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DCMAKE_CXX_COMPILER=g++ \ + -DOpenMP_ROOT=$(brew --prefix libomp) \ + -G Ninja + cmake --build ./Build --verbose + cmake --install ./Build + cd - + + mac-wheels-x86: + needs: [set_wheel_build_matrix, build_dependencies] strategy: fail-fast: false matrix: os: [macos-12] arch: [x86_64] + pl_backend: ["lightning_kokkos", "lightning_qubit"] cibw_build: ${{fromJson(needs.set_wheel_build_matrix.outputs.python_version)}} + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} - name: ${{ matrix.os }} (Python ${{ fromJson('{ "cp38-*":"3.8","cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) + name: ${{ matrix.os }} - ${{ matrix.pl_backend }} (Python ${{ fromJson('{ "cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) runs-on: ${{ matrix.os }} steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 + - name: Checkout PennyLane-Lightning + uses: actions/checkout@v3 + + - name: Restoring cached dependencies + id: kokkos-cache + uses: actions/cache@v3 with: - access_token: ${{ github.token }} + path: ${{ 
github.workspace}}/Kokkos_install/${{ matrix.exec_model }} + key: ${{ matrix.os }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }} - - uses: actions/checkout@v3 + - name: Copy cached libraries + run: | + mkdir Kokkos + cp -rf ${{ github.workspace }}/Kokkos_install/${{ matrix.exec_model }}/* Kokkos/ - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Install cibuildwheel run: python -m pip install cibuildwheel~=2.11.0 @@ -64,13 +138,18 @@ jobs: CIBW_BEFORE_BUILD: | python -m pip install pybind11 ninja cmake~=3.24.0 setuptools + PL_BACKEND: ${{ matrix.pl_backend }} + # Testing of built wheels CIBW_TEST_REQUIRES: pytest pytest-cov pytest-mock flaky - CIBW_BEFORE_TEST: python -m pip install git+https://github.com/PennyLaneAI/pennylane.git@master + CIBW_BEFORE_TEST: | + python -m pip install git+https://github.com/PennyLaneAI/pennylane.git@master + if ${{ matrix.pl_backend == 'lightning_kokkos'}}; then SKIP_COMPILATION=True PL_BACKEND="lightning_qubit" pip install -e . 
-vv; fi CIBW_TEST_COMMAND: | - pl-device-test --device=lightning.qubit --skip-ops -x --tb=short --no-flaky-report + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + pl-device-test --device=${DEVICENAME} --skip-ops -x --tb=short --no-flaky-report CIBW_BUILD_VERBOSITY: 1 @@ -84,26 +163,32 @@ jobs: python -m pip install twine python -m twine check ./wheelhouse/*.whl - - uses: actions-ecosystem/action-regex-match@v2 + - uses: actions-ecosystem/action-regex-match@main id: rc_build with: text: ${{ github.event.pull_request.head.ref }} regex: '.*[0-9]+.[0-9]+.[0-9]+[-_]?rc[0-9]+' - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != ''}} with: - name: ${{ runner.os }}-wheels-${{ matrix.arch }}.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: ./wheelhouse/*.whl upload-pypi: needs: mac-wheels-x86 - runs-on: ubuntu-latest - if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' }} + strategy: + matrix: + os: [macos-12] + arch: [x86_64] + pl_backend: ["lightning_qubit"] + runs-on: ${{ matrix.os }} + + if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master'}} steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: macOS-wheels-x86_64.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: dist - name: Upload wheels to PyPI @@ -111,4 +196,4 @@ jobs: with: user: __token__ password: ${{ secrets.TEST_PYPI_API_TOKEN }} - repository_url: https://test.pypi.org/legacy/ + repository_url: https://test.pypi.org/legacy/ \ No newline at end of file diff --git a/.github/workflows/wheel_noarch.yml b/.github/workflows/wheel_noarch.yml index 96f0d83a81..739d78a89c 100644 --- a/.github/workflows/wheel_noarch.yml +++ b/.github/workflows/wheel_noarch.yml @@ -1,7 +1,7 @@ name: Wheel::Any::None # 
**What it does**: Builds a pure python wheel for Linux (ubuntu-latest) and store it as an artifact. -# Python version: 3.8. +# Python version: 3.9. # **Why we have it**: To test the wheel build in the python layer, with no compilation. # **Who does it impact**: Wheels to be uploaded to PyPI. @@ -13,9 +13,20 @@ on: release: types: [published] +concurrency: + group: wheel_noarch-${{ github.ref }} + cancel-in-progress: true + jobs: build-pure-python-wheel: - runs-on: ubuntu-latest + strategy: + matrix: + os: [ubuntu-latest] + pl_backend: ["lightning_kokkos", "lightning_qubit"] + + name: ${{ matrix.os }} - Pure Python wheels - ${{ matrix.pl_backend }} (Python 3.9) + runs-on: ${{ matrix.os }} + steps: - name: Checkout PennyLane-Lightning uses: actions/checkout@v3 @@ -24,7 +35,7 @@ jobs: - uses: actions/setup-python@v4 with: - python-version: '3.8' + python-version: '3.9' - name: Upgrade pip run: | @@ -47,20 +58,24 @@ jobs: python -m pip install twine python -m twine check main/dist/*.whl - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' }} with: - name: pure-python-wheels.zip + name: pure-python-wheels-${{ matrix.pl_backend }}.zip path: main/dist/*.whl upload-pypi: needs: build-pure-python-wheel - runs-on: ubuntu-latest + strategy: + matrix: + pl_backend: ["lightning_qubit"] + runs-on: ubuntu-latest + if: ${{ github.event_name == 'release' }} steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: pure-python-wheels.zip + name: pure-python-wheels-${{ matrix.pl_backend }}.zip path: dist - name: Upload wheels to PyPI diff --git a/.github/workflows/wheel_win_x86_64.yml b/.github/workflows/wheel_win_x86_64.yml index 6786184261..682c44a5ca 100644 --- a/.github/workflows/wheel_win_x86_64.yml +++ b/.github/workflows/wheel_win_x86_64.yml @@ -1,7 +1,7 @@ name: Wheel::Windows::x86_64 # **What it does**: Builds python wheels for Windows
(windows-latest) and store it as artifacts. -# Python versions: 3.8, 3.9, 3.10, 3.11. +# Python versions: 3.9, 3.10, 3.11. # **Why we have it**: To build wheels for pennylane-lightning installation. # **Who does it impact**: Wheels to be uploaded to PyPI. @@ -17,6 +17,10 @@ env: DISTUTILS_USE_SDK: 1 MSSdk: 1 +concurrency: + group: wheel_win_x86_64-${{ github.ref }} + cancel-in-progress: true + jobs: set_wheel_build_matrix: name: "Set wheel build matrix" @@ -24,24 +28,93 @@ jobs: with: event_name: ${{ github.event_name }} - win-wheels: + build_dependencies: needs: [set_wheel_build_matrix] strategy: fail-fast: false matrix: - os: [windows-2019] + os: [windows-2022] + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} + + name: Kokkos core (${{ matrix.exec_model }}) + runs-on: ${{ matrix.os }} + + steps: + - name: Cache installation directories + id: kokkos-cache + uses: actions/cache@v3 + with: + path: D:\a\install_dir\${{ matrix.exec_model }} + key: ${{ matrix.os }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }}-RelWithDebInfo + + - name: Clone Kokkos libs + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + cd D:\a + git clone https://github.com/kokkos/kokkos.git + cd D:\a\kokkos + git checkout ${{ matrix.kokkos_version }} + cd .. + + - name: Create installation directory + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + Remove-Item -Path D:\a\install_dir\${{ matrix.exec_model }} -Recurse -Force -ErrorAction Ignore + mkdir -p D:\a\install_dir\${{ matrix.exec_model }} + + - name: Install dependencies + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + python -m pip install cmake build + + - name: Build Kokkos core library + if: steps.kokkos-cache.outputs.cache-hit != 'true' + run: | + cd D:\a\kokkos + cmake -BBuild . 
-DCMAKE_INSTALL_PREFIX=D:\a\install_dir\${{ matrix.exec_model }} ` + -DKokkos_ENABLE_COMPLEX_ALIGN=OFF ` + -DKokkos_ENABLE_SERIAL=ON ` + -DKokkos_ENABLE_${{ matrix.exec_model }}=ON ` + -DKokkos_ENABLE_DEPRECATION_WARNINGS=OFF ` + -DCMAKE_CXX_STANDARD=20 ` + -DCMAKE_POSITION_INDEPENDENT_CODE=ON ` + -DCMAKE_BUILD_TYPE=RelWithDebInfo ` + -T clangcl + cmake --build ./Build --config RelWithDebInfo --verbose + cmake --install ./Build --config RelWithDebInfo --verbose + + win-wheels: + needs: [set_wheel_build_matrix, build_dependencies] + strategy: + fail-fast: false + matrix: + os: [windows-2022] arch: [AMD64] + pl_backend: ["lightning_qubit"] cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} - name: ${{ matrix.os }} (Python ${{ fromJson('{ "cp38-*":"3.8","cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) + exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} + kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} + + name: ${{ matrix.os }} - ${{ matrix.pl_backend }} (Python ${{ fromJson('{ "cp39-*":"3.9","cp310-*":"3.10","cp311-*":"3.11" }')[matrix.cibw_build] }}) runs-on: ${{ matrix.os }} steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.10.0 + - name: Restoring cached dependencies + id: kokkos-cache + uses: actions/cache@v3 with: - access_token: ${{ github.token }} + path: D:\a\install_dir\${{ matrix.exec_model }} + key: ${{ matrix.os }}-kokkos${{ matrix.kokkos_version }}-${{ matrix.exec_model }}-RelWithDebInfo - - uses: actions/checkout@v3 + - name: Checkout PennyLane-Lightning + uses: actions/checkout@v3 + + - name: Copy cached libraries #Update when merging to pennylane-lightning + if: steps.kokkos-cache.outputs.cache-hit == 'true' + run: | + Copy-Item -Path "D:\a\install_dir\${{ matrix.exec_model }}\" ` + -Destination "D:\a\Lightning-Unification\Lightning-Unification\Kokkos" -Recurse -Force - name: Install cibuildwheel run: 
python -m pip install cibuildwheel~=2.11.0 wheel @@ -54,13 +127,13 @@ jobs: # Python build settings CIBW_BEFORE_BUILD: | - pip install pybind11 cmake~=3.24.0 build + python -m pip install pybind11 cmake~=3.24.0 build # Testing of built wheels CIBW_TEST_REQUIRES: pytest pytest-cov pytest-mock flaky CIBW_BEFORE_TEST: | - pip install git+https://github.com/PennyLaneAI/pennylane.git@master + python -m pip install git+https://github.com/PennyLaneAI/pennylane.git@master CIBW_TEST_COMMAND: | pl-device-test --device=lightning.qubit --skip-ops -x --tb=short --no-flaky-report @@ -96,26 +169,32 @@ jobs: python -m pip install twine python -m twine check ./wheelhouse/*.whl - - uses: actions-ecosystem/action-regex-match@v2 + - uses: actions-ecosystem/action-regex-match@main id: rc_build with: text: ${{ github.event.pull_request.head.ref }} regex: '.*[0-9]+.[0-9]+.[0-9]+[-_]?rc[0-9]+' - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != ''}} with: - name: ${{ runner.os }}-wheels-${{ matrix.arch }}.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: ./wheelhouse/*.whl upload-pypi: needs: win-wheels - runs-on: ubuntu-latest + strategy: + matrix: + os: [windows-2022] + arch: [AMD64] + pl_backend: ["lightning_qubit"] + runs-on: ${{ matrix.os }} + if: ${{ github.event_name == 'release' || github.ref == 'refs/heads/master'}} steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: Windows-wheels-AMD64.zip + name: ${{ runner.os }}-wheels-${{ matrix.pl_backend }}-${{ matrix.arch }}.zip path: dist - name: Upload wheels to PyPI @@ -123,4 +202,4 @@ jobs: with: user: __token__ password: ${{ secrets.TEST_PYPI_API_TOKEN }} - repository_url: https://test.pypi.org/legacy/ + repository_url: https://test.pypi.org/legacy/ \ No newline at end of file diff --git a/.gitignore b/.gitignore 
index b998491e7e..4b175f072c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,10 @@ venv/ +kokkos/ +prototypes/ doc/_build/ +doc/code/api/ PennyLane_Lightning.egg-info/ +PennyLane_Lightning_Kokkos.egg-info/ build/ Build/ BuildCov/ @@ -16,16 +20,15 @@ __pycache__ .pytest_cache/ coverage_html_report/ .coverage -doc/code/ *.so cpptests *.o .DS_Store .cache/* -.vscode/* +.vscode/ .ycm_extra_conf.py /.vs /pennylane_lightning/.vs /pennylane_lightning/*.pyd /pennylane_lightning/src/Kokkos/ -/pennylane_lightning/src/GBenchmarks/ +/pennylane_lightning/src/GBenchmarks/ \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..3268036a65 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +repos: +- repo: https://github.com/psf/black + rev: 23.7.0 + hooks: + - id: black + args: [--line-length=100] + exclude: ^(bin/|doc/|scripts/) +- repo: local + hooks: + - id: pylint + name: pylint + entry: pylint + language: system + types: [python] + args: + [ + "-rn", # Only display messages + "-sn", # Don't display the score + "--rcfile=.pylintrc", # Link to your config file + ] + exclude: ^(bin/|doc/|scripts/|setup.py|tests/) diff --git a/CMakeLists.txt b/CMakeLists.txt index d96e94ab79..8047cca61f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,18 +1,18 @@ -cmake_minimum_required(VERSION 3.16) +cmake_minimum_required(VERSION 3.20) set(LOGO [=[ -░█░░░▀█▀░█▀▀░█░█░▀█▀░█▀█░▀█▀░█▀█░█▀▀░░░░▄▀▄░█░█░█▀▄░▀█▀░▀█▀ -░█░░░░█░░█░█░█▀█░░█░░█░█░░█░░█░█░█░█░░░░█\█░█░█░█▀▄░░█░░░█░ -░▀▀▀░▀▀▀░▀▀▀░▀░▀░░▀░░▀░▀░▀▀▀░▀░▀░▀▀▀░▀░░░▀\░▀▀▀░▀▀░░▀▀▀░░▀░ +░█░░░▀█▀░█▀▀░█░█░▀█▀░█▀█░▀█▀░█▀█░█▀▀░ +░█░░░░█░░█░█░█▀█░░█░░█░█░░█░░█░█░█░█░ +░▀▀▀░▀▀▀░▀▀▀░▀░▀░░▀░░▀░▀░▀▀▀░▀░▀░▀▀▀░ ]=]) message(${LOGO}) -set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") +set(CMAKE_OSX_DEPLOYMENT_TARGET "11" CACHE STRING "Minimum OS X deployment version") set(CMAKE_CXX_STANDARD 20) # At least C++20 is required 
project(pennylane_lightning - DESCRIPTION "C++ state-vector simulator bindings for PennyLane. " + DESCRIPTION "C++ suite of state-vector simulators bindings for PennyLane. " LANGUAGES CXX ) @@ -29,7 +29,7 @@ function(set_pennylane_lightning_version VERSION_FILE_PATH) set(VERSION_STRING ${VERSION_STRING} PARENT_SCOPE) endfunction() -set_pennylane_lightning_version(${PROJECT_SOURCE_DIR}/pennylane_lightning/_version.py) +set_pennylane_lightning_version(${PROJECT_SOURCE_DIR}/pennylane_lightning/core/_version.py) message(STATUS "pennylane_lightning version ${VERSION_STRING}") set(PROJECT_VERSION ${VERSION_STRING}) @@ -45,21 +45,23 @@ option(ENABLE_CLANG_TIDY "Enable clang-tidy build checks" OFF) option(ENABLE_COVERAGE "Enable code coverage" OFF) option(ENABLE_WARNINGS "Enable warnings" ON) option(ENABLE_NATIVE "Enable native CPU build tuning" OFF) +option(ENABLE_PYTHON "Enable compilation of the Python module" ON) + +# OpenMP find_package(OpenMP) if (OpenMP_CXX_FOUND) option(ENABLE_OPENMP "Enable OpenMP" ON) else() option(ENABLE_OPENMP "Enable OpenMP" OFF) endif() -option(ENABLE_KOKKOS "Enable Kokkos" OFF) -option(ENABLE_BLAS "Enable BLAS" OFF) -option(ENABLE_PYTHON "Enable compilation of the Python module" ON) -option(ENABLE_GATE_DISPATCHER "Enable gate kernel dispatching on AVX/AVX2/AVX512" ON) # Other build options option(BUILD_TESTS "Build cpp tests" OFF) option(BUILD_BENCHMARKS "Enable cpp benchmarks" OFF) +# Backend +set(PL_BACKEND "lightning_qubit" CACHE STRING "PennyLane Lightning backend") + # Process compile options list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include("${CMAKE_CURRENT_SOURCE_DIR}/cmake/process_options.cmake") @@ -80,33 +82,38 @@ endif() set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) # All CMakeLists.txt in subdirectories use pennylane_lightning_compile_options and pennylane_lightning_external_libs -add_subdirectory(pennylane_lightning/src) +add_subdirectory(pennylane_lightning/core/src) 
##################################################### # Maintain for dependent external package development ##################################################### add_library(pennylane_lightning INTERFACE) -target_link_libraries(pennylane_lightning INTERFACE lightning_utils - lightning_simulator - lightning_algorithms - lightning_gates -) target_include_directories(pennylane_lightning INTERFACE "$") -##################################################### if(ENABLE_PYTHON) - pybind11_add_module(lightning_qubit_ops "pennylane_lightning/src/bindings/Bindings.cpp") - target_link_libraries(lightning_qubit_ops PRIVATE lightning_algorithms - lightning_gates - lightning_simulator - lightning_utils) + message(STATUS "ENABLE_PYTHON is ON.") + pybind11_add_module("${PL_BACKEND}_ops" "pennylane_lightning/core/src/bindings/Bindings.cpp") + + target_link_libraries("${PL_BACKEND}_ops" PRIVATE lightning_compile_options + lightning_external_libs + ) + + target_link_libraries("${PL_BACKEND}_ops" PRIVATE lightning_observables + lightning_utils + lightning_algorithms + ) + + target_link_libraries("${PL_BACKEND}_ops" PRIVATE ${PL_BACKEND} #simulator + "${PL_BACKEND}_algorithms" + "${PL_BACKEND}_observables" + "${PL_BACKEND}_bindings" + "${PL_BACKEND}_measurements" + ) - target_link_libraries(lightning_qubit_ops PRIVATE lightning_compile_options - lightning_external_libs) - set_target_properties(lightning_qubit_ops PROPERTIES CXX_VISIBILITY_PRESET hidden) + set_target_properties("${PL_BACKEND}_ops" PROPERTIES CXX_VISIBILITY_PRESET hidden) - target_compile_definitions(lightning_qubit_ops PRIVATE VERSION_INFO=${VERSION_STRING}) + target_compile_definitions("${PL_BACKEND}_ops" PRIVATE VERSION_INFO=${VERSION_STRING}) endif() install(TARGETS pennylane_lightning @@ -117,9 +124,9 @@ install(TARGETS pennylane_lightning PUBLIC_HEADER DESTINATION include ) -install(DIRECTORY - ${PROJECT_SOURCE_DIR}/pennylane_lightning/src - DESTINATION include/pennylane_lightning +install(DIRECTORY + 
${PROJECT_SOURCE_DIR}/pennylane_lightning/core/src + DESTINATION include/pennylane_lightning/core/ ) if (BUILD_TESTS) diff --git a/Makefile b/Makefile index 9d3a2a3a5f..2138be1e4e 100644 --- a/Makefile +++ b/Makefile @@ -4,134 +4,115 @@ PYTHON := python3 COVERAGE := --cov=pennylane_lightning --cov-report term-missing --cov-report=html:coverage_html_report TESTRUNNER := -m pytest tests --tb=short -LIGHTNING_CPP_DIR := pennylane_lightning/src/ +ifdef verbose + VERBOSE := --verbose +else + VERBOSE := +endif + +ifdef check + CHECK := --check +else + CHECK := +endif .PHONY: help help: @echo "Please use \`make ' where is one of" - @echo " install to install PennyLane-Lightning" - @echo " wheel to build the PennyLane-Lightning wheel" - @echo " dist to package the source distribution" - @echo " docs to generate documents" - @echo " clean to delete all temporary, cache, and build files" - @echo " clean-docs to delete all built documentation" - @echo " test to run the test suite" - @echo " test-cpp to run the C++ test suite" - @echo " test-python to run the Python test suite" - @echo " coverage to generate a coverage report" - @echo " format [check=1] to apply C++ and Python formatter; use with 'check=1' to check instead of modify (requires black and clang-format)" - @echo " format [version=?] 
to apply C++ and Python formatter; use with 'version={version}' to check or modify with clang-format-{version} instead of clang-format" - @echo " check-tidy to build PennyLane-Lightning with ENABLE_CLANG_TIDY=ON (requires clang-tidy & CMake)" - -.PHONY: install -install: -ifndef PYTHON3 - @echo "To install PennyLane-Lightning you need to have Python 3 installed" -endif - $(PYTHON) setup.py install - -.PHONY: wheel -wheel: - $(PYTHON) setup.py bdist_wheel - -.PHONY: dist -dist: - $(PYTHON) setup.py sdist + @echo " docs to generate documents" + @echo " clean to delete all temporary, cache, and build files" + @echo " clean-docs to delete all built documentation" + @echo " test to run the test suite" + @echo " test-cpp [backend=?] to run the C++ test suite (requires CMake)" + @echo " Default: lightning_qubit" + @echo " test-cpp [verbose=1] to run the C++ test suite (requires CMake)" + @echo " use with 'verbose=1' for building with verbose flag" + @echo " test-cpp [target=?] to run a specific C++ test target (requires CMake)." + @echo " coverage-cpp [backend=?] to generate a coverage report for python interface" + @echo " Default: lightning_qubit" + @echo " test-python [device=?] to run the Python test suite" + @echo " Default: lightning.qubit" + @echo " coverage [device=?] to generate a coverage report for python interface" + @echo " Default: lightning.qubit" + @echo " format [check=1] to apply C++ and Python formatter;" + @echo " use with 'check=1' to check instead of modify (requires black and clang-format)" + @echo " format [version=?] to apply C++ and Python formatter;" + @echo " use with 'version={version}' to check or modify with clang-format-{version} instead of clang-format" + @echo " check-tidy [backend=?] 
to build PennyLane-Lightning with ENABLE_CLANG_TIDY=ON (requires clang-tidy & CMake)" + @echo " Default: lightning_qubit" + @echo " check-tidy [verbose=1] to build PennyLane-Lightning with ENABLE_CLANG_TIDY=ON (requires clang-tidy & CMake)" + @echo " use with 'verbose=1' for building with verbose flag" + @echo " check-tidy [target=?] to build a specific PennyLane-Lightning target with ENABLE_CLANG_TIDY=ON (requires clang-tidy & CMake)" .PHONY : clean clean: - $(PYTHON) setup.py clean --all - $(MAKE) -C doc clean find . -type d -name '__pycache__' -exec rm -r {} \+ - rm -rf dist - rm -rf build - rm -rf BuildTests BuildTidy BuildGBench + rm -rf build Build BuildTests BuildTidy BuildGBench rm -rf .coverage coverage_html_report/ - rm -rf tmp - rm -rf *.dat - rm -rf pennylane_lightning/lightning_qubit_ops* - -docs: - $(MAKE) -C doc html - -.PHONY : clean-docs -clean-docs: - $(MAKE) -C doc clean + rm -rf pennylane_lightning/*_ops* .PHONY : test-builtin test-suite test-python coverage coverage-cpp test-cpp test-cpp-no-omp test-cpp-blas test-cpp-kokkos test-builtin: - $(PYTHON) -I $(TESTRUNNER) + PL_DEVICE=$(if $(device:-=),$(device),lightning.qubit) $(PYTHON) -I $(TESTRUNNER) test-suite: - pl-device-test --device lightning.qubit --skip-ops --shots=20000 - pl-device-test --device lightning.qubit --shots=None --skip-ops + pl-device-test --device $(if $(device:-=),$(device),lightning.qubit) --skip-ops --shots=20000 + pl-device-test --device $(if $(device:-=),$(device),lightning.qubit) --shots=None --skip-ops test-python: test-builtin test-suite coverage: - @echo "Generating coverage report..." 
+ @echo "Generating coverage report for $(if $(device:-=),$(device),lightning.qubit) device:" $(PYTHON) $(TESTRUNNER) $(COVERAGE) - pl-device-test --device lightning.qubit --skip-ops --shots=20000 $(COVERAGE) --cov-append - pl-device-test --device lightning.qubit --shots=None --skip-ops $(COVERAGE) --cov-append + pl-device-test --device $(if $(device:-=),$(device),lightning.qubit) --skip-ops --shots=20000 $(COVERAGE) --cov-append + pl-device-test --device $(if $(device:-=),$(device),lightning.qubit) --shots=None --skip-ops $(COVERAGE) --cov-append coverage-cpp: - @echo "Generating cpp coverage report in BuildCov/out .." + @echo "Generating cpp coverage report in BuildCov/out for $(if $(backend:-=),$(backend),lightning_qubit) backend" rm -rf ./BuildCov - cmake pennylane_lightning/src -BBuildCov -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTS=ON -DENABLE_COVERAGE=ON + cmake -BBuildCov -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTS=ON -DENABLE_COVERAGE=ON -DPL_BACKEND=$(if $(backend:-=),$(backend),lightning_qubit) cmake --build ./BuildCov - cd ./BuildCov; ./tests/pennylane_lightning_test_runner; \ - lcov --directory . -b ../pennylane_lightning/src --capture --output-file coverage.info; \ + cd ./BuildCov; for file in *runner ; do ./$$file; done; \ + lcov --directory 
-b ../pennylane_lightning/core/src --capture --output-file coverage.info; \ genhtml coverage.info --output-directory out +build: + rm -rf ./Build + cmake -BBuild -DENABLE_BLAS=ON -DENABLE_KOKKOS=ON -DENABLE_WARNINGS=ON -DPL_BACKEND=$(if $(backend:-=),$(backend),lightning_qubit) + cmake --build ./Build $(VERBOSE) + test-cpp: rm -rf ./BuildTests - cmake $(LIGHTNING_CPP_DIR) -BBuildTests -DBUILD_TESTS=ON - cmake --build ./BuildTests --target pennylane_lightning_test_runner - cmake --build ./BuildTests --target test + cmake -BBuildTests -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTS=ON -DENABLE_KOKKOS=ON -DENABLE_OPENMP=ON -DENABLE_WARNINGS=ON -DPL_BACKEND=$(if $(backend:-=),$(backend),lightning_qubit) +ifdef target + cmake --build ./BuildTests $(VERBOSE) --target $(target) + OMP_PROC_BIND=false ./BuildTests/$(target) +else + cmake --build ./BuildTests $(VERBOSE) + OMP_PROC_BIND=false cmake --build ./BuildTests $(VERBOSE) --target test +endif test-cpp-blas: rm -rf ./BuildTests - cmake $(LIGHTNING_CPP_DIR) -BBuildTests -DBUILD_TESTS=ON -DENABLE_BLAS=ON - cmake --build ./BuildTests --target pennylane_lightning_test_runner - cmake --build ./BuildTests --target test - -test-cpp-no-omp: - rm -rf ./BuildTests - cmake $(LIGHTNING_CPP_DIR) -BBuildTests -DBUILD_TESTS=ON -DENABLE_OPENMP=OFF - cmake --build ./BuildTests --target pennylane_lightning_test_runner - cmake --build ./BuildTests --target test + cmake -BBuildTests -DBUILD_TESTS=ON -DENABLE_BLAS=ON -DENABLE_WARNINGS=ON -DPL_BACKEND=$(if $(backend:-=),$(backend),lightning_qubit) + cmake --build ./BuildTests $(VERBOSE) + cmake --build ./BuildTests $(VERBOSE) --target test -test-cpp-kokkos: - rm -rf ./BuildTests - cmake $(LIGHTNING_CPP_DIR) -BBuildTests -DBUILD_TESTS=ON -DENABLE_KOKKOS=ON - cmake --build ./BuildTests --target pennylane_lightning_test_runner - cmake --build ./BuildTests --target test - -.PHONY: gbenchmark -gbenchmark: - rm -rf ./BuildGBench - cmake $(LIGHTNING_CPP_DIR) -BBuildGBench -DBUILD_BENCHMARKS=ON 
-DENABLE_OPENMP=ON -DENABLE_BLAS=ON -DCMAKE_BUILD_TYPE=Release -DBLA_VENDOR=OpenBLAS - cmake --build ./BuildGBench - -.PHONY: format format-cpp format-python +.PHONY: format format-cpp format: format-cpp format-python format-cpp: -ifdef check - ./bin/format --check --cfversion $(if $(version:-=),$(version),0) ./pennylane_lightning/src -else - ./bin/format --cfversion $(if $(version:-=),$(version),0) ./pennylane_lightning/src -endif + ./bin/format $(CHECK) --cfversion $(if $(version:-=),$(version),0) ./pennylane_lightning format-python: -ifdef check - black -l 100 ./pennylane_lightning/ ./tests --check -else - black -l 100 ./pennylane_lightning/ ./tests -endif + black -l 100 ./pennylane_lightning/ ./tests $(CHECK) .PHONY: check-tidy check-tidy: rm -rf ./BuildTidy - cmake . -BBuildTidy -DENABLE_CLANG_TIDY=ON -DBUILD_TESTS=ON - cmake --build ./BuildTidy + cmake -BBuildTidy -DENABLE_CLANG_TIDY=ON -DBUILD_TESTS=ON -DENABLE_WARNINGS=ON -DPL_BACKEND=$(if $(backend:-=),$(backend),lightning_qubit) +ifdef target + cmake --build ./BuildTidy $(VERBOSE) --target $(target) +else + cmake --build ./BuildTidy $(VERBOSE) +endif \ No newline at end of file diff --git a/README.rst b/README.rst index 704709e67c..9d484d0778 100644 --- a/README.rst +++ b/README.rst @@ -35,7 +35,7 @@ PennyLane-Lightning Plugin .. header-start-inclusion-marker-do-not-remove -The PennyLane-Lightning plugin provides a fast state-vector simulator written in C++. +The PennyLane-Lightning plugin provides fast state-vector simulators written in C++. `PennyLane `_ is a cross-platform Python library for quantum machine learning, automatic differentiation, and optimization of hybrid quantum-classical computations. 
@@ -46,7 +46,7 @@ learning, automatic differentiation, and optimization of hybrid quantum-classica Features ======== -* Combine PennyLane-Lightning's high performance simulator with PennyLane's +* Combine PennyLane-Lightning's high performance simulators with PennyLane's automatic differentiation and optimization. .. installation-start-inclusion-marker-do-not-remove @@ -55,7 +55,7 @@ Features Installation ============ -PennyLane-Lightning requires Python version 3.8 and above. It can be installed using ``pip``: +PennyLane-Lightning requires Python version 3.9 and above. It can be installed using ``pip``: .. code-block:: console @@ -223,6 +223,16 @@ All contributors to this plugin will be listed as authors on the releases. We also encourage bug reports, suggestions for new features and enhancements, and even links to cool projects or applications built on PennyLane. +Black & Pylint +============== + +If you contribute to the Python code, please mind the following. +The Python code is formatted with the PEP 8 compliant opinionated formatter `Black `_ (`black==23.7.0`). +We set a line width of 100 characters. +The Python code is statically analyzed with `Pylint `_. +We set up a pre-commit hook (see `Git hooks `_) to run both of these on `git commit`. +Please make your best effort to comply with `black` and `pylint` before resorting to disabling pragmas (e.g. `# pylint: disable=missing-function-docstring`). + Authors ======= @@ -269,6 +279,5 @@ PennyLane Lightning makes use of the following libraries and tools, which are un - **pybind11:** https://github.com/pybind/pybind11 - **Kokkos Core:** https://github.com/kokkos/kokkos -- **Kokkos Kernels:** https://github.com/kokkos/kokkos-kernels -.. acknowledgements-end-inclusion-marker-do-not-remove +..
acknowledgements-end-inclusion-marker-do-not-remove \ No newline at end of file diff --git a/bin/cpp-files b/bin/cpp-files deleted file mode 100755 index 7ccd202783..0000000000 --- a/bin/cpp-files +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/python3 - -import argparse -import json -import sys - -from utils import get_cpp_files - - -if __name__ == '__main__': - """ - This program output a json list of all C++ source files. - """ - parser = argparse.ArgumentParser( - description="Output C/C++ files in json list" - ) - parser.add_argument( - "--header-only", action='store_true', dest='header_only', help="whether only include header files" - ) - parser.add_argument( - "paths", nargs="+", metavar="DIR", help="paths to the root source directories" - ) - parser.add_argument( - "--exclude-dirs", dest="exclude_dirs", nargs="*", metavar="DIR", help="paths exclude from" - ) - - args = parser.parse_args() - - files = set(get_cpp_files(args.paths, header_only = args.header_only)) - if args.exclude_dirs: - files_excludes = set(get_cpp_files(args.exclude_dirs, header_only = args.header_only)) - files -= files_excludes - - json.dump(list(files), sys.stdout) diff --git a/bin/utils.py b/bin/cpp_files.py similarity index 100% rename from bin/utils.py rename to bin/cpp_files.py diff --git a/bin/format b/bin/format index 7b116f9185..17357876f4 100755 --- a/bin/format +++ b/bin/format @@ -3,10 +3,9 @@ import argparse import json import re -import shutil import subprocess import sys -from utils import get_cpp_files +from cpp_files import get_cpp_files CLANG_FMT_BIN = "clang-format" CLANG_FMT_STYLE_CFG = { @@ -30,7 +29,7 @@ def parse_version(version_string): def check_bin(command): try: - p = subprocess.run([command, "--version"], + p = subprocess.run([command, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) version = parse_version(p.stdout) diff --git a/cmake/FindMKL.cmake b/cmake/FindMKL.cmake index 64f6d37f93..588f868d32 100644 --- 
a/cmake/FindMKL.cmake +++ b/cmake/FindMKL.cmake @@ -47,7 +47,7 @@ if (MKLROOT_PATH) elseif (CMAKE_SYSTEM_NAME MATCHES "Linux") set(EXPECT_MKL_LIBPATH "${MKLROOT_PATH}/lib/intel64") endif() - + ########################################################### # Set MKL_INCLUDE and MKL_LIBRARY_DIR ########################################################### @@ -65,7 +65,7 @@ if (MKLROOT_PATH) ########################################################### find_library(LIB_MKL_RT NAMES mkl_rt mkl_rt.1 HINTS ${MKL_LIBRARY_DIR}) - find_library(LIB_PTHREAD NAMES pthread) + find_library(LIB_PTHREAD NAMES pthread) endif (MKLROOT_PATH) @@ -77,10 +77,10 @@ set(MKL_LIBRARY "${LIB_MKL_RT};${LIB_PTHREAD}") include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(MKL DEFAULT_MSG +find_package_handle_standard_args(MKL DEFAULT_MSG MKL_LIBRARY_DIR LIB_MKL_RT LIB_PTHREAD MKL_INCLUDE_DIR) -mark_as_advanced(LIB_MKL_RT LIB_PTHREAD MKL_INCLUDE_DIR) +mark_as_advanced(LIB_MKL_RT LIB_PTHREAD MKL_INCLUDE_DIR) \ No newline at end of file diff --git a/cmake/process_options.cmake b/cmake/process_options.cmake index 1fefa35e57..52d6c42100 100644 --- a/cmake/process_options.cmake +++ b/cmake/process_options.cmake @@ -1,139 +1,13 @@ ############################################################################## -# This file processes ENABLE_WARNINGS, ENABLE_NATIVE, ENABLE_OPENMP, -# ENABLE_KOKKOS, and ENABLE_BLAS -# options and produces interface libraries +# This file processes options: +# ENABLE_WARNINGS, ENABLE_NATIVE, ENABLE_OPENMP +# and produces interface libraries: # lightning_compile_options and lightning_external_libs. ############################################################################## # Include this file only once include_guard() -############################################################################## - -# Macro to aid in finding Kokkos with 3 potential install options: -# 1. Fully integrated Kokkos packages and CMake module files -# 2. 
Statically compiled libraries and headers -# 3. Not installed, so fall back to building from source. - -macro(FindKokkos target_name) - find_package(Kokkos - HINTS ${CMAKE_SOURCE_DIR}/kokkos - ${CMAKE_SOURCE_DIR}/Kokkos - ${Kokkos_Core_DIR} - /usr - /usr/local - /opt - /opt/Kokkos - ) - - find_package(KokkosKernels - HINTS ${CMAKE_SOURCE_DIR}/kokkos - ${CMAKE_SOURCE_DIR}/Kokkos - ${CMAKE_SOURCE_DIR}/kokkosKernels - ${CMAKE_SOURCE_DIR}/KokkosKernels - ${Kokkos_Kernels_DIR} - /usr - /usr/local - /opt - /opt/KokkosKernels - ) - if(Kokkos_FOUND AND KokkosKernels_FOUND) - message(STATUS "Found existing Kokkos libraries") - target_link_libraries(${target_name} INTERFACE Kokkos::kokkos Kokkos::kokkoskernels) - return() - else() - message(STATUS "Could not find existing Kokkos package. Searching for precompiled libraries and headers") - - find_library(Kokkos_core_lib - NAME kokkoscore.a libkokkoscore.a kokkoscore.so libkokkoscore.so - HINTS ${CMAKE_SOURCE_DIR}/Kokkos/lib - ${Kokkos_Core_DIR}/lib - ${Kokkos_Core_DIR}/lib64 - /usr/lib - /usr/lib64 - /usr/local/lib - /usr/local/lib64 - ENV LD_LIBRARY_PATH - ) - find_library(Kokkos_Kernels_lib - NAME kokkoskernels.a libkokkoskernels.a kokkoskernels.so libkokkoskernels.so - HINTS ${CMAKE_SOURCE_DIR}/Kokkos/lib - ${Kokkos_Kernels_DIR}/lib - ${Kokkos_Kernels_DIR}/lib64 - /usr/lib - /usr/lib64 - /usr/local/lib - /usr/local/lib64 - ENV LD_LIBRARY_PATH - ) - find_file( Kokkos_core_inc - NAMES Kokkos_Core.hpp - HINTS ${Kokkos_Core_DIR}/include - /usr/include - /usr/local/include - ENV CPATH - ) - find_file( Kokkos_sparse_inc - NAMES KokkosSparse.hpp - HINTS ${Kokkos_Kernels_DIR}/include - /usr/include - /usr/local/include - ENV CPATH - ) - if (Kokkos_core_lib_FOUND AND Kokkos_Kernels_lib_FOUND) - message(STATUS "Found existing Kokkos compiled libraries") - - add_library( kokkos SHARED IMPORTED GLOBAL) - add_library( kokkoskernels SHARED IMPORTED GLOBAL) - - cmake_path(GET Kokkos_core_inc ROOT_PATH Kokkos_INC_DIR) - cmake_path(GET 
Kokkos_sparse_inc ROOT_PATH KokkosKernels_INC_DIR) - - set_target_properties( kokkos PROPERTIES IMPORTED_LOCATION ${Kokkos_core_lib}) - set_target_properties( kokkoskernels PROPERTIES IMPORTED_LOCATION ${Kokkos_Kernels_lib}) - set_target_properties( kokkos PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${Kokkos_INC_DIR}") - set_target_properties( kokkoskernels PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${KokkosKernels_INC_DIR}") - - target_link_libraries(${target_name} PRIVATE kokkos kokkoskernels) - return() - else() - message(STATUS "Building Kokkos from source. SERIAL device enabled.") - - option(Kokkos_ENABLE_SERIAL "Enable Kokkos SERIAL device" ON) - option(Kokkos_ENABLE_COMPLEX_ALIGN "Enable complex alignment in memory" OFF) - - set(CMAKE_POSITION_INDEPENDENT_CODE ON) - include(FetchContent) - - FetchContent_Declare(kokkos - GIT_REPOSITORY https://github.com/kokkos/kokkos.git - GIT_TAG 4.0.01 - GIT_SUBMODULES "" # Avoid recursively cloning all submodules - ) - - FetchContent_MakeAvailable(kokkos) - - get_target_property(kokkos_INC_DIR kokkos INTERFACE_INCLUDE_DIRECTORIES) - set_target_properties(kokkos PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${kokkos_INC_DIR}") - - FetchContent_Declare(kokkoskernels - GIT_REPOSITORY https://github.com/kokkos/kokkos-kernels.git - GIT_TAG 4.0.01 - GIT_SUBMODULES "" # Avoid recursively cloning all submodules - ) - - FetchContent_MakeAvailable(kokkoskernels) - - get_target_property(kokkoskernels_INC_DIR kokkoskernels INTERFACE_INCLUDE_DIRECTORIES) - set_target_properties(kokkoskernels PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${kokkoskernels_INC_DIR}") - target_link_libraries(${target_name} INTERFACE kokkos kokkoskernels) - endif() - endif() -endmacro() - -############################################################################## - - if (WIN32) # Increasing maximum full-path length allowed. 
message("Setting default path length to 240 characters") @@ -169,6 +43,16 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") $<$:-fwrapv;-fno-plt;-pipe>) endif() +if(ENABLE_CLANG_TIDY) + if(NOT DEFINED CLANG_TIDY_BINARY) + set(CLANG_TIDY_BINARY clang-tidy) + endif() + message(STATUS "Using CLANG_TIDY_BINARY=${CLANG_TIDY_BINARY}") + set(CMAKE_CXX_CLANG_TIDY ${CLANG_TIDY_BINARY}; + -extra-arg=-std=c++20; + ) +endif() + if(ENABLE_COVERAGE) message(STATUS "ENABLE_COVERAGE is ON.") target_compile_options(lightning_compile_options INTERFACE @@ -201,41 +85,13 @@ if(ENABLE_OPENMP) "Install OpenMP or set ENABLE_OPENMP OFF.") endif() + target_link_libraries(lightning_compile_options INTERFACE OpenMP::OpenMP_CXX) target_link_libraries(lightning_external_libs INTERFACE OpenMP::OpenMP_CXX) else() message(STATUS "ENABLE_OPENMP is OFF.") endif() -if(ENABLE_BLAS) - message(STATUS "ENABLE_BLAS is ON.") - find_package(MKL QUIET) - - if(MKL_FOUND) - add_definitions("-DENABLE_MKL") - set(BLAS_INCLUDE_DIRS "${MKL_INCLUDE_DIR}") - set(BLAS_LIBRARIES ${MKL_LIBRARY}) - else() - find_package(CBLAS REQUIRED) - set(BLAS_INCLUDE_DIRS ${CBLAS_INCLUDE_DIRS}) - set(BLAS_LIBRARIES ${CBLAS_LIBRARIES}) - endif() - - target_link_libraries(lightning_external_libs INTERFACE "${BLAS_LIBRARIES}") - target_include_directories(lightning_external_libs INTERFACE "${BLAS_INCLUDE_DIRS}") - target_compile_options(lightning_compile_options INTERFACE "-D_ENABLE_BLAS=1") -else() - message(STATUS "ENABLE_BLAS is OFF.") -endif() - -if(ENABLE_KOKKOS) - message(STATUS "ENABLE_KOKKOS is ON.") - target_compile_options(lightning_compile_options INTERFACE "-D_ENABLE_KOKKOS=1") - FindKokkos(lightning_external_libs) -else() - message(STATUS "ENABLE_KOKKOS is OFF.") -endif() - if (UNIX AND (${CMAKE_SYSTEM_PROCESSOR} MATCHES "(AMD64)|(X64)|(x64)|(x86_64)")) message(STATUS "ENABLE AVX for X64 on UNIX compatible system.") target_compile_options(lightning_compile_options INTERFACE -mavx) -endif() \ No newline at end of file 
+endif() diff --git a/cmake/support_kokkos.cmake b/cmake/support_kokkos.cmake new file mode 100644 index 0000000000..e37daa6a20 --- /dev/null +++ b/cmake/support_kokkos.cmake @@ -0,0 +1,86 @@ +#################################################################################### +# This file provides macros to process the Kokkos external library. +#################################################################################### + +# Include this file only once +include_guard() + +set(KOKKOS_VERSION 4.0.01) + +# Macro to aid in finding Kokkos with 3 potential install options: +# 1. Fully integrated Kokkos packages and CMake module files +# 2. Statically compiled libraries and headers +# 3. Not installed, so fall back to building from source. +macro(FindKokkos target_name) + find_package(Kokkos + HINTS ${CMAKE_SOURCE_DIR}/kokkos + ${CMAKE_SOURCE_DIR}/Kokkos + ${Kokkos_Core_DIR} + /usr + /usr/local + /opt + /opt/Kokkos + ) + + if(Kokkos_FOUND) + message(STATUS "Found existing Kokkos library.") + target_link_libraries(${target_name} INTERFACE Kokkos::kokkos) + else() + message(STATUS "Could not find existing Kokkos package. 
Searching for precompiled libraries and headers...") + + find_library(Kokkos_core_lib + NAME kokkoscore.a libkokkoscore.a kokkoscore.so libkokkoscore.so + HINTS ${CMAKE_SOURCE_DIR}/Kokkos/lib + ${Kokkos_Core_DIR}/lib + ${Kokkos_Core_DIR}/lib64 + /usr/lib + /usr/lib64 + /usr/local/lib + /usr/local/lib64 + ENV LD_LIBRARY_PATH + ) + find_file( Kokkos_core_inc + NAMES Kokkos_Core.hpp + HINTS ${Kokkos_Core_DIR}/include + /usr/include + /usr/local/include + ENV CPATH + ) + + if (Kokkos_core_lib_FOUND) + message(STATUS "Found existing Kokkos compiled libraries.") + + add_library( kokkos SHARED IMPORTED GLOBAL) + + cmake_path(GET Kokkos_core_inc ROOT_PATH Kokkos_INC_DIR) + + set_target_properties( kokkos PROPERTIES IMPORTED_LOCATION ${Kokkos_core_lib}) + set_target_properties( kokkos PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${Kokkos_INC_DIR}") + + target_link_libraries(${target_name} PRIVATE kokkos) + else() + message(STATUS "Building Kokkos from source. SERIAL device enabled.") + message(STATUS "Requested Kokkos library version: ${KOKKOS_VERSION}") + + # option(Kokkos_ENABLE_SERIAL "Enable Kokkos SERIAL device" ON) + option(Kokkos_ENABLE_SERIAL "Enable Kokkos SERIAL device" ON) + option(Kokkos_ENABLE_COMPLEX_ALIGN "Enable complex alignment in memory" OFF) + + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + include(FetchContent) + + FetchContent_Declare(kokkos + GIT_REPOSITORY https://github.com/kokkos/kokkos.git + GIT_TAG ${KOKKOS_VERSION} + GIT_SUBMODULES "" # Avoid recursively cloning all submodules + ) + + FetchContent_MakeAvailable(kokkos) + + get_target_property(kokkos_INC_DIR kokkos INTERFACE_INCLUDE_DIRECTORIES) + set_target_properties(kokkos PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${kokkos_INC_DIR}") + + target_link_libraries(${target_name} INTERFACE kokkos) + endif() + endif() +endmacro() \ No newline at end of file diff --git a/cmake/support_simulators.cmake b/cmake/support_simulators.cmake new file mode 100644 index 0000000000..002dc61299 --- 
/dev/null +++ b/cmake/support_simulators.cmake @@ -0,0 +1,38 @@ +#################################################################################### +# This file provides macros to support PennyLane Lightning simulators, +# and to process the PL_BACKEND variable. +#################################################################################### + +# Include this file only once +include_guard() + +# All simulators have their own directory in "simulators" +# This macro will extract this list of directories. +MACRO(FIND_SIMULATORS_LIST RESULT) + set(SIMULATORS_DIR ${CMAKE_SOURCE_DIR}/pennylane_lightning/core/src/simulators) + FILE(GLOB FULL_LIST RELATIVE ${SIMULATORS_DIR} ${SIMULATORS_DIR}/*) + SET(${RESULT} "") + FOREACH(ITEM ${FULL_LIST}) + IF(IS_DIRECTORY ${SIMULATORS_DIR}/${ITEM}) + LIST(APPEND ${RESULT} ${ITEM}) + ENDIF() + ENDFOREACH() +ENDMACRO() + +# Checking if the chosen simulator (or Backend) is valid. + # If valid: its directory will be added to the building process. + # If invalid: An error message, with a list of valid simulators (backends), will be printed out. +MACRO(FIND_AND_ADD_SIMULATOR) + # Finding the list of simulators: + FIND_SIMULATORS_LIST(SIMULATORS_LIST) + + if (${PL_BACKEND} IN_LIST SIMULATORS_LIST) + add_subdirectory(${PL_BACKEND}) + else() + message("Could not find the required backend. Options found are:") + FOREACH(SIMULATOR ${SIMULATORS_LIST}) + message(" * " ${SIMULATOR}) + ENDFOREACH() + message(FATAL_ERROR "Building process will not proceed. Failed to find backend.") + endif() +ENDMACRO() \ No newline at end of file diff --git a/cmake/support_tests.cmake b/cmake/support_tests.cmake new file mode 100644 index 0000000000..5279efceee --- /dev/null +++ b/cmake/support_tests.cmake @@ -0,0 +1,51 @@ +#################################################################################### +# This file provides macros to support the test suite. 
+#################################################################################### + +# Include this file only once +include_guard() + +# This macro fetches Catch2 from its GitHub repository. +# After that Catch2 is configured and included. +macro(FetchAndIncludeCatch) + Include(FetchContent) + + FetchContent_Declare( + Catch2 + GIT_REPOSITORY https://github.com/catchorg/Catch2.git + GIT_TAG v2.13.9 + ) + + FetchContent_MakeAvailable(Catch2) + + get_target_property(CATCH2_IID Catch2 INTERFACE_INCLUDE_DIRECTORIES) + set_target_properties(Catch2 PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CATCH2_IID}") + + # Required for catch_discover_tests() and include(Catch) + list(APPEND CMAKE_MODULE_PATH ${catch2_SOURCE_DIR}/contrib) + + include(Catch) +endmacro() + +# Process the ENABLE_WARNINGS and ENABLE_NATIVE options. +macro(ProcessTestOptions target_name) + if(ENABLE_WARNINGS) + message(STATUS "ENABLE_WARNINGS is ON.") + if(MSVC) + target_compile_options(${target_name} INTERFACE $<$:/W4;/WX>) + else() + target_compile_options(${target_name} INTERFACE $<$:-Wall;-Wextra;-Werror>) + endif() + else() + if(MSVC) + target_compile_options(${target_name} INTERFACE $<$:/W4>) + else() + target_compile_options(${target_name} INTERFACE $<$:-Wall;-Wno-unused>) + endif() + endif() + + if(ENABLE_NATIVE) + message(STATUS "ENABLE_NATIVE is ON. Use -march=native for cpptests.") + target_compile_options(${target_name} INTERFACE -march=native) + endif() +endmacro() \ No newline at end of file diff --git a/pennylane_lightning/src/tests/.clang-tidy b/pennylane_lightning/.clang-tidy similarity index 100% rename from pennylane_lightning/src/tests/.clang-tidy rename to pennylane_lightning/.clang-tidy diff --git a/pennylane_lightning/_serialize.py b/pennylane_lightning/_serialize.py deleted file mode 100644 index aaeff28b50..0000000000 --- a/pennylane_lightning/_serialize.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright 2021 Xanadu Quantum Technologies Inc. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -r""" -Helper functions for serializing quantum tapes. -""" -from typing import List, Tuple - -import numpy as np -from pennylane import ( - BasisState, - Hadamard, - PauliX, - PauliY, - PauliZ, - Identity, - QubitStateVector, - Rot, -) -from pennylane.operation import Tensor -from pennylane.tape import QuantumTape -from pennylane.math import unwrap - -# Remove after the next release of PL -# Add from pennylane import matrix -import pennylane as qml - -try: - from .lightning_qubit_ops import ( - StateVectorC64, - StateVectorC128, - ) - from .lightning_qubit_ops.adjoint_diff import ( - NamedObsC64, - NamedObsC128, - HermitianObsC64, - HermitianObsC128, - TensorProdObsC64, - TensorProdObsC128, - HamiltonianC64, - HamiltonianC128, - OpsStructC64, - OpsStructC128, - ) -except ImportError: - pass - -pauli_name_map = { - "I": "Identity", - "X": "PauliX", - "Y": "PauliY", - "Z": "PauliZ", -} - - -def _serialize_named_obs(ob, wires_map: dict, use_csingle: bool): - """Serializes a Named observable""" - named_obs = NamedObsC64 if use_csingle else NamedObsC128 - wires = [wires_map[w] for w in ob.wires] - if ob.name == "Identity": - wires = wires[:1] - return named_obs(ob.name, wires) - - -def _serialize_hermitian_ob(o, wires_map: dict, use_csingle: bool): - """Serializes a Hermitian observable""" - assert not isinstance(o, Tensor) - - if use_csingle: - ctype = np.complex64 - hermitian_obs = HermitianObsC64 - else: - ctype = 
np.complex128 - hermitian_obs = HermitianObsC128 - - wires = [wires_map[w] for w in o.wires] - return hermitian_obs(qml.matrix(o).ravel().astype(ctype), wires) - - -def _serialize_tensor_ob(ob, wires_map: dict, use_csingle: bool): - """Serialize a tensor observable""" - assert isinstance(ob, Tensor) - - if use_csingle: - tensor_obs = TensorProdObsC64 - else: - tensor_obs = TensorProdObsC128 - - return tensor_obs([_serialize_ob(o, wires_map, use_csingle) for o in ob.obs]) - - -def _serialize_hamiltonian(ob, wires_map: dict, use_csingle: bool): - if use_csingle: - rtype = np.float32 - hamiltonian_obs = HamiltonianC64 - else: - rtype = np.float64 - hamiltonian_obs = HamiltonianC128 - - coeffs = np.array(unwrap(ob.coeffs)).astype(rtype) - terms = [_serialize_ob(t, wires_map, use_csingle) for t in ob.ops] - return hamiltonian_obs(coeffs, terms) - - -def _serialize_pauli_word(ob, wires_map: dict, use_csingle: bool): - """Serialize a :class:`pennylane.pauli.PauliWord` into a Named or Tensor observable.""" - if use_csingle: - named_obs = NamedObsC64 - tensor_obs = TensorProdObsC64 - else: - named_obs = NamedObsC128 - tensor_obs = TensorProdObsC128 - - if len(ob) == 1: - wire, pauli = list(ob.items())[0] - return named_obs(pauli_name_map[pauli], [wires_map[wire]]) - - return tensor_obs( - [named_obs(pauli_name_map[pauli], [wires_map[wire]]) for wire, pauli in ob.items()] - ) - - -def _serialize_pauli_sentence(ob, wires_map: dict, use_csingle: bool): - """Serialize a :class:`pennylane.pauli.PauliSentence` into a Hamiltonian.""" - if use_csingle: - rtype = np.float32 - hamiltonian_obs = HamiltonianC64 - else: - rtype = np.float64 - hamiltonian_obs = HamiltonianC128 - - pwords, coeffs = zip(*ob.items()) - terms = [_serialize_pauli_word(pw, wires_map, use_csingle) for pw in pwords] - coeffs = np.array(coeffs).astype(rtype) - return hamiltonian_obs(coeffs, terms) - - -def _serialize_ob(ob, wires_map, use_csingle): - if isinstance(ob, Tensor): - return _serialize_tensor_ob(ob, 
wires_map, use_csingle) - elif ob.name == "Hamiltonian": - return _serialize_hamiltonian(ob, wires_map, use_csingle) - elif isinstance(ob, (PauliX, PauliY, PauliZ, Identity, Hadamard)): - return _serialize_named_obs(ob, wires_map, use_csingle) - elif ob._pauli_rep is not None: - return _serialize_pauli_sentence(ob._pauli_rep, wires_map, use_csingle) - else: - return _serialize_hermitian_ob(ob, wires_map, use_csingle) - - -def _serialize_observables(tape: QuantumTape, wires_map: dict, use_csingle: bool = False) -> List: - """Serializes the observables of an input tape. - - Args: - tape (QuantumTape): the input quantum tape - wires_map (dict): a dictionary mapping input wires to the device's backend wires - use_csingle (bool): whether to use np.complex64 instead of np.complex128 - - Returns: - list(ObsStructC128 or ObsStructC64): A list of observable objects compatible with the C++ backend - """ - - return [_serialize_ob(ob, wires_map, use_csingle) for ob in tape.observables] - - -def _serialize_ops( - tape: QuantumTape, wires_map: dict -) -> Tuple[List[List[str]], List[np.ndarray], List[List[int]], List[bool], List[np.ndarray]]: - """Serializes the operations of an input tape. - - The state preparation operations are not included. - - Args: - tape (QuantumTape): the input quantum tape - wires_map (dict): a dictionary mapping input wires to the device's backend wires - - Returns: - Tuple[list, list, list, list, list]: A serialization of the operations, containing a list - of operation names, a list of operation parameters, a list of observable wires, a list of - inverses, and a list of matrices for the operations that do not have a dedicated kernel. 
- """ - names = [] - params = [] - wires = [] - mats = [] - - uses_stateprep = False - - for o in tape.operations: - if isinstance(o, (BasisState, QubitStateVector)): - uses_stateprep = True - continue - elif isinstance(o, Rot): - op_list = o.expand().operations - else: - op_list = [o] - - for single_op in op_list: - name = single_op.name - names.append(name) - - if not hasattr(StateVectorC128, name): - params.append([]) - mats.append(qml.matrix(single_op)) - - else: - params.append(single_op.parameters) - mats.append([]) - - wires_list = single_op.wires.tolist() - wires.append([wires_map[w] for w in wires_list]) - - inverses = [False] * len(names) - return (names, params, wires, inverses, mats), uses_stateprep diff --git a/pennylane_lightning/__init__.py b/pennylane_lightning/core/__init__.py similarity index 80% rename from pennylane_lightning/__init__.py rename to pennylane_lightning/core/__init__.py index 6a316c5efc..d3a37803e8 100644 --- a/pennylane_lightning/__init__.py +++ b/pennylane_lightning/core/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Xanadu Quantum Technologies Inc. +# Copyright 2018-2023 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Top level PennyLane-Lightning module.""" +"""PennyLane lightning module.""" from ._version import __version__ -from .lightning_qubit import LightningQubit diff --git a/pennylane_lightning/core/_serialize.py b/pennylane_lightning/core/_serialize.py new file mode 100644 index 0000000000..17a60d2c34 --- /dev/null +++ b/pennylane_lightning/core/_serialize.py @@ -0,0 +1,233 @@ +# Copyright 2021 Xanadu Quantum Technologies Inc. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +r""" +Helper functions for serializing quantum tapes. +""" +from typing import List, Tuple +import numpy as np +from pennylane import ( + BasisState, + Hadamard, + PauliX, + PauliY, + PauliZ, + Identity, + QubitStateVector, + Rot, +) +from pennylane.operation import Tensor +from pennylane.tape import QuantumTape +from pennylane.math import unwrap + +from pennylane import matrix, DeviceError + +pauli_name_map = { + "I": "Identity", + "X": "PauliX", + "Y": "PauliY", + "Z": "PauliZ", +} + + +class QuantumScriptSerializer: + """Serializer class for `pennylane.tape.QuantumScript` data. + + Args: + device_name: device shortname. + use_csingle (bool): whether to use np.complex64 instead of np.complex128 + + """ + + # pylint: disable=import-outside-toplevel, too-many-instance-attributes + def __init__(self, device_name, use_csingle: bool = False): + self.use_csingle = use_csingle + if device_name == "lightning.qubit": + try: + import pennylane_lightning.lightning_qubit_ops as lightning_ops + except ImportError as exception: + raise ImportError( + f"Pre-compiled binaries for {device_name}" + " serialize functionality are not available." + ) from exception + elif device_name == "lightning.kokkos": + try: + import pennylane_lightning.lightning_kokkos_ops as lightning_ops + except ImportError as exception: + raise ImportError( + f"Pre-compiled binaries for {device_name}" + " serialize functionality are not available." 
+ ) from exception + else: + raise DeviceError(f'The device name "{device_name}" is not a valid option.') + self.statevector_c128 = lightning_ops.StateVectorC128 + self.named_obs_c64 = lightning_ops.observables.NamedObsC64 + self.named_obs_c128 = lightning_ops.observables.NamedObsC128 + self.hermitian_obs_c64 = lightning_ops.observables.HermitianObsC64 + self.hermitian_obs_c128 = lightning_ops.observables.HermitianObsC128 + self.tensor_prod_obs_c64 = lightning_ops.observables.TensorProdObsC64 + self.tensor_prod_obs_c128 = lightning_ops.observables.TensorProdObsC128 + self.hamiltonian_c64 = lightning_ops.observables.HamiltonianC64 + self.hamiltonian_c128 = lightning_ops.observables.HamiltonianC128 + + @property + def ctype(self): + """Complex type.""" + return np.complex64 if self.use_csingle else np.complex128 + + @property + def rtype(self): + """Real type.""" + return np.float32 if self.use_csingle else np.float64 + + @property + def named_obs(self): + """Named observable matching ``use_csingle`` precision.""" + return self.named_obs_c64 if self.use_csingle else self.named_obs_c128 + + @property + def hermitian_obs(self): + """Hermitian observable matching ``use_csingle`` precision.""" + return self.hermitian_obs_c64 if self.use_csingle else self.hermitian_obs_c128 + + @property + def tensor_obs(self): + """Tensor product observable matching ``use_csingle`` precision.""" + return self.tensor_prod_obs_c64 if self.use_csingle else self.tensor_prod_obs_c128 + + @property + def hamiltonian_obs(self): + """Hamiltonian observable matching ``use_csingle`` precision.""" + return self.hamiltonian_c64 if self.use_csingle else self.hamiltonian_c128 + + def _named_obs(self, observable, wires_map: dict): + """Serializes a Named observable""" + wires = [wires_map[w] for w in observable.wires] + if observable.name == "Identity": + wires = wires[:1] + return self.named_obs(observable.name, wires) + + def _hermitian_ob(self, observable, wires_map: dict): + """Serializes a 
Hermitian observable""" + assert not isinstance(observable, Tensor) + + wires = [wires_map[w] for w in observable.wires] + return self.hermitian_obs(matrix(observable).ravel().astype(self.ctype), wires) + + def _tensor_ob(self, observable, wires_map: dict): + """Serialize a tensor observable""" + assert isinstance(observable, Tensor) + return self.tensor_obs([self._ob(obs, wires_map) for obs in observable.obs]) + + def _hamiltonian(self, observable, wires_map: dict): + coeffs = np.array(unwrap(observable.coeffs)).astype(self.rtype) + terms = [self._ob(t, wires_map) for t in observable.ops] + return self.hamiltonian_obs(coeffs, terms) + + def _pauli_word(self, observable, wires_map: dict): + """Serialize a :class:`pennylane.pauli.PauliWord` into a Named or Tensor observable.""" + if len(observable) == 1: + wire, pauli = list(observable.items())[0] + return self.named_obs(pauli_name_map[pauli], [wires_map[wire]]) + + return self.tensor_obs( + [ + self.named_obs(pauli_name_map[pauli], [wires_map[wire]]) + for wire, pauli in observable.items() + ] + ) + + def _pauli_sentence(self, observable, wires_map: dict): + """Serialize a :class:`pennylane.pauli.PauliSentence` into a Hamiltonian.""" + pwords, coeffs = zip(*observable.items()) + terms = [self._pauli_word(pw, wires_map) for pw in pwords] + coeffs = np.array(coeffs).astype(self.rtype) + return self.hamiltonian_obs(coeffs, terms) + + # pylint: disable=protected-access + def _ob(self, observable, wires_map): + """Serialize a :class:`pennylane.operation.Observable` into an Observable.""" + if isinstance(observable, Tensor): + return self._tensor_ob(observable, wires_map) + if observable.name == "Hamiltonian": + return self._hamiltonian(observable, wires_map) + if isinstance(observable, (PauliX, PauliY, PauliZ, Identity, Hadamard)): + return self._named_obs(observable, wires_map) + if observable._pauli_rep is not None: + return self._pauli_sentence(observable._pauli_rep, wires_map) + return self._hermitian_ob(observable, 
wires_map)
+
+    def serialize_observables(self, tape: QuantumTape, wires_map: dict) -> List:
+        """Serializes the observables of an input tape.
+
+        Args:
+            tape (QuantumTape): the input quantum tape
+            wires_map (dict): a dictionary mapping input wires to the device's backend wires
+
+        Returns:
+            list(ObsStructC128 or ObsStructC64): A list of observable objects compatible with
+            the C++ backend
+        """
+
+        return [self._ob(observable, wires_map) for observable in tape.observables]
+
+    def serialize_ops(
+        self, tape: QuantumTape, wires_map: dict
+    ) -> Tuple[List[List[str]], List[np.ndarray], List[List[int]], List[bool], List[np.ndarray]]:
+        """Serializes the operations of an input tape.
+
+        The state preparation operations are not included.
+
+        Args:
+            tape (QuantumTape): the input quantum tape
+            wires_map (dict): a dictionary mapping input wires to the device's backend wires
+
+        Returns:
+            Tuple[list, list, list, list, list]: A serialization of the operations, containing a
+            list of operation names, a list of operation parameters, a list of operation wires,
+            a list of inverses, and a list of matrices for the operations that do not have a
+            dedicated kernel.
+ """ + names = [] + params = [] + wires = [] + mats = [] + + uses_stateprep = False + + for operation in tape.operations: + if isinstance(operation, (BasisState, QubitStateVector)): + uses_stateprep = True + continue + if isinstance(operation, Rot): + op_list = operation.expand().operations + else: + op_list = [operation] + + for single_op in op_list: + name = single_op.name + names.append(name) + + if not hasattr(self.statevector_c128, name): + params.append([]) + mats.append(matrix(single_op)) + + else: + params.append(single_op.parameters) + mats.append([]) + + wires_list = single_op.wires.tolist() + wires.append([wires_map[w] for w in wires_list]) + + inverses = [False] * len(names) + return (names, params, wires, inverses, mats), uses_stateprep diff --git a/pennylane_lightning/_version.py b/pennylane_lightning/core/_version.py similarity index 88% rename from pennylane_lightning/_version.py rename to pennylane_lightning/core/_version.py index 7e05d3ddb5..b6f5bb622f 100644 --- a/pennylane_lightning/_version.py +++ b/pennylane_lightning/core/_version.py @@ -1,4 +1,4 @@ -# Copyright 2020 Xanadu Quantum Technologies Inc. +# Copyright 2018-2023 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.32.0-dev5" +__version__ = "0.32.0-dev6" diff --git a/pennylane_lightning/core/lightning_base.py b/pennylane_lightning/core/lightning_base.py new file mode 100644 index 0000000000..ca1ef0b40d --- /dev/null +++ b/pennylane_lightning/core/lightning_base.py @@ -0,0 +1,413 @@ +# Copyright 2018-2023 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r""" +This module contains the base class for all PennyLane Lightning simulator devices, +and interfaces with C++ for improved performance. +""" +from typing import List +from itertools import islice, product +import numpy as np + + +import pennylane as qml +from pennylane import ( + BasisState, + QubitDevice, + QubitStateVector, +) +from pennylane.devices import DefaultQubit +from pennylane.measurements import MeasurementProcess +from pennylane.operation import Operation +from pennylane.wires import Wires + + +from ._version import __version__ +from ._serialize import QuantumScriptSerializer + + +def _chunk_iterable(iteration, num_chunks): + "Lazy-evaluated chunking of given iterable from https://stackoverflow.com/a/22045226" + iteration = iter(iteration) + return iter(lambda: tuple(islice(iteration, num_chunks)), ()) + + +class LightningBase(QubitDevice): + """PennyLane Lightning Base device. + + This intermediate base class provides device-agnostic functionalities. + + Use of this device requires pre-built binaries or compilation from source. Check out the + :doc:`/installation` guide for more details. + + Args: + wires (int): the number of wires to initialize the device with + c_dtype: Datatypes for statevector representation. Must be one of + ``np.complex64`` or ``np.complex128``. + shots (int): How many times the circuit should be evaluated (or sampled) to estimate + stochastic return values. Defaults to ``None`` if not specified. Setting + to ``None`` results in computing statistics like expectation values and + variances analytically. 
+ batch_obs (bool): Determine whether we process observables in parallel when computing + the jacobian. This value is only relevant when the lightning qubit is built with + OpenMP. + """ + + pennylane_requires = ">=0.30" + version = __version__ + author = "Xanadu Inc." + short_name = "lightning.base" + _CPP_BINARY_AVAILABLE = True + + def __init__( + self, + wires, + *, + c_dtype=np.complex128, + shots=None, + batch_obs=False, + ): + if c_dtype is np.complex64: + r_dtype = np.float32 + self.use_csingle = True + elif c_dtype is np.complex128: + r_dtype = np.float64 + self.use_csingle = False + else: + raise TypeError(f"Unsupported complex Type: {c_dtype}") + super().__init__(wires, shots=shots, r_dtype=r_dtype, c_dtype=c_dtype) + self._batch_obs = batch_obs + + @property + def stopping_condition(self): + """.BooleanFn: Returns the stopping condition for the device. The returned + function accepts a queueable object (including a PennyLane operation + and observable) and returns ``True`` if supported by the device.""" + + def accepts_obj(obj): + if obj.name == "QFT" and len(obj.wires) < 10: + return True + if obj.name == "GroverOperator" and len(obj.wires) < 13: + return True + return (not isinstance(obj, qml.tape.QuantumTape)) and getattr( + self, "supports_operation", lambda name: False + )(obj.name) + + return qml.BooleanFn(accepts_obj) + + @classmethod + def capabilities(cls): + capabilities = super().capabilities().copy() + capabilities.update( + model="qubit", + supports_analytic_computation=True, + supports_broadcasting=False, + returns_state=True, + ) + return capabilities + + # To be able to validate the adjoint method [_validate_adjoint_method(device)], + # the qnode requires the definition of: + # ["_apply_operation", "_apply_unitary", "adjoint_jacobian"] + # pylint: disable=missing-function-docstring + def _apply_operation(self): + pass + + # pylint: disable=missing-function-docstring + def _apply_unitary(self): + pass + + def 
_init_process_jacobian_tape(self, tape, starting_state, use_device_state): + """Generate an initial state vector for ``_process_jacobian_tape``.""" + + @property + def create_ops_list(self): + """Returns create_ops_list function of the matching precision.""" + + def probability_lightning(self, wires): + """Return the probability of each computational basis state.""" + + def vjp(self, measurements, grad_vec, starting_state=None, use_device_state=False): + """Generate the processing function required to compute the vector-Jacobian + products of a tape. + """ + + def probability(self, wires=None, shot_range=None, bin_size=None): + """Return the probability of each computational basis state. + + Devices that require a finite number of shots always return the + estimated probability. + + Args: + wires (Iterable[Number, str], Number, str, Wires): wires to return + marginal probabilities for. Wires not provided are traced out of the system. + shot_range (tuple[int]): 2-tuple of integers specifying the range of samples + to use. If not specified, all samples are used. + bin_size (int): Divides the shot range into bins of size ``bin_size``, and + returns the measurement statistic separately over each bin. If not + provided, the entire shot range is treated as a single bin. 
+ + Returns: + array[float]: list of the probabilities + """ + if self.shots is not None: + return self.estimate_probability(wires=wires, shot_range=shot_range, bin_size=bin_size) + + wires = wires or self.wires + wires = Wires(wires) + + # translate to wire labels used by device + device_wires = self.map_wires(wires) + + if ( + device_wires + and len(device_wires) > 1 + and (not np.all(np.array(device_wires)[:-1] <= np.array(device_wires)[1:])) + ): + raise RuntimeError( + "Lightning does not currently support out-of-order indices for probabilities" + ) + return self.probability_lightning(device_wires) + + def _get_diagonalizing_gates(self, circuit: qml.tape.QuantumTape) -> List[Operation]: + # pylint: disable=no-member, protected-access + def skip_diagonalizing(obs): + return isinstance(obs, qml.Hamiltonian) or ( + isinstance(obs, qml.ops.Sum) and obs._pauli_rep is not None + ) + + meas_filtered = list( + filter(lambda m: m.obs is None or not skip_diagonalizing(m.obs), circuit.measurements) + ) + return super()._get_diagonalizing_gates(qml.tape.QuantumScript(measurements=meas_filtered)) + + def _preprocess_state_vector(self, state, device_wires): + """Initialize the internal state vector in a specified state. 
+ + Args: + state (array[complex]): normalized input state of length ``2**len(wires)`` + or broadcasted state of shape ``(batch_size, 2**len(wires))`` + device_wires (Wires): wires that get initialized in the state + + Returns: + array[complex]: normalized input state of length ``2**len(wires)`` + or broadcasted state of shape ``(batch_size, 2**len(wires))`` + array[int]: indices for which the state is changed to input state vector elements + """ + + # translate to wire labels used by device + device_wires = self.map_wires(device_wires) + + state = self._asarray(state, dtype=self.C_DTYPE) + + if len(device_wires) == self.num_wires and Wires(sorted(device_wires)) == device_wires: + return None, state + + # generate basis states on subset of qubits via the cartesian product + basis_states = np.array(list(product([0, 1], repeat=len(device_wires)))) + + # get basis states to alter on full set of qubits + unravelled_indices = np.zeros((2 ** len(device_wires), self.num_wires), dtype=int) + unravelled_indices[:, device_wires] = basis_states + + # get indices for which the state is changed to input state vector elements + ravelled_indices = np.ravel_multi_index(unravelled_indices.T, [2] * self.num_wires) + return ravelled_indices, state + + def _get_basis_state_index(self, state, wires): + """Returns the basis state index of a specified computational basis state. 
+ + Args: + state (array[int]): computational basis state of shape ``(wires,)`` + consisting of 0s and 1s + wires (Wires): wires that the provided computational state should be initialized on + + Returns: + int: basis state index + """ + # translate to wire labels used by device + device_wires = self.map_wires(wires) + + # length of basis state parameter + n_basis_state = len(state) + + if not set(state.tolist()).issubset({0, 1}): + raise ValueError("BasisState parameter must consist of 0 or 1 integers.") + + if n_basis_state != len(device_wires): + raise ValueError("BasisState parameter and wires must be of equal length.") + + # get computational basis state number + basis_states = 2 ** (self.num_wires - 1 - np.array(device_wires)) + basis_states = qml.math.convert_like(basis_states, state) + return int(qml.math.dot(state, basis_states)) + + # pylint: disable=too-many-function-args, assignment-from-no-return + def _process_jacobian_tape(self, tape, starting_state, use_device_state): + state_vector = self._init_process_jacobian_tape(tape, starting_state, use_device_state) + + obs_serialized = QuantumScriptSerializer( + self.short_name, self.use_csingle + ).serialize_observables(tape, self.wire_map) + ops_serialized, use_sp = QuantumScriptSerializer( + self.short_name, self.use_csingle + ).serialize_ops(tape, self.wire_map) + + ops_serialized = self.create_ops_list(*ops_serialized) + + # We need to filter out indices in trainable_params which do not + # correspond to operators. 
+ trainable_params = sorted(tape.trainable_params) + if len(trainable_params) == 0: + return None + + tp_shift = [] + record_tp_rows = [] + all_params = 0 + + for op_idx, trainable_param in enumerate(trainable_params): + # get op_idx-th operator among differentiable operators + operation, _, _ = tape.get_operation(op_idx) + if isinstance(operation, Operation) and not isinstance( + operation, (BasisState, QubitStateVector) + ): + # We now just ignore non-op or state preps + tp_shift.append(trainable_param) + record_tp_rows.append(all_params) + all_params += 1 + + if use_sp: + # When the first element of the tape is state preparation. Still, I am not sure + # whether there must be only one state preparation... + tp_shift = [i - 1 for i in tp_shift] + + return { + "state_vector": state_vector, + "obs_serialized": obs_serialized, + "ops_serialized": ops_serialized, + "tp_shift": tp_shift, + "record_tp_rows": record_tp_rows, + "all_params": all_params, + } + + # pylint: disable=unnecessary-pass + @staticmethod + def _check_adjdiff_supported_measurements(measurements: List[MeasurementProcess]): + """Check whether given list of measurement is supported by adjoint_differentiation. + + Args: + measurements (List[MeasurementProcess]): a list of measurement processes to check. + + Returns: + Expectation or State: a common return type of measurements. + """ + pass + + @staticmethod + def _adjoint_jacobian_processing(jac): + """ + Post-process the Jacobian matrix returned by ``adjoint_jacobian`` for + the new return type system. 
+ """ + jac = np.squeeze(jac) + + if jac.ndim == 0: + return np.array(jac) + + if jac.ndim == 1: + return tuple(np.array(j) for j in jac) + + # must be 2-dimensional + return tuple(tuple(np.array(j_) for j_ in j) for j in jac) + + # pylint: disable=too-many-arguments + def batch_vjp( + self, tapes, grad_vecs, reduction="append", starting_state=None, use_device_state=False + ): + """Generate the processing function required to compute the vector-Jacobian products + of a batch of tapes. + + Args: + tapes (Sequence[.QuantumTape]): sequence of quantum tapes to differentiate + grad_vecs (Sequence[tensor_like]): Sequence of gradient-output vectors ``grad_vec``. + Must be the same length as ``tapes``. Each ``grad_vec`` tensor should have + shape matching the output shape of the corresponding tape. + reduction (str): Determines how the vector-Jacobian products are returned. + If ``append``, then the output of the function will be of the form + ``List[tensor_like]``, with each element corresponding to the VJP of each + input tape. If ``extend``, then the output VJPs will be concatenated. + starting_state (tensor_like): post-forward pass state to start execution with. + It should be complex-valued. Takes precedence over ``use_device_state``. + use_device_state (bool): use current device state to initialize. A forward pass of + the same circuit should be the last thing the device has executed. + If a ``starting_state`` is provided, that takes precedence. + + Returns: + The processing function required to compute the vector-Jacobian products + of a batch of tapes. 
+ """ + fns = [] + + # Loop through the tapes and grad_vecs vector + for tape, grad_vec in zip(tapes, grad_vecs): + fun = self.vjp( + tape.measurements, + grad_vec, + starting_state=starting_state, + use_device_state=use_device_state, + ) + fns.append(fun) + + def processing_fns(tapes): + vjps = [] + for tape, fun in zip(tapes, fns): + vjp = fun(tape) + + # make sure vjp is iterable if using extend reduction + if ( + not isinstance(vjp, tuple) + and getattr(reduction, "__name__", reduction) == "extend" + ): + vjp = (vjp,) + + if isinstance(reduction, str): + getattr(vjps, reduction)(vjp) + elif callable(reduction): + reduction(vjps, vjp) + + return vjps + + return processing_fns + + +class LightningBaseFallBack(DefaultQubit): # pragma: no cover + # pylint: disable=missing-class-docstring + pennylane_requires = ">=0.30" + version = __version__ + author = "Xanadu Inc." + _CPP_BINARY_AVAILABLE = False + + def __init__(self, wires, *, c_dtype=np.complex128, **kwargs): + if c_dtype is np.complex64: + r_dtype = np.float32 + elif c_dtype is np.complex128: + r_dtype = np.float64 + else: + raise TypeError(f"Unsupported complex Type: {c_dtype}") + super().__init__(wires, r_dtype=r_dtype, c_dtype=c_dtype, **kwargs) + + @property + def state_vector(self): + """Returns a handle to the statevector.""" + return self._state diff --git a/pennylane_lightning/core/src/CMakeLists.txt b/pennylane_lightning/core/src/CMakeLists.txt new file mode 100644 index 0000000000..9f20a182f0 --- /dev/null +++ b/pennylane_lightning/core/src/CMakeLists.txt @@ -0,0 +1,26 @@ +cmake_minimum_required(VERSION 3.20) + +project(lightning_components LANGUAGES CXX) + +############################################################################### +# Include all nested sources directories +############################################################################### +set(COMPONENT_SUBDIRS algorithms + bindings + gates + measurements + observables + simulators + utils +) +foreach(COMP ${COMPONENT_SUBDIRS}) + 
add_subdirectory(${COMP})
+endforeach()
+
+if (BUILD_TESTS)
+    # Include macros supporting tests.
+    include("${CMAKE_SOURCE_DIR}/cmake/support_tests.cmake")
+    FetchAndIncludeCatch()
+
+    include(CTest)
+endif()
\ No newline at end of file
diff --git a/pennylane_lightning/core/src/algorithms/AdjointJacobianBase.hpp b/pennylane_lightning/core/src/algorithms/AdjointJacobianBase.hpp
new file mode 100644
index 0000000000..ebd2e288b4
--- /dev/null
+++ b/pennylane_lightning/core/src/algorithms/AdjointJacobianBase.hpp
@@ -0,0 +1,168 @@
+// Copyright 2018-2023 Xanadu Quantum Technologies Inc.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+/**
+ * @file AdjointJacobianBase.hpp
+ * Defines the base class to support the adjoint Jacobian differentiation
+ * method.
+ */
+#pragma once
+
+#include <span>
+
+#include "JacobianData.hpp"
+#include "Observables.hpp"
+
+namespace Pennylane::Algorithms {
+/**
+ * @brief Adjoint Jacobian evaluator following the method of arXiV:2009.02823.
+ *
+ * @tparam StateVectorT State vector type.
+ */
+template <class StateVectorT, class Derived> class AdjointJacobianBase {
+  private:
+    using ComplexT = typename StateVectorT::ComplexT;
+    using PrecisionT = typename StateVectorT::PrecisionT;
+
+  protected:
+    AdjointJacobianBase() = default;
+    AdjointJacobianBase(const AdjointJacobianBase &) = default;
+    AdjointJacobianBase(AdjointJacobianBase &&) noexcept = default;
+    AdjointJacobianBase &operator=(const AdjointJacobianBase &) = default;
+    AdjointJacobianBase &operator=(AdjointJacobianBase &&) noexcept = default;
+
+    /**
+     * @brief Apply all operations from given
+     * `%OpsData<StateVectorT>` object to `%UpdatedStateVectorT`.
+     *
+     * @tparam UpdatedStateVectorT
+     * @param state Statevector to be updated.
+     * @param operations Operations to apply.
+     * @param adj Take the adjoint of the given operations.
+     */
+    template <class UpdatedStateVectorT>
+    inline void applyOperations(UpdatedStateVectorT &state,
+                                const OpsData<StateVectorT> &operations,
+                                bool adj = false) {
+        for (size_t op_idx = 0; op_idx < operations.getOpsName().size();
+             op_idx++) {
+            state.applyOperation(operations.getOpsName()[op_idx],
+                                 operations.getOpsWires()[op_idx],
+                                 operations.getOpsInverses()[op_idx] ^ adj,
+                                 operations.getOpsParams()[op_idx]);
+        }
+    }
+
+    /**
+     * @brief Apply the adjoint indexed operation from
+     * `%OpsData<StateVectorT>` object to `%UpdatedStateVectorT`.
+     *
+     * @tparam UpdatedStateVectorT updated state vector type.
+     * @param state Statevector to be updated.
+     * @param operations Operations to apply.
+     * @param op_idx Adjointed operation index to apply.
+     */
+    template <class UpdatedStateVectorT>
+    inline void applyOperationAdj(UpdatedStateVectorT &state,
+                                  const OpsData<StateVectorT> &operations,
+                                  size_t op_idx) {
+        state.applyOperation(operations.getOpsName()[op_idx],
+                             operations.getOpsWires()[op_idx],
+                             !operations.getOpsInverses()[op_idx],
+                             operations.getOpsParams()[op_idx]);
+    }
+
+    /**
+     * @brief Apply the adjoint indexed operation from several
+     * `%OpsData<StateVectorT>` objects to `%UpdatedStateVectorT` objects.
+     *
+     * @param states Vector of all statevectors; 1 per observable
+     * @param operations Operations list.
+     * @param op_idx Index of given operation within operations list to take
+     * adjoint of.
+     */
+    inline void applyOperationsAdj(std::vector<StateVectorT> &states,
+                                   const OpsData<StateVectorT> &operations,
+                                   size_t op_idx) {
+        for (auto &state : states) {
+            applyOperationAdj(state, operations, op_idx);
+        }
+    }
+
+    /**
+     * @brief Applies the gate generator for a given parametric gate. Returns
+     * the associated scaling coefficient.
+     *
+     * @param sv Statevector data to operate upon.
+     * @param op_name Name of parametric gate.
+     * @param wires Wires to operate upon.
+     * @param adj Indicate whether to take the adjoint of the operation.
+     * @return PrecisionT Generator scaling coefficient.
+     */
+    inline auto applyGenerator(StateVectorT &sv, const std::string &op_name,
+                               const std::vector<size_t> &wires, const bool adj)
+        -> PrecisionT {
+        return sv.applyGenerator(op_name, wires, adj);
+    }
+
+    /**
+     * @brief Apply a given `%Observable<StateVectorT>` object to
+     * `%StateVectorT`.
+     *
+     * @param state Statevector to be updated.
+     * @param observable Observable to apply.
+     */
+    inline void applyObservable(StateVectorT &state,
+                                const Observable<StateVectorT> &observable) {
+        observable.applyInPlace(state);
+    }
+
+    /**
+     * @brief Apply several `%Observable<StateVectorT>` object. to
+     * `%StateVectorT` objects.
+     *
+     * @param states Vector of statevector copies, one per observable.
+     * @param reference_state Reference statevector
+     * @param observables Vector of observables to apply to each statevector.
+     */
+    inline void applyObservables(
+        std::vector<StateVectorT> &states, const StateVectorT &reference_state,
+        const std::vector<std::shared_ptr<Observable<StateVectorT>>>
+            &observables) {
+        size_t num_observables = observables.size();
+        for (size_t i = 0; i < num_observables; i++) {
+            states[i].updateData(reference_state);
+            applyObservable(states[i], *observables[i]);
+        }
+    }
+
+    /**
+     * @brief Calculates the statevector's Jacobian for the selected set
+     * of parametric gates.
+     *
+     * @param jac Preallocated vector for Jacobian data results.
+     * @param jd JacobianData represents the QuantumTape to differentiate.
+     * @param apply_operations Indicate whether to apply operations to tape.psi
+     * prior to calculation.
+     */
+    inline void adjointJacobian(std::span<PrecisionT> jac,
+                                const JacobianData<StateVectorT> &jd,
+                                bool apply_operations = false) {
+        return static_cast<Derived *>(this)->adjointJacobian(jac, jd,
+                                                             apply_operations);
+    }
+
+  public:
+    ~AdjointJacobianBase() = default;
+};
+} // namespace Pennylane::Algorithms
\ No newline at end of file
diff --git a/pennylane_lightning/core/src/algorithms/CMakeLists.txt b/pennylane_lightning/core/src/algorithms/CMakeLists.txt
new file mode 100644
index 0000000000..4ebb93483f
--- /dev/null
+++ b/pennylane_lightning/core/src/algorithms/CMakeLists.txt
@@ -0,0 +1,17 @@
+cmake_minimum_required(VERSION 3.20)
+
+project(lightning_algorithms LANGUAGES CXX)
+
+add_library(lightning_algorithms INTERFACE)
+
+target_include_directories(lightning_algorithms INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})
+target_link_libraries(lightning_algorithms INTERFACE lightning_compile_options
+                                                     lightning_external_libs
+                                                     lightning_utils
+                                                     lightning_observables
+                                                     )
+
+if (BUILD_TESTS)
+    enable_testing()
+    add_subdirectory("tests")
+endif()
\ No newline at end of file
diff --git a/pennylane_lightning/src/algorithms/JacobianTape.hpp b/pennylane_lightning/core/src/algorithms/JacobianData.hpp
similarity index 77%
rename from pennylane_lightning/src/algorithms/JacobianTape.hpp
rename to pennylane_lightning/core/src/algorithms/JacobianData.hpp
index 923d20d85e..bef00080bb 100644
--- a/pennylane_lightning/src/algorithms/JacobianTape.hpp
+++ b/pennylane_lightning/core/src/algorithms/JacobianData.hpp
@@ -1,4 +1,4 @@
-// Copyright 2021 Xanadu Quantum Technologies Inc.
+// Copyright 2018-2023 Xanadu Quantum Technologies Inc.
 
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -13,16 +13,6 @@ // limitations under the License. #pragma once -#include "Macros.hpp" -#include "Observables.hpp" -#include "StateVectorManagedCPU.hpp" -#include "Util.hpp" - -#if defined(_OPENMP) -#include -#endif - -#include #include #include #include @@ -30,21 +20,35 @@ #include #include +#include "Macros.hpp" +#include "Observables.hpp" +#include "Util.hpp" + +// using namespace Pennylane; +/// @cond DEV +namespace { +using Pennylane::Observables::Observable; +} // namespace +/// @endcond + namespace Pennylane::Algorithms { /** * @brief Utility class for encapsulating operations used by AdjointJacobian * class. */ -template class OpsData { +template class OpsData { private: + using PrecisionT = typename StateVectorT::PrecisionT; + using ComplexT = typename StateVectorT::ComplexT; + size_t num_par_ops_; size_t num_nonpar_ops_; const std::vector ops_name_; - const std::vector> ops_params_; + const std::vector> ops_params_; const std::vector> ops_wires_; const std::vector ops_inverses_; - const std::vector>> ops_matrices_; + const std::vector> ops_matrices_; public: /** @@ -60,15 +64,14 @@ template class OpsData { * supported. */ OpsData(std::vector ops_name, - const std::vector> &ops_params, + const std::vector> &ops_params, std::vector> ops_wires, std::vector ops_inverses, - std::vector>> ops_matrices) - : ops_name_{std::move(ops_name)}, ops_params_{ops_params}, - ops_wires_{std::move(ops_wires)}, + std::vector> ops_matrices) + : num_par_ops_{0}, ops_name_{std::move(ops_name)}, + ops_params_{ops_params}, ops_wires_{std::move(ops_wires)}, ops_inverses_{std::move(ops_inverses)}, ops_matrices_{ std::move(ops_matrices)} { - num_par_ops_ = 0; for (const auto &p : ops_params) { if (!p.empty()) { num_par_ops_++; @@ -82,20 +85,19 @@ template class OpsData { operations to apply upon the `%StateVector`. 
* * @see OpsData(const std::vector &ops_name, - const std::vector> &ops_params, + const std::vector> &ops_params, const std::vector> &ops_wires, const std::vector &ops_inverses, - const std::vector>> &ops_matrices) + const std::vector> &ops_matrices) */ OpsData(const std::vector &ops_name, - const std::vector> &ops_params, + const std::vector> &ops_params, std::vector> ops_wires, std::vector ops_inverses) - : ops_name_{ops_name}, ops_params_{ops_params}, + : num_par_ops_{0}, ops_name_{ops_name}, ops_params_{ops_params}, ops_wires_{std::move(ops_wires)}, ops_inverses_{std::move( ops_inverses)}, ops_matrices_(ops_name.size()) { - num_par_ops_ = 0; for (const auto &p : ops_params) { if (p.size() > 0) { num_par_ops_++; @@ -123,10 +125,10 @@ template class OpsData { * @brief Get the (optional) parameters for each operation. Given entries * are empty ({}) if not required. * - * @return const std::vector>& + * @return const std::vector>& */ [[nodiscard]] auto getOpsParams() const - -> const std::vector> & { + -> const std::vector> & { return ops_params_; } /** @@ -150,10 +152,10 @@ template class OpsData { * @brief Get the numerical matrix for a given unsupported operation. Given * entries are empty ({}) if not required. 
* - * @return const std::vector>>& + * @return const std::vector>& */ [[nodiscard]] auto getOpsMatrices() const - -> const std::vector>> & { + -> const std::vector> & { return ops_matrices_; } @@ -196,29 +198,38 @@ template class OpsData { /** * @brief Represent the serialized data of a QuantumTape to differentiate + * + * @tparam StateVectorT */ -template class JacobianData { +template class JacobianData { private: - size_t num_parameters; /**< Number of parameters in the tape */ - size_t num_elements; /**< Length of the statevector data */ - const std::complex *psi; /**< Pointer to the statevector data */ + using ComplexT = typename StateVectorT::ComplexT; + size_t num_parameters; /**< Number of parameters in the tape */ + size_t num_elements; /**< Length of the statevector data */ + const ComplexT *psi; /**< Pointer to the statevector data */ /** * @var observables * Observables for which to calculate Jacobian. */ - const std::vector>> observables; + const std::vector>> observables; /** * @var operations * operations Operations used to create given state. */ - const OpsData operations; + const OpsData operations; /* @var trainableParams */ const std::vector trainableParams; public: + JacobianData(const JacobianData &) = default; + JacobianData(JacobianData &&) noexcept = default; + JacobianData &operator=(const JacobianData &) = default; + JacobianData &operator=(JacobianData &&) noexcept = default; + virtual ~JacobianData() = default; + /** * @brief Construct a JacobianData object * @@ -238,10 +249,10 @@ template class JacobianData { * (e.g. QubitStateVector) or Hamiltonian coefficients. 
* @endrst */ - JacobianData(size_t num_params, size_t num_elem, std::complex *ps, - std::vector>> obs, - OpsData ops, std::vector trainP) - : num_parameters(num_params), num_elements(num_elem), psi(ps), + JacobianData(size_t num_params, size_t num_elem, const ComplexT *sv_ptr, + std::vector>> obs, + OpsData ops, std::vector trainP) + : num_parameters(num_params), num_elements(num_elem), psi(sv_ptr), observables(std::move(obs)), operations(std::move(ops)), trainableParams(std::move(trainP)) { /* When the Hamiltonian has parameters, trainable parameters include @@ -267,9 +278,9 @@ template class JacobianData { /** * @brief Get the pointer to the statevector data. * - * @return std::complex * + * @return ComplexT * */ - [[nodiscard]] auto getPtrStateVec() const -> const std::complex * { + [[nodiscard]] auto getPtrStateVec() const -> const ComplexT * { return psi; } @@ -279,7 +290,7 @@ template class JacobianData { * @return List of observables */ [[nodiscard]] auto getObservables() const - -> const std::vector>> & { + -> const std::vector>> & { return observables; } @@ -296,9 +307,9 @@ template class JacobianData { /** * @brief Get operations used to create given state. 
* - * @return OpsData& + * @return OpsData& */ - [[nodiscard]] auto getOperations() const -> const OpsData & { + [[nodiscard]] auto getOperations() const -> const OpsData & { return operations; } diff --git a/pennylane_lightning/src/.clang-tidy b/pennylane_lightning/core/src/algorithms/tests/.clang-tidy similarity index 95% rename from pennylane_lightning/src/.clang-tidy rename to pennylane_lightning/core/src/algorithms/tests/.clang-tidy index 8e91895e1d..747793823a 100644 --- a/pennylane_lightning/src/.clang-tidy +++ b/pennylane_lightning/core/src/algorithms/tests/.clang-tidy @@ -1,5 +1,5 @@ --- -Checks: '-*,clang-diagnostic-*,clang-analyzer-*,modernize-*,-modernize-use-trailing-return-type,-modernize-loop-convert,clang-analyzer-cplusplus*,openmp-*,performance-*,portability-*,readability-*,hicpp-*,-hicpp-avoid-c-arrays,-hicpp-no-array-decay,bugprone-suspicious-*,llvm-namespace-comment,cppcoreguidelines-avoid-non-const-global-variables,cppcoreguidelines-slicing,cppcoreguidelines-special-member-functions,-readability-identifier-length' +Checks: '-*,clang-diagnostic-*,clang-analyzer-*,-llvmlibc-*,modernize-*,-modernize-use-trailing-return-type,clang-analyzer-cplusplus*,openmp-*,performance-*,portability-*,readability-*,-modernize-avoid-c-arrays,-readability-magic-numbers,hicpp-*,-hicpp-no-array-decay,-hicpp-avoid-c-arrays,bugprone-suspicious-*,llvm-namespace-comment,cppcoreguidelines-slicing,cppcoreguidelines-special-member-functions,-readability-identifier-length' WarningsAsErrors: '*' HeaderFilterRegex: '.*' AnalyzeTemporaryDtors: false @@ -180,7 +180,7 @@ CheckOptions: - key: google-readability-namespace-comments.SpacesBeforeComments value: '2' - key: readability-function-cognitive-complexity.Threshold - value: '45' + value: '100' - key: readability-function-cognitive-complexity.IgnoreMacros value: 'true' - key: cppcoreguidelines-non-private-member-variables-in-classes.IgnoreClassesWithAllMemberVariablesBeingPublic @@ -230,3 +230,4 @@ CheckOptions: - key: 
readability-simplify-subscript-expr.Types value: '::std::basic_string;::std::basic_string_view;::std::vector;::std::array' ... + diff --git a/pennylane_lightning/core/src/algorithms/tests/CMakeLists.txt b/pennylane_lightning/core/src/algorithms/tests/CMakeLists.txt new file mode 100644 index 0000000000..8ec128d61b --- /dev/null +++ b/pennylane_lightning/core/src/algorithms/tests/CMakeLists.txt @@ -0,0 +1,43 @@ +cmake_minimum_required(VERSION 3.20) + +project(algorithms_tests) + +# Default build type for test code is Debug +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Debug) +endif() + +include("${CMAKE_SOURCE_DIR}/cmake/support_tests.cmake") +FetchAndIncludeCatch() + +################################################################################ +# Define library +################################################################################ + +add_library(algorithms_tests INTERFACE) +target_link_libraries(algorithms_tests INTERFACE Catch2::Catch2 + lightning_algorithms + ) + +# Create dependencies on the dynamically defined simulator/backend targets. 
+target_link_libraries(algorithms_tests INTERFACE ${PL_BACKEND} + "${PL_BACKEND}_algorithms" + "${PL_BACKEND}_observables" +) + +ProcessTestOptions(algorithms_tests) + +target_sources(algorithms_tests INTERFACE runner_algorithms.cpp) + +################################################################################ +# Define targets +################################################################################ +set(TEST_SOURCES Test_AdjointJacobian.cpp + ) + +add_executable(algorithms_test_runner ${TEST_SOURCES}) +target_link_libraries(algorithms_test_runner PRIVATE algorithms_tests) + +catch_discover_tests(algorithms_test_runner) + +install(TARGETS algorithms_test_runner DESTINATION bin) diff --git a/pennylane_lightning/core/src/algorithms/tests/Test_AdjointJacobian.cpp b/pennylane_lightning/core/src/algorithms/tests/Test_AdjointJacobian.cpp new file mode 100644 index 0000000000..3c9f220887 --- /dev/null +++ b/pennylane_lightning/core/src/algorithms/tests/Test_AdjointJacobian.cpp @@ -0,0 +1,605 @@ +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an AS IS BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include + +#include "Error.hpp" // LightningException +#include "JacobianData.hpp" +#include "TestHelpers.hpp" // PL_REQUIRE_THROWS_MATCHES, linspace + +/// @cond DEV +namespace { +using namespace Pennylane::Util; +} // namespace +/// @endcond + +#ifdef _ENABLE_PLQUBIT +constexpr bool BACKEND_FOUND = true; + +#include "AdjointJacobianLQubit.hpp" +#include "ObservablesLQubit.hpp" +#include "TestHelpersStateVectors.hpp" // TestStateVectorBackends, StateVectorToName + +/// @cond DEV +namespace { +using namespace Pennylane::LightningQubit::Util; +using namespace Pennylane::LightningQubit::Algorithms; +using namespace Pennylane::LightningQubit::Observables; +} // namespace +/// @endcond + +#elif _ENABLE_PLKOKKOS == 1 +constexpr bool BACKEND_FOUND = true; + +#include "AdjointJacobianKokkos.hpp" +#include "ObservablesKokkos.hpp" +#include "TestHelpersStateVectors.hpp" // TestStateVectorBackends, StateVectorToName + +/// @cond DEV +namespace { +using namespace Pennylane::LightningKokkos::Util; +using namespace Pennylane::LightningKokkos::Algorithms; +using namespace Pennylane::LightningKokkos::Observables; +} // namespace + /// @endcond + +#else +constexpr bool BACKEND_FOUND = false; +using TestStateVectorBackends = Pennylane::Util::TypeList; + +template struct StateVectorToName {}; +#endif + +template void testAdjointJacobian() { + if constexpr (!std::is_same_v) { + using StateVectorT = typename TypeList::Type; + using PrecisionT = typename StateVectorT::PrecisionT; + using ComplexT = typename StateVectorT::ComplexT; + + const std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + + AdjointJacobian adj; + + DYNAMIC_SECTION("Throws an exception when size mismatches - " + << StateVectorToName::name) { + const std::vector tp{0, 1}; + const size_t num_qubits = 1; + const size_t num_params = 3; + const size_t num_obs = 1; + const auto obs = std::make_shared>( + "PauliZ", std::vector{0}); + std::vector jacobian(num_obs * tp.size() - 1, 0); + + auto ops = OpsData({"RX"}, 
{{0.742}}, {{0}}, {false}); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + + StateVectorT psi(cdata.data(), cdata.size()); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {obs}, ops, tp}; + PL_REQUIRE_THROWS_MATCHES( + adj.adjointJacobian(std::span{jacobian}, tape, true), + LightningException, + "The size of preallocated jacobian must be same as"); + } + + DYNAMIC_SECTION("No trainable params - " + << StateVectorToName::name) { + const std::vector tp{}; + const size_t num_qubits = 1; + const size_t num_params = 3; + const size_t num_obs = 1; + const auto obs = std::make_shared>( + "PauliZ", std::vector{0}); + std::vector jacobian(num_obs * tp.size(), 0); + + for (const auto &p : param) { + auto ops = OpsData({"RX"}, {{p}}, {{0}}, {false}); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + + StateVectorT psi(cdata.data(), cdata.size()); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {obs}, ops, tp}; + REQUIRE_NOTHROW( + adj.adjointJacobian(std::span{jacobian}, tape, true)); + } + } + + DYNAMIC_SECTION("Op=RX, Obs=Z - " + << StateVectorToName::name) { + const std::vector tp{0}; + + const size_t num_qubits = 1; + const size_t num_params = 3; + const size_t num_obs = 1; + const auto obs = std::make_shared>( + "PauliZ", std::vector{0}); + std::vector jacobian(num_obs * tp.size(), 0); + + for (const auto &p : param) { + auto ops = OpsData({"RX"}, {{p}}, {{0}}, {false}); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + + StateVectorT psi(cdata.data(), cdata.size()); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {obs}, ops, tp}; + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + CHECK(-sin(p) == Approx(jacobian[0])); + } + } + + DYNAMIC_SECTION("Op=RY, Obs=X - " + << StateVectorToName::name) { + std::vector tp{0}; + const size_t num_qubits = 1; + const size_t num_params = 3; + const size_t num_obs = 1; + + 
const auto obs = std::make_shared>( + "PauliX", std::vector{0}); + std::vector jacobian(num_obs * tp.size(), 0); + + for (const auto &p : param) { + auto ops = OpsData({"RY"}, {{p}}, {{0}}, {false}); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + + StateVectorT psi(cdata.data(), cdata.size()); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {obs}, ops, tp}; + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + CHECK(cos(p) == Approx(jacobian[0]).margin(1e-7)); + } + } + DYNAMIC_SECTION("Op=RX, Obs=[Z,Z] - " + << StateVectorToName::name) { + std::vector tp{0}; + const size_t num_qubits = 2; + const size_t num_params = 1; + const size_t num_obs = 2; + std::vector jacobian(num_obs * tp.size(), 0); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + + StateVectorT psi(cdata.data(), cdata.size()); + + const auto obs1 = std::make_shared>( + "PauliZ", std::vector{0}); + const auto obs2 = std::make_shared>( + "PauliZ", std::vector{1}); + + auto ops = + OpsData({"RX"}, {{param[0]}}, {{0}}, {false}); + + JacobianData tape{num_params, psi.getLength(), + psi.getData(), {obs1, obs2}, + ops, tp}; + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + CHECK(-sin(param[0]) == Approx(jacobian[0]).margin(1e-7)); + CHECK(0.0 == Approx(jacobian[1 * num_obs - 1]).margin(1e-7)); + } + + DYNAMIC_SECTION("Op=[RX,RX,RX], Obs=[Z,Z,Z] - " + << StateVectorToName::name) { + std::vector tp{0, 1, 2}; + const size_t num_qubits = 3; + const size_t num_params = 3; + const size_t num_obs = 3; + std::vector jacobian(num_obs * tp.size(), 0); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + + StateVectorT psi(cdata.data(), cdata.size()); + + const auto obs1 = std::make_shared>( + "PauliZ", std::vector{0}); + const auto obs2 = std::make_shared>( + "PauliZ", std::vector{1}); + const auto obs3 = std::make_shared>( + "PauliZ", std::vector{2}); + + auto ops = OpsData( 
+ {"RX", "RX", "RX"}, {{param[0]}, {param[1]}, {param[2]}}, + {{0}, {1}, {2}}, {false, false, false}); + + JacobianData tape{num_params, psi.getLength(), + psi.getData(), {obs1, obs2, obs3}, + ops, tp}; + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + CHECK(-sin(param[0]) == Approx(jacobian[0]).margin(1e-7)); + CHECK(-sin(param[1]) == + Approx(jacobian[1 * num_params + 1]).margin(1e-7)); + CHECK(-sin(param[2]) == + Approx(jacobian[2 * num_params + 2]).margin(1e-7)); + } + + DYNAMIC_SECTION("Op=[RX,RX,RX], Obs=[Z,Z,Z], TParams=[0,2] - " + << StateVectorToName::name) { + std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + std::vector t_params{0, 2}; + const size_t num_qubits = 3; + const size_t num_params = 3; + const size_t num_obs = 3; + std::vector jacobian(num_obs * t_params.size(), 0); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + StateVectorT psi(cdata.data(), cdata.size()); + + const auto obs1 = std::make_shared>( + "PauliZ", std::vector{0}); + const auto obs2 = std::make_shared>( + "PauliZ", std::vector{1}); + const auto obs3 = std::make_shared>( + "PauliZ", std::vector{2}); + + auto ops = OpsData( + {"RX", "RX", "RX"}, {{param[0]}, {param[1]}, {param[2]}}, + {{0}, {1}, {2}}, {false, false, false}); + + JacobianData tape{num_params, psi.getLength(), + psi.getData(), {obs1, obs2, obs3}, + ops, t_params}; + + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + CHECK(-sin(param[0]) == Approx(jacobian[0]).margin(1e-7)); + CHECK(0 == Approx(jacobian[1 * t_params.size() + 1]).margin(1e-7)); + CHECK(-sin(param[2]) == + Approx(jacobian[2 * t_params.size() + 1]).margin(1e-7)); + } + + DYNAMIC_SECTION("Op=[RX,RX,RX], Obs=[ZZZ] - " + << StateVectorToName::name) { + std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + std::vector tp{0, 1, 2}; + const size_t num_qubits = 3; + const size_t num_params = 3; + const size_t num_obs = 1; + std::vector jacobian(num_obs * tp.size(), 0); + 
+ std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + StateVectorT psi(cdata.data(), cdata.size()); + + const auto obs = std::make_shared>( + std::make_shared>( + "PauliZ", std::vector{0}), + std::make_shared>( + "PauliZ", std::vector{1}), + std::make_shared>( + "PauliZ", std::vector{2})); + auto ops = OpsData( + {"RX", "RX", "RX"}, {{param[0]}, {param[1]}, {param[2]}}, + {{0}, {1}, {2}}, {false, false, false}); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {obs}, ops, tp}; + + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + + // Computed with parameter shift + CHECK(-0.1755096592645253 == Approx(jacobian[0]).margin(1e-7)); + CHECK(0.26478810666384334 == Approx(jacobian[1]).margin(1e-7)); + CHECK(-0.6312451595102775 == Approx(jacobian[2]).margin(1e-7)); + } + + DYNAMIC_SECTION("Op=Mixed, Obs=[XXX] - " + << StateVectorToName::name) { + std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + std::vector tp{0, 1, 2, 3, 4, 5}; + const size_t num_qubits = 3; + const size_t num_params = 6; + const size_t num_obs = 1; + std::vector jacobian(num_obs * tp.size(), 0); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + StateVectorT psi(cdata.data(), cdata.size()); + + const auto obs = std::make_shared>( + std::make_shared>( + "PauliX", std::vector{0}), + std::make_shared>( + "PauliX", std::vector{1}), + std::make_shared>( + "PauliX", std::vector{2})); + auto ops = OpsData( + {"RZ", "RY", "RZ", "CNOT", "CNOT", "RZ", "RY", "RZ"}, + {{param[0]}, + {param[1]}, + {param[2]}, + {}, + {}, + {param[0]}, + {param[1]}, + {param[2]}}, + {{0}, {0}, {0}, {0, 1}, {1, 2}, {1}, {1}, {1}}, + {false, false, false, false, false, false, false, false}); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {obs}, ops, tp}; + + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + + // Computed with PennyLane using default.qubit.adjoint_jacobian + CHECK(0.0 == 
Approx(jacobian[0]).margin(1e-7)); + CHECK(-0.674214427 == Approx(jacobian[1]).margin(1e-7)); + CHECK(0.275139672 == Approx(jacobian[2]).margin(1e-7)); + CHECK(0.275139672 == Approx(jacobian[3]).margin(1e-7)); + CHECK(-0.0129093062 == Approx(jacobian[4]).margin(1e-7)); + CHECK(0.323846156 == Approx(jacobian[5]).margin(1e-7)); + } + + DYNAMIC_SECTION("Decomposed Rot gate, non computational basis state - " + << StateVectorToName::name) { + const std::vector tp{0, 1, 2}; + const size_t num_params = 3; + const size_t num_obs = 1; + + PrecisionT limit = 2 * M_PI; + const std::vector thetas = linspace(-limit, limit, 7); + + std::vector> expec_results{ + {0, -9.90819496e-01, 0}, + {-8.18996553e-01, 1.62526544e-01, 0}, + {-0.203949, 0.48593716, 0}, + {0, 1, 0}, + {-2.03948985e-01, 4.85937177e-01, 0}, + {-8.18996598e-01, 1.62526487e-01, 0}, + {0, -9.90819511e-01, 0}}; + + const auto obs = std::make_shared>( + "PauliZ", std::vector{0}); + + for (size_t i = 0; i < thetas.size(); i++) { + const PrecisionT theta = thetas[i]; + std::vector local_params{ + theta, std::pow(theta, (PrecisionT)3), + SQRT2() * theta}; + std::vector jacobian(num_obs * tp.size(), 0); + + std::vector cdata{INVSQRT2(), + -INVSQRT2()}; + StateVectorT psi(cdata.data(), cdata.size()); + + auto ops = OpsData( + {"RZ", "RY", "RZ"}, + {{local_params[0]}, {local_params[1]}, {local_params[2]}}, + {{0}, {0}, {0}}, {false, false, false}); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {obs}, ops, tp}; + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(theta); + CAPTURE(jacobian); + + // Computed with PennyLane using default.qubit + CHECK(expec_results[i][0] == Approx(jacobian[0]).margin(1e-4)); + CHECK(expec_results[i][1] == Approx(jacobian[1]).margin(1e-4)); + CHECK(expec_results[i][2] == Approx(jacobian[2]).margin(1e-4)); + } + } + + DYNAMIC_SECTION("Mixed Ops, Obs and TParams- " + << StateVectorToName::name) { + std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + const 
std::vector t_params{1, 2, 3}; + const size_t num_obs = 1; + + PrecisionT limit = 2 * M_PI; + const std::vector thetas = linspace(-limit, limit, 8); + + std::vector local_params{0.543, 0.54, 0.1, 0.5, 1.3, + -2.3, 0.5, -0.5, 0.5}; + std::vector jacobian(num_obs * t_params.size(), 0); + + std::vector cdata{ONE(), ZERO(), + ZERO(), ZERO()}; + StateVectorT psi(cdata.data(), cdata.size()); + + const auto obs = std::make_shared>( + std::make_shared>( + "PauliX", std::vector{0}), + std::make_shared>( + "PauliZ", std::vector{1})); + + auto ops = OpsData( + {"Hadamard", "RX", "CNOT", "RZ", "RY", "RZ", "RZ", "RY", "RZ", + "RZ", "RY", "CNOT"}, + {{}, + {local_params[0]}, + {}, + {local_params[1]}, + {local_params[2]}, + {local_params[3]}, + {local_params[4]}, + {local_params[5]}, + {local_params[6]}, + {local_params[7]}, + {local_params[8]}, + {}}, + {{0}, + {0}, + {0, 1}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {1}, + {0, 1}}, + {false, false, false, false, false, false, false, false, false, + false, false, false}); + + JacobianData tape{ + t_params.size(), psi.getLength(), psi.getData(), {obs}, ops, + t_params}; + adj.adjointJacobian(std::span{jacobian}, tape, true); + + std::vector expected{-0.71429188, 0.04998561, + -0.71904837}; + // Computed with PennyLane using default.qubit + CHECK(expected[0] == Approx(jacobian[0])); + CHECK(expected[1] == Approx(jacobian[1])); + CHECK(expected[2] == Approx(jacobian[2])); + } + + DYNAMIC_SECTION("Op=RX, Obs=Ham[Z0+Z1] - " + << StateVectorToName::name) { + std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + std::vector tp{0}; + const size_t num_qubits = 2; + const size_t num_params = 1; + const size_t num_obs = 1; + std::vector jacobian(num_obs * tp.size(), 0); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + StateVectorT psi(cdata.data(), cdata.size()); + + const auto obs1 = std::make_shared>( + "PauliZ", std::vector{0}); + const auto obs2 = std::make_shared>( + "PauliZ", std::vector{1}); + + 
auto ham = + Hamiltonian::create({0.3, 0.7}, {obs1, obs2}); + + auto ops = + OpsData({"RX"}, {{param[0]}}, {{0}}, {false}); + + JacobianData tape{ + num_params, psi.getLength(), psi.getData(), {ham}, ops, tp}; + + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + CHECK(-0.3 * sin(param[0]) == Approx(jacobian[0]).margin(1e-7)); + } + + DYNAMIC_SECTION("Op=[RX,RX,RX], Obs=Ham[Z0+Z1+Z2], TParams=[0,2] - " + << StateVectorToName::name) { + std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + std::vector t_params{0, 2}; + const size_t num_qubits = 3; + const size_t num_params = 3; + const size_t num_obs = 1; + std::vector jacobian(num_obs * t_params.size(), 0); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + StateVectorT psi(cdata.data(), cdata.size()); + + auto obs1 = std::make_shared>( + "PauliZ", std::vector{0}); + auto obs2 = std::make_shared>( + "PauliZ", std::vector{1}); + auto obs3 = std::make_shared>( + "PauliZ", std::vector{2}); + + auto ham = Hamiltonian::create({0.47, 0.32, 0.96}, + {obs1, obs2, obs3}); + + auto ops = OpsData( + {"RX", "RX", "RX"}, {{param[0]}, {param[1]}, {param[2]}}, + {{0}, {1}, {2}}, {false, false, false}); + + JacobianData tape{num_params, psi.getLength(), + psi.getData(), {ham}, + ops, t_params}; + adj.adjointJacobian(std::span{jacobian}, tape, true); + + CAPTURE(jacobian); + CHECK((-0.47 * sin(param[0]) == Approx(jacobian[0]).margin(1e-7))); + CHECK((-0.96 * sin(param[2]) == Approx(jacobian[1]).margin(1e-7))); + } + + DYNAMIC_SECTION("HermitianObs - " + << StateVectorToName::name) { + std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + std::vector t_params{0, 2}; + const size_t num_qubits = 3; + const size_t num_params = 3; + const size_t num_obs = 1; + std::vector jacobian1(num_obs * t_params.size(), 0); + std::vector jacobian2(num_obs * t_params.size(), 0); + + std::vector cdata(1U << num_qubits); + cdata[0] = ComplexT{1, 0}; + StateVectorT psi(cdata.data(), cdata.size()); + 
+ auto obs1 = std::make_shared>( + std::make_shared>( + "PauliZ", std::vector{0}), + std::make_shared>( + "PauliZ", std::vector{1})); + auto obs2 = std::make_shared>( + std::vector{1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, + 0, 0, 1}, + std::vector{0, 1}); + + auto ops = OpsData( + {"RX", "RX", "RX"}, {{param[0]}, {param[1]}, {param[2]}}, + {{0}, {1}, {2}}, {false, false, false}); + + JacobianData tape1{num_params, psi.getLength(), + psi.getData(), {obs1}, + ops, t_params}; + + JacobianData tape2{num_params, psi.getLength(), + psi.getData(), {obs2}, + ops, t_params}; + adj.adjointJacobian(std::span{jacobian1}, tape1, true); + adj.adjointJacobian(std::span{jacobian2}, tape2, true); + + CHECK((jacobian1 == PLApprox(jacobian2).margin(1e-7))); + } + + testAdjointJacobian(); + } +} + +TEST_CASE("Algorithms::adjointJacobian", "[Algorithms]") { + if constexpr (BACKEND_FOUND) { + testAdjointJacobian(); + } +} \ No newline at end of file diff --git a/pennylane_lightning/src/tests/runner_main.cpp b/pennylane_lightning/core/src/algorithms/tests/runner_algorithms.cpp similarity index 100% rename from pennylane_lightning/src/tests/runner_main.cpp rename to pennylane_lightning/core/src/algorithms/tests/runner_algorithms.cpp diff --git a/pennylane_lightning/core/src/bindings/Bindings.cpp b/pennylane_lightning/core/src/bindings/Bindings.cpp new file mode 100644 index 0000000000..62d3e419c0 --- /dev/null +++ b/pennylane_lightning/core/src/bindings/Bindings.cpp @@ -0,0 +1,59 @@ +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +/** + * @file Bindings.cpp + * Export C++ functions to Python using Pybind. + */ +#include "Bindings.hpp" + +#include "pybind11/pybind11.h" + +// Defining the module name. +#if defined(_ENABLE_PLQUBIT) +#define LIGHTNING_MODULE_NAME lightning_qubit_ops +#elif _ENABLE_PLKOKKOS == 1 +#define LIGHTNING_MODULE_NAME lightning_kokkos_ops +#endif + +#if defined(LIGHTNING_MODULE_NAME) +/// @cond DEV +namespace { +using namespace Pennylane; +} // namespace +/// @endcond + +/** + * @brief Add C++ classes, methods and functions to Python module. + */ +PYBIND11_MODULE( + LIGHTNING_MODULE_NAME, // NOLINT: No control over Pybind internals + m) { + // Suppress doxygen autogenerated signatures + + pybind11::options options; + options.disable_function_signatures(); + + // Register functionality for numpy array memory alignment: + registerArrayAlignmentBindings(m); + + // Register bindings for general info: + registerInfo(m); + + // Register bindings for backend-specific info: + registerBackendSpecificInfo(m); + + registerLightningClassBindings(m); +} + +#endif \ No newline at end of file diff --git a/pennylane_lightning/core/src/bindings/Bindings.hpp b/pennylane_lightning/core/src/bindings/Bindings.hpp new file mode 100644 index 0000000000..7f08863f63 --- /dev/null +++ b/pennylane_lightning/core/src/bindings/Bindings.hpp @@ -0,0 +1,623 @@ +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @file Bindings.hpp + * Defines device-agnostic operations to export to Python and other utility + * functions interfacing with Pybind11. + */ + +#pragma once +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "CPUMemoryModel.hpp" // CPUMemoryModel, getMemoryModel, bestCPUMemoryModel, getAlignment +#include "JacobianData.hpp" +#include "Macros.hpp" // CPUArch +#include "Memory.hpp" // alignedAlloc +#include "Observables.hpp" +#include "Util.hpp" // for_each_enum + +#ifdef _ENABLE_PLQUBIT + +#include "AdjointJacobianLQubit.hpp" +#include "LQubitBindings.hpp" // StateVectorBackends, registerBackendClassSpecificBindings, registerBackendSpecificMeasurements, registerBackendSpecificAlgorithms +#include "MeasurementsLQubit.hpp" +#include "ObservablesLQubit.hpp" + +/// @cond DEV +namespace { +using namespace Pennylane::LightningQubit; +using namespace Pennylane::LightningQubit::Algorithms; +using namespace Pennylane::LightningQubit::Observables; +using namespace Pennylane::LightningQubit::Measures; +} // namespace +/// @endcond + +#elif _ENABLE_PLKOKKOS == 1 + +#include "AdjointJacobianKokkos.hpp" +#include "LKokkosBindings.hpp" // StateVectorBackends, registerBackendClassSpecificBindings, registerBackendSpecificMeasurements, registerBackendSpecificAlgorithms +#include "MeasurementsKokkos.hpp" +#include "ObservablesKokkos.hpp" + +/// @cond DEV +namespace { +using namespace Pennylane::LightningKokkos; +using namespace Pennylane::LightningKokkos::Algorithms; +using namespace Pennylane::LightningKokkos::Observables; +using namespace Pennylane::LightningKokkos::Measures; +} // namespace + /// @endcond + +#else + +static_assert(false, "Backend not found."); + +#endif + +/// @cond DEV +namespace { +using Pennylane::Util::bestCPUMemoryModel; +using Pennylane::Util::CPUMemoryModel; +using 
Pennylane::Util::getMemoryModel; +} // namespace +/// @endcond + +namespace py = pybind11; + +namespace Pennylane { +/** + * @brief Create a State Vector From a 1D Numpy Data object. + * + * @tparam StateVectorT + * @param numpyArray inout data + * @return StateVectorT + */ +template +auto createStateVectorFromNumpyData( + const py::array_t> + &numpyArray) -> StateVectorT { + using ComplexT = typename StateVectorT::ComplexT; + py::buffer_info numpyArrayInfo = numpyArray.request(); + if (numpyArrayInfo.ndim != 1) { + throw std::invalid_argument( + "NumPy array must be a 1-dimensional array"); + } + if (numpyArrayInfo.itemsize != sizeof(ComplexT)) { + throw std::invalid_argument( + "NumPy array must be of type np.complex64 or np.complex128"); + } + auto *data_ptr = static_cast(numpyArrayInfo.ptr); + return StateVectorT( + {data_ptr, static_cast(numpyArrayInfo.shape[0])}); +} + +/** + * @brief Get memory alignment of a given numpy array. + * + * @param numpyArray Pybind11's numpy array type. + * @return CPUMemoryModel Memory model describing alignment + */ +auto getNumpyArrayAlignment(const py::array &numpyArray) -> CPUMemoryModel { + return getMemoryModel(numpyArray.request().ptr); +} + +/** + * @brief Create an aligned numpy array for a given type, memory model and array + * size. 
+ * + * @tparam T Datatype of numpy array to create + * @param memory_model Memory model to use + * @param size Size of the array to create + * @return Numpy array + */ +template +auto alignedNumpyArray(CPUMemoryModel memory_model, size_t size) -> py::array { + using Pennylane::Util::alignedAlloc; + if (getAlignment(memory_model) > alignof(std::max_align_t)) { + void *ptr = + alignedAlloc(getAlignment(memory_model), sizeof(T) * size); + auto capsule = py::capsule(ptr, &Util::alignedFree); + return py::array{py::dtype::of(), {size}, {sizeof(T)}, ptr, capsule}; + } + void *ptr = static_cast(new T[size]); + auto capsule = + py::capsule(ptr, [](void *p) { delete static_cast(p); }); + return py::array{py::dtype::of(), {size}, {sizeof(T)}, ptr, capsule}; +} +/** + * @brief Create a numpy array whose underlying data is allocated by + * lightning. + * + * See https://github.com/pybind/pybind11/issues/1042#issuecomment-325941022 + * for capsule usage. + * + * @param size Size of the array to create + * @param dt Pybind11's datatype object + */ +auto allocateAlignedArray(size_t size, const py::dtype &dt) -> py::array { + auto memory_model = bestCPUMemoryModel(); + + if (dt.is(py::dtype::of())) { + return alignedNumpyArray(memory_model, size); + } + if (dt.is(py::dtype::of())) { + return alignedNumpyArray(memory_model, size); + } + if (dt.is(py::dtype::of>())) { + return alignedNumpyArray>(memory_model, size); + } + if (dt.is(py::dtype::of>())) { + return alignedNumpyArray>(memory_model, size); + } + throw py::type_error("Unsupported datatype."); +} + +/** + * @brief Register functionality for numpy array memory alignment. 
+ * + * @param m Pybind module + */ +void registerArrayAlignmentBindings(py::module_ &m) { + /* Add CPUMemoryModel enum class */ + py::enum_(m, "CPUMemoryModel", py::module_local()) + .value("Unaligned", CPUMemoryModel::Unaligned) + .value("Aligned256", CPUMemoryModel::Aligned256) + .value("Aligned512", CPUMemoryModel::Aligned512); + + /* Add array alignment functionality */ + m.def("get_alignment", &getNumpyArrayAlignment, + "Get alignment of an underlying data for a numpy array."); + m.def("allocate_aligned_array", &allocateAlignedArray, + "Get numpy array whose underlying data is aligned."); + m.def("best_alignment", &bestCPUMemoryModel, + "Best memory alignment. for the simulator."); +} + +/** + * @brief Return basic information of the compiled binary. + */ +auto getCompileInfo() -> py::dict { + using namespace Pennylane::Util; + using namespace py::literals; + + const std::string_view cpu_arch_str = [] { + switch (cpu_arch) { + case CPUArch::X86_64: + return "x86_64"; + case CPUArch::PPC64: + return "PPC64"; + case CPUArch::ARM: + return "ARM"; + default: + return "Unknown"; + } + }(); + + const std::string_view compiler_name_str = [] { + switch (compiler) { + case Compiler::GCC: + return "GCC"; + case Compiler::Clang: + return "Clang"; + case Compiler::MSVC: + return "MSVC"; + case Compiler::NVCC: + return "NVCC"; + case Compiler::NVHPC: + return "NVHPC"; + default: + return "Unknown"; + } + }(); + + const auto compiler_version_str = getCompilerVersion(); + + return py::dict("cpu.arch"_a = cpu_arch_str, + "compiler.name"_a = compiler_name_str, + "compiler.version"_a = compiler_version_str, + "AVX2"_a = use_avx2, "AVX512F"_a = use_avx512f); +} + +/** + * @brief Return basic information of runtime environment. 
+ */ +auto getRuntimeInfo() -> py::dict { + using Pennylane::Util::RuntimeInfo; + using namespace py::literals; + + return py::dict("AVX"_a = RuntimeInfo::AVX(), + "AVX2"_a = RuntimeInfo::AVX2(), + "AVX512F"_a = RuntimeInfo::AVX512F()); +} + +/** + * @brief Register bindings for general info. + * + * @param m Pybind11 module. + */ +void registerInfo(py::module_ &m) { + /* Add compile info */ + m.def("compile_info", &getCompileInfo, "Compiled binary information."); + + /* Add runtime info */ + m.def("runtime_info", &getRuntimeInfo, "Runtime information."); +} + +/** + * @brief Register observable classes. + * + * @tparam StateVectorT + * @param m Pybind module + */ +template void registerObservables(py::module_ &m) { + using PrecisionT = + typename StateVectorT::PrecisionT; // Statevector's precision. + using ComplexT = + typename StateVectorT::ComplexT; // Statevector's complex type. + using ParamT = PrecisionT; // Parameter's data precision + + const std::string bitsize = + std::to_string(sizeof(std::complex) * 8); + + using np_arr_c = py::array_t, py::array::c_style>; + using np_arr_r = py::array_t; + + std::string class_name; + + class_name = "ObservableC" + bitsize; + py::class_, + std::shared_ptr>>(m, class_name.c_str(), + py::module_local()); + + class_name = "NamedObsC" + bitsize; + py::class_, std::shared_ptr>, + Observable>(m, class_name.c_str(), + py::module_local()) + .def(py::init( + [](const std::string &name, const std::vector &wires) { + return NamedObs(name, wires); + })) + .def("__repr__", &NamedObs::getObsName) + .def("get_wires", &NamedObs::getWires, + "Get wires of observables") + .def( + "__eq__", + [](const NamedObs &self, py::handle other) -> bool { + if (!py::isinstance>(other)) { + return false; + } + auto other_cast = other.cast>(); + return self == other_cast; + }, + "Compare two observables"); + + class_name = "HermitianObsC" + bitsize; + py::class_, + std::shared_ptr>, + Observable>(m, class_name.c_str(), + py::module_local()) + 
.def(py::init( + [](const np_arr_c &matrix, const std::vector &wires) { + auto buffer = matrix.request(); + const auto *ptr = static_cast(buffer.ptr); + return HermitianObs( + std::vector(ptr, ptr + buffer.size), wires); + })) + .def("__repr__", &HermitianObs::getObsName) + .def("get_wires", &HermitianObs::getWires, + "Get wires of observables") + .def( + "__eq__", + [](const HermitianObs &self, + py::handle other) -> bool { + if (!py::isinstance>(other)) { + return false; + } + auto other_cast = other.cast>(); + return self == other_cast; + }, + "Compare two observables"); + + class_name = "TensorProdObsC" + bitsize; + py::class_, + std::shared_ptr>, + Observable>(m, class_name.c_str(), + py::module_local()) + .def(py::init( + [](const std::vector>> + &obs) { return TensorProdObs(obs); })) + .def("__repr__", &TensorProdObs::getObsName) + .def("get_wires", &TensorProdObs::getWires, + "Get wires of observables") + .def( + "__eq__", + [](const TensorProdObs &self, + py::handle other) -> bool { + if (!py::isinstance>(other)) { + return false; + } + auto other_cast = other.cast>(); + return self == other_cast; + }, + "Compare two observables"); + + class_name = "HamiltonianC" + bitsize; + using ObsPtr = std::shared_ptr>; + py::class_, + std::shared_ptr>, + Observable>(m, class_name.c_str(), + py::module_local()) + .def(py::init( + [](const np_arr_r &coeffs, const std::vector &obs) { + auto buffer = coeffs.request(); + const auto ptr = static_cast(buffer.ptr); + return Hamiltonian{ + std::vector(ptr, ptr + buffer.size), obs}; + })) + .def("__repr__", &Hamiltonian::getObsName) + .def("get_wires", &Hamiltonian::getWires, + "Get wires of observables") + .def( + "__eq__", + [](const Hamiltonian &self, + py::handle other) -> bool { + if (!py::isinstance>(other)) { + return false; + } + auto other_cast = other.cast>(); + return self == other_cast; + }, + "Compare two observables"); +} + +/** + * @brief Register agnostic measurements class functionalities. 
+ * + * @tparam StateVectorT + * @tparam PyClass + * @param pyclass Pybind11's measurements class to bind methods. + */ +template +void registerBackendAgnosticMeasurements(PyClass &pyclass) { + using PrecisionT = + typename StateVectorT::PrecisionT; // Statevector's precision. + using ParamT = PrecisionT; // Parameter's data precision + + pyclass + .def("probs", + [](Measurements &M, + const std::vector &wires) { + return py::array_t(py::cast(M.probs(wires))); + }) + .def("probs", + [](Measurements &M) { + return py::array_t(py::cast(M.probs())); + }) + .def( + "expval", + [](Measurements &M, + const std::shared_ptr> &ob) { + return M.expval(*ob); + }, + "Expected value of an observable object.") + .def( + "var", + [](Measurements &M, + const std::shared_ptr> &ob) { + return M.var(*ob); + }, + "Variance of an observable object.") + .def("generate_samples", [](Measurements &M, + size_t num_wires, size_t num_shots) { + auto &&result = M.generate_samples(num_shots); + const size_t ndim = 2; + const std::vector shape{num_shots, num_wires}; + constexpr auto sz = sizeof(size_t); + const std::vector strides{sz * num_wires, sz}; + // return 2-D NumPy array + return py::array(py::buffer_info( + result.data(), /* data as contiguous array */ + sz, /* size of one scalar */ + py::format_descriptor::format(), /* data type */ + ndim, /* number of dimensions */ + shape, /* shape of the matrix */ + strides /* strides for each axis */ + )); + }); +} + +/** + * @brief Register the adjoint Jacobian method. 
+ */ +template +auto registerAdjointJacobian( + AdjointJacobian &adjoint_jacobian, const StateVectorT &sv, + const std::vector>> &observables, + const OpsData &operations, + const std::vector &trainableParams) + -> py::array_t { + using PrecisionT = typename StateVectorT::PrecisionT; + std::vector jac(observables.size() * trainableParams.size(), + PrecisionT{0.0}); + const JacobianData jd{operations.getTotalNumParams(), + sv.getLength(), + sv.getData(), + observables, + operations, + trainableParams}; + + adjoint_jacobian.adjointJacobian(std::span{jac}, jd); + + return py::array_t(py::cast(jac)); +} + +/** + * @brief Register agnostic algorithms. + * + * @tparam StateVectorT + * @param m Pybind module + */ +template +void registerBackendAgnosticAlgorithms(py::module_ &m) { + using PrecisionT = + typename StateVectorT::PrecisionT; // Statevector's precision + using ComplexT = + typename StateVectorT::ComplexT; // Statevector's complex type + using ParamT = PrecisionT; // Parameter's data precision + + using np_arr_c = py::array_t, py::array::c_style>; + + const std::string bitsize = + std::to_string(sizeof(std::complex) * 8); + + std::string class_name; + + //***********************************************************************// + // Operations + //***********************************************************************// + + class_name = "OpsStructC" + bitsize; + py::class_>(m, class_name.c_str(), py::module_local()) + .def(py::init &, + const std::vector> &, + const std::vector> &, + const std::vector &, + const std::vector> &>()) + .def("__repr__", [](const OpsData &ops) { + using namespace Pennylane::Util; + std::ostringstream ops_stream; + for (size_t op = 0; op < ops.getSize(); op++) { + ops_stream << "{'name': " << ops.getOpsName()[op]; + ops_stream << ", 'params': " << ops.getOpsParams()[op]; + ops_stream << ", 'inv': " << ops.getOpsInverses()[op]; + ops_stream << "}"; + if (op < ops.getSize() - 1) { + ops_stream << ","; + } + } + return "Operations: [" + 
ops_stream.str() + "]"; + }); + + /** + * Create operation list. + * */ + std::string function_name = "create_ops_listC" + bitsize; + m.def( + function_name.c_str(), + [](const std::vector &ops_name, + const std::vector> &ops_params, + const std::vector> &ops_wires, + const std::vector &ops_inverses, + const std::vector &ops_matrices) { + std::vector> conv_matrices( + ops_matrices.size()); + for (size_t op = 0; op < ops_name.size(); op++) { + const auto m_buffer = ops_matrices[op].request(); + if (m_buffer.size) { + const auto m_ptr = + static_cast(m_buffer.ptr); + conv_matrices[op] = + std::vector{m_ptr, m_ptr + m_buffer.size}; + } + } + return OpsData{ops_name, ops_params, ops_wires, + ops_inverses, conv_matrices}; + }, + "Create a list of operations from data."); + + //***********************************************************************// + // Adjoint Jacobian + //***********************************************************************// + class_name = "AdjointJacobianC" + bitsize; + py::class_>(m, class_name.c_str(), + py::module_local()) + .def(py::init<>()) + .def("__call__", ®isterAdjointJacobian, + "Adjoint Jacobian method."); +} + +/** + * @brief Templated class to build lightning class bindings. + * + * @tparam StateVectorT State vector type + * @param m Pybind11 module. + */ +template void lightningClassBindings(py::module_ &m) { + using PrecisionT = + typename StateVectorT::PrecisionT; // Statevector's precision. 
+ // Enable module name to be based on size of complex datatype + const std::string bitsize = + std::to_string(sizeof(std::complex) * 8); + + //***********************************************************************// + // StateVector + //***********************************************************************// + std::string class_name = "StateVectorC" + bitsize; + auto pyclass = + py::class_(m, class_name.c_str(), py::module_local()); + pyclass.def(py::init(&createStateVectorFromNumpyData)) + .def_property_readonly("size", &StateVectorT::getLength); + + registerBackendClassSpecificBindings(pyclass); + + //***********************************************************************// + // Observables + //***********************************************************************// + /* Observables submodule */ + py::module_ obs_submodule = + m.def_submodule("observables", "Submodule for observables classes."); + registerObservables(obs_submodule); + + //***********************************************************************// + // Measurements + //***********************************************************************// + class_name = "MeasurementsC" + bitsize; + auto pyclass_measurements = py::class_>( + m, class_name.c_str(), py::module_local()); + + pyclass_measurements.def(py::init()); + registerBackendAgnosticMeasurements(pyclass_measurements); + registerBackendSpecificMeasurements(pyclass_measurements); + + //***********************************************************************// + // Algorithms + //***********************************************************************// + /* Algorithms submodule */ + py::module_ alg_submodule = m.def_submodule( + "algorithms", "Submodule for the algorithms functionality."); + registerBackendAgnosticAlgorithms(alg_submodule); + registerBackendSpecificAlgorithms(alg_submodule); +} + +template +void registerLightningClassBindings(py::module_ &m) { + if constexpr (!std::is_same_v) { + using StateVectorT = typename TypeList::Type; + 
lightningClassBindings(m); + registerLightningClassBindings(m); + } +} +} // namespace Pennylane diff --git a/pennylane_lightning/core/src/bindings/BindingsBase.hpp b/pennylane_lightning/core/src/bindings/BindingsBase.hpp new file mode 100644 index 0000000000..4f0e0357b2 --- /dev/null +++ b/pennylane_lightning/core/src/bindings/BindingsBase.hpp @@ -0,0 +1,82 @@ +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once +#include +#include +#include +#include +#include +#include + +#include + +#include "ConstantUtil.hpp" // lookup +#include "GateOperation.hpp" + +/// @cond DEV +namespace {} // namespace +/// @endcond + +namespace py = pybind11; + +namespace Pennylane::Bindings { +/** + * @brief Register matrix. + */ +template +void registerMatrix( + StateVectorT &st, + const py::array_t, + py::array::c_style | py::array::forcecast> &matrix, + const std::vector &wires, bool inverse = false) { + using ComplexT = typename StateVectorT::ComplexT; + st.applyMatrix(static_cast(matrix.request().ptr), wires, + inverse); +} + +/** + * @brief Register StateVector class to pybind. 
+ * + * @tparam StateVectorT Statevector type to register + * @tparam Pyclass Pybind11's class object type + * + * @param pyclass Pybind11's class object to bind statevector + */ +template +void registerGatesForStateVector(PyClass &pyclass) { + using PrecisionT = + typename StateVectorT::PrecisionT; // Statevector's precision + using ParamT = PrecisionT; // Parameter's data precision + + using Pennylane::Gates::GateOperation; + using Pennylane::Util::for_each_enum; + namespace Constant = Pennylane::Gates::Constant; + + pyclass.def("applyMatrix", ®isterMatrix, + "Apply a given matrix to wires."); + + for_each_enum([&pyclass](GateOperation gate_op) { + using Pennylane::Util::lookup; + const auto gate_name = + std::string(lookup(Constant::gate_names, gate_op)); + const std::string doc = "Apply the " + gate_name + " gate."; + auto func = [gate_name = gate_name]( + StateVectorT &sv, const std::vector &wires, + bool inverse, const std::vector ¶ms) { + sv.applyOperation(gate_name, wires, inverse, params); + }; + pyclass.def(gate_name.c_str(), func, doc.c_str()); + }); +} +} // namespace Pennylane::Bindings diff --git a/pennylane_lightning/core/src/bindings/CMakeLists.txt b/pennylane_lightning/core/src/bindings/CMakeLists.txt new file mode 100644 index 0000000000..7dd633fdda --- /dev/null +++ b/pennylane_lightning/core/src/bindings/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.20) + +project(lightning_bindings LANGUAGES CXX) + +add_library(lightning_bindings INTERFACE) + +target_include_directories(lightning_bindings INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}) + +target_link_libraries(lightning_bindings INTERFACE lightning_utils) + +set_property(TARGET lightning_bindings PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/pennylane_lightning/core/src/gates/CMakeLists.txt b/pennylane_lightning/core/src/gates/CMakeLists.txt new file mode 100644 index 0000000000..6cbd5cf603 --- /dev/null +++ b/pennylane_lightning/core/src/gates/CMakeLists.txt @@ -0,0 +1,6 @@ 
+project(lightning_gates) + +add_library(lightning_gates INTERFACE) +target_include_directories(lightning_gates INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}) + +target_link_libraries(lightning_gates INTERFACE lightning_utils) diff --git a/pennylane_lightning/src/gates/Constant.hpp b/pennylane_lightning/core/src/gates/Constant.hpp similarity index 99% rename from pennylane_lightning/src/gates/Constant.hpp rename to pennylane_lightning/core/src/gates/Constant.hpp index c80a38acf4..ca94479cf3 100644 --- a/pennylane_lightning/src/gates/Constant.hpp +++ b/pennylane_lightning/core/src/gates/Constant.hpp @@ -1,4 +1,4 @@ -// Copyright 2021 Xanadu Quantum Technologies Inc. +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,8 +17,9 @@ */ #pragma once +#include + #include "GateOperation.hpp" -#include "KernelType.hpp" #include "TypeList.hpp" namespace Pennylane::Gates::Constant { diff --git a/pennylane_lightning/src/gates/GateOperation.hpp b/pennylane_lightning/core/src/gates/GateOperation.hpp similarity index 95% rename from pennylane_lightning/src/gates/GateOperation.hpp rename to pennylane_lightning/core/src/gates/GateOperation.hpp index 8f590bd7b8..a253f6ad4d 100644 --- a/pennylane_lightning/src/gates/GateOperation.hpp +++ b/pennylane_lightning/core/src/gates/GateOperation.hpp @@ -1,4 +1,4 @@ -// Copyright 2021 Xanadu Quantum Technologies Inc. +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ * Defines possible operations. 
*/ #pragma once -#include +#include #include #include @@ -63,7 +63,7 @@ enum class GateOperation : uint32_t { DoubleExcitation, DoubleExcitationMinus, DoubleExcitationPlus, - /* Mutli-qubit gates */ + /* Multi-qubit gates */ MultiRZ, /* END (placeholder) */ END diff --git a/pennylane_lightning/core/src/gates/Gates.hpp b/pennylane_lightning/core/src/gates/Gates.hpp new file mode 100644 index 0000000000..fe9add289a --- /dev/null +++ b/pennylane_lightning/core/src/gates/Gates.hpp @@ -0,0 +1,1050 @@ +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "Util.hpp" + +/// @cond DEV +namespace { +using namespace Pennylane::Util; +} // namespace +/// @endcond + +namespace Pennylane::Gates { +/** + * @brief Create a matrix representation of the PauliX gate data in row-major + * format. + * + * @tparam ComplexT Required precision of gate (`float` or `double`). + * @return constexpr std::vector> Return constant expression + * of PauliX data. + */ +template