From 3a8385e01397995a1efa0c6c0977eb94e6e1661d Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 28 Oct 2024 17:11:36 +0000 Subject: [PATCH 01/20] Create v0.39.0 RC branch. --- .github/CHANGELOG.md | 2 +- pennylane_lightning/core/_version.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index b863e65705..27f87c6638 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -1,4 +1,4 @@ -# Release 0.39.0-dev +# Release 0.39.0 ### New features since last release diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index e1f82227d5..85c5379edb 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-dev51" +__version__ = "0.39.0-rc0" From a38b0363b98c87de84216ecded9d27cd458944d6 Mon Sep 17 00:00:00 2001 From: Joseph Lee <40768758+josephleekl@users.noreply.github.com> Date: Wed, 30 Oct 2024 09:57:53 -0400 Subject: [PATCH 02/20] Fix LK build path for catalyst shared library (#968) ### Before submitting Please complete the following checklist when submitting a PR: - [X] All new features must include a unit test. If you've fixed a bug or added code that should be tested, add a test to the [`tests`](../tests) directory! - [ ] All new functions and code must be clearly commented and documented. If you do make documentation changes, make sure that the docs build and render correctly by running `make docs`. - [ ] Ensure that the test suite passes, by running `make test`. - [X] Add a new entry to the `.github/CHANGELOG.md` file, summarizing the change, and including a link back to the PR. - [ ] Ensure that code is properly formatted by running `make format`. When all the above are checked, delete everything above the dashed line and fill in the pull request template. ------------------------------------------------------------------------------------------------------------ **Context:** This PR fixes Lightning Kokkos with Catalyst failing due to wrong build path supplied to `setup.py` and lightning kokkos in editable mode. **Description of the Change:** **Benefits:** **Possible Drawbacks:** **Related GitHub Issues:** [sc-77123] --- .github/CHANGELOG.md | 5 ++++- .github/workflows/tests_lkcpu_python.yml | 11 +++++++++++ .../lightning_kokkos/lightning_kokkos.py | 2 +- setup.py | 2 +- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 27f87c6638..a3acaba0e3 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -131,6 +131,9 @@ ### Bug fixes +* Fix `liblightning_kokkos_catalyst.so` not copied to correct build path for editable installation. + [(#968)](https://github.com/PennyLaneAI/pennylane-lightning/pull/968) + * Fix PTM stable latest related to `default.qubit.legacy`. [(#966)](https://github.com/PennyLaneAI/pennylane-lightning/pull/966) @@ -156,7 +159,7 @@ This release contains contributions from (in alphabetical order): -Ali Asadi, Amintor Dusko, Luis Alfredo Nuñez Meneses, Vincent Michaud-Rioux, Lee J. O'Riordan, Mudit Pandey, Shuli Shu, Haochen Paul Wang +Ali Asadi, Amintor Dusko, Joseph Lee, Luis Alfredo Nuñez Meneses, Vincent Michaud-Rioux, Lee J. 
O'Riordan, Mudit Pandey, Shuli Shu, Haochen Paul Wang --- diff --git a/.github/workflows/tests_lkcpu_python.yml b/.github/workflows/tests_lkcpu_python.yml index 6b55a9e734..909831aa83 100644 --- a/.github/workflows/tests_lkcpu_python.yml +++ b/.github/workflows/tests_lkcpu_python.yml @@ -264,6 +264,17 @@ jobs: pl-device-test --device ${DEVICENAME} --shots=None --skip-ops $COVERAGE_FLAGS --cov-append mv .coverage ${{ github.workspace }}/.coverage-${{ github.job }}-${{ matrix.pl_backend }}-${{ matrix.group }} + - name: Test editable install + run: | + rm -r build + pip uninstall pennylane-lightning pennylane-lightning-kokkos -y + python scripts/configure_pyproject_toml.py + SKIP_COMPILATION=True python -m pip install -e . --config-settings editable_mode=compat + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + PL_BACKEND=${DEVICENAME} python scripts/configure_pyproject_toml.py + python -m pip install -e . --config-settings editable_mode=compat -vv + PL_DEVICE=${DEVICENAME} python -m pytest tests/test_device.py $COVERAGE_FLAGS + - name: Upload test durations uses: actions/upload-artifact@v4 with: diff --git a/pennylane_lightning/lightning_kokkos/lightning_kokkos.py b/pennylane_lightning/lightning_kokkos/lightning_kokkos.py index b30ca1ad21..42a27e969e 100644 --- a/pennylane_lightning/lightning_kokkos/lightning_kokkos.py +++ b/pennylane_lightning/lightning_kokkos/lightning_kokkos.py @@ -534,7 +534,7 @@ def get_c_interface(): # lib.--" # To avoid mismatching the folder name, we search for the shared object instead. # TODO: locate where the naming convention of the folder is decided and replicate it here. - editable_mode_path = package_root.parent.parent / "build" + editable_mode_path = package_root.parent.parent / "build_lightning_kokkos" for path, _, files in os.walk(editable_mode_path): if lib_name in files: lib_location = (Path(path) / lib_name).as_posix() diff --git a/setup.py b/setup.py index 0ef706955f..048219ed2b 100644 --- a/setup.py +++ b/setup.py @@ -161,7 +161,7 @@ def build_extension(self, ext: CMakeExtension): if platform.system() in ["Linux", "Darwin"]: shared_lib_ext = {"Linux": ".so", "Darwin": ".dylib"}[platform.system()] source = os.path.join(f"{extdir}", f"lib{backend}_catalyst{shared_lib_ext}") - destination = os.path.join(os.getcwd(), "build") + destination = os.path.join(os.getcwd(), self.build_temp) shutil.copy(source, destination) with open(os.path.join("pennylane_lightning", "core", "_version.py"), encoding="utf-8") as f: From 0fc077e1c37e530d29b66be60271c7b6ab95e729 Mon Sep 17 00:00:00 2001 From: Shuli Shu <31480676+multiphaseCFD@users.noreply.github.com> Date: Wed, 30 Oct 2024 12:30:35 -0400 Subject: [PATCH 03/20] Integrate LGPU to Catalyst (#928) ### Before submitting Please complete the following checklist when submitting a PR: - [ ] All new features must include a unit test. If you've fixed a bug or added code that should be tested, add a test to the [`tests`](../tests) directory! - [ ] All new functions and code must be clearly commented and documented. If you do make documentation changes, make sure that the docs build and render correctly by running `make docs`. - [ ] Ensure that the test suite passes, by running `make test`. - [x] Add a new entry to the `.github/CHANGELOG.md` file, summarizing the change, and including a link back to the PR. - [x] Ensure that code is properly formatted by running `make format`. When all the above are checked, delete everything above the dashed line and fill in the pull request template. 
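A minimal usage sketch of the new capability (assuming Catalyst and a CUDA-capable GPU are available; device options and decompositions are left to their defaults):

```python
# Sketch only: compile a small circuit with Catalyst and execute it on lightning.gpu.
import pennylane as qml
from catalyst import qjit

dev = qml.device("lightning.gpu", wires=2)

@qjit
@qml.qnode(dev)
def circuit(theta):
    qml.RX(theta, wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(1))

print(circuit(0.5))
```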
------------------------------------------------------------------------------------------------------------ **Context:** [SC-73065] **Description of the Change:** **Benefits:** **Possible Drawbacks:** **Related GitHub Issues:** --------- Co-authored-by: ringo-but-quantum Co-authored-by: Ali Asadi <10773383+maliasadi@users.noreply.github.com> --- .github/CHANGELOG.md | 3 + .github/workflows/tests_gpu_python.yml | 13 + cmake/support_catalyst.cmake | 74 + .../simulators/lightning_gpu/CMakeLists.txt | 1 + .../lightning_gpu/catalyst/CMakeLists.txt | 22 + .../catalyst/LightningGPUObsManager.hpp | 208 ++ .../catalyst/LightningGPUSimulator.cpp | 558 +++++ .../catalyst/LightningGPUSimulator.hpp | 179 ++ .../catalyst/tests/CMakeLists.txt | 39 + .../tests/Test_LightningGPUGradient.cpp | 298 +++ .../tests/Test_LightningGPUMeasures.cpp | 1825 +++++++++++++++++ .../tests/Test_LightningGPUSimulator.cpp | 724 +++++++ .../tests/runner_lightning_gpu_catalyst.cpp | 2 + .../lightning_kokkos/catalyst/CMakeLists.txt | 67 +- .../lightning_gpu/lightning_gpu.py | 43 + .../lightning_gpu/lightning_gpu.toml | 98 +- setup.py | 5 + tests/test_device.py | 15 + 18 files changed, 4062 insertions(+), 112 deletions(-) create mode 100644 cmake/support_catalyst.cmake create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/CMakeLists.txt create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUObsManager.hpp create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.cpp create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.hpp create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/CMakeLists.txt create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUGradient.cpp create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUMeasures.cpp create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUSimulator.cpp create mode 100644 pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/runner_lightning_gpu_catalyst.cpp diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index a3acaba0e3..3bc1c174cd 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -2,6 +2,9 @@ ### New features since last release +* Integrate Lightning-GPU with Catalyst. + [(#928)](https://github.com/PennyLaneAI/pennylane-lightning/pull/928) + * Add `mid-circuit measurements` support to `lightning.gpu`'s single-GPU backend. [(#931)](https://github.com/PennyLaneAI/pennylane-lightning/pull/931) diff --git a/.github/workflows/tests_gpu_python.yml b/.github/workflows/tests_gpu_python.yml index 241b0726e9..ba7ef28dcb 100644 --- a/.github/workflows/tests_gpu_python.yml +++ b/.github/workflows/tests_gpu_python.yml @@ -206,6 +206,19 @@ jobs: run: | DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` PL_DEVICE=${DEVICENAME} python -m pytest tests/ $COVERAGE_FLAGS + + - name: Test wheels for Lightning-GPU + if : matrix.pl_backend == 'lightning_gpu' + run: | + python -m pip install -r requirements-dev.txt + PL_BACKEND=lightning_qubit python scripts/configure_pyproject_toml.py + SKIP_COMPILATION=True python -m pip install . 
-vv + + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + PL_BACKEND=${DEVICENAME} python scripts/configure_pyproject_toml.py + python -m build + python -m pip install dist/*.whl --force-reinstall --no-deps + PL_DEVICE=${DEVICENAME} python -m pytest tests/test_device.py $COVERAGE_FLAGS - name: Move coverage file run: | diff --git a/cmake/support_catalyst.cmake b/cmake/support_catalyst.cmake new file mode 100644 index 0000000000..1f5c556222 --- /dev/null +++ b/cmake/support_catalyst.cmake @@ -0,0 +1,74 @@ +############################################################################################### +# This file provides macros to process Catalyst. +############################################################################################### + +# Include this only once +include_guard() + +macro(FindCatalyst target_name) + if(LIGHTNING_CATALYST_SRC_PATH) + if(NOT IS_ABSOLUTE ${LIGHTNING_CATALYST_SRC_PATH}) + message(FATAL_ERROR " LIGHTNING_CATALYST_SRC_PATH=${LIGHTNING_CATALYST_SRC_PATH} must be set to an absolute path") + endif() + if(CATALYST_GIT_TAG) + message(WARN " Setting `LIGHTNING_CATALYST_SRC_PATH=${LIGHTNING_CATALYST_SRC_PATH}` overrides `CATALYST_GIT_TAG=${CATALYST_GIT_TAG}`") + endif() + + # Acquire local git hash and use for CATALYST_GIT_TAG + execute_process(COMMAND git rev-parse --short HEAD + WORKING_DIRECTORY ${LIGHTNING_CATALYST_SRC_PATH} + OUTPUT_VARIABLE CATALYST_GIT_TAG + ) + message(INFO " Building against local Catalyst - path: ${LIGHTNING_CATALYST_SRC_PATH} - GIT TAG: ${CATALYST_GIT_TAG}") + + target_include_directories(${target_name} PUBLIC ${LIGHTNING_CATALYST_SRC_PATH}/runtime/lib/backend/common) + target_include_directories(${target_name} PUBLIC ${LIGHTNING_CATALYST_SRC_PATH}/runtime/include) + + else() + if(NOT CATALYST_GIT_TAG) + set(CATALYST_GIT_TAG "main" CACHE STRING "GIT_TAG value to build Catalyst") + endif() + message(INFO " Building against Catalyst GIT TAG ${CATALYST_GIT_TAG}") + + # Fetching /lib/backend/common hpp headers + set(LIB_BACKEND_COMMON_HEADERS CacheManager.hpp + QubitManager.hpp + Utils.hpp + ) + + foreach(HEADER ${LIB_BACKEND_COMMON_HEADERS}) + string(REGEX REPLACE "\\.[^.]*$" "" HEADER_NAME ${HEADER}) + FetchContent_Declare( + ${HEADER_NAME} + URL https://raw.githubusercontent.com/PennyLaneAI/catalyst/${CATALYST_GIT_TAG}/runtime/lib/backend/common/${HEADER} + DOWNLOAD_NO_EXTRACT True + SOURCE_DIR include + ) + + FetchContent_MakeAvailable(${HEADER_NAME}) + endforeach() + + # Fetching include hpp headers + set(INCLUDE_HEADERS DataView.hpp + Exception.hpp + QuantumDevice.hpp + RuntimeCAPI.h + Types.h + ) + + foreach(HEADER ${INCLUDE_HEADERS}) + string(REGEX REPLACE "\\.[^.]*$" "" HEADER_NAME ${HEADER}) + FetchContent_Declare( + ${HEADER_NAME} + URL https://raw.githubusercontent.com/PennyLaneAI/catalyst/${CATALYST_GIT_TAG}/runtime/include/${HEADER} + DOWNLOAD_NO_EXTRACT True + SOURCE_DIR include + ) + + FetchContent_MakeAvailable(${HEADER_NAME}) + endforeach() + + target_include_directories(${target_name} PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/include) + + endif() +endmacro() diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/CMakeLists.txt b/pennylane_lightning/core/src/simulators/lightning_gpu/CMakeLists.txt index 618ea7c329..f2fa032454 100644 --- a/pennylane_lightning/core/src/simulators/lightning_gpu/CMakeLists.txt +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/CMakeLists.txt @@ -74,6 +74,7 @@ endif() ############################################################################### 
set(COMPONENT_SUBDIRS algorithms bindings + catalyst measurements gates observables diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/CMakeLists.txt b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/CMakeLists.txt new file mode 100644 index 0000000000..1b36c3f28d --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/CMakeLists.txt @@ -0,0 +1,22 @@ +cmake_minimum_required(VERSION 3.20) + +project(lightning_gpu_catalyst LANGUAGES CXX) + +set(LGPU_CATALYST_FILES LightningGPUSimulator.cpp CACHE INTERNAL "") +add_library(lightning_gpu_catalyst SHARED ${LGPU_CATALYST_FILES}) + +include(FetchContent) + +include("${pennylane_lightning_SOURCE_DIR}/cmake/support_catalyst.cmake") +FindCatalyst(lightning_gpu_catalyst) + +target_include_directories(lightning_gpu_catalyst INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}) +target_link_libraries(lightning_gpu_catalyst PUBLIC lightning_compile_options + lightning_gpu_algorithms + lightning_gpu_measurements +) + +if (BUILD_TESTS) + enable_testing() + add_subdirectory("tests") +endif() diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUObsManager.hpp b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUObsManager.hpp new file mode 100644 index 0000000000..c71c678056 --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUObsManager.hpp @@ -0,0 +1,208 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include + +#include "Types.h" +#include "Utils.hpp" + +#include "ObservablesGPU.hpp" + +namespace Catalyst::Runtime::Simulator { + +/** + * @brief The LightningGPUObsManager caches observables of a program at + * runtime and maps each one to a const unique index (`int64_t`) in the scope of + * the global context manager. + */ +template class LightningGPUObsManager final { + private: + using StateVectorT = + Pennylane::LightningGPU::StateVectorCudaManaged; + using ObservableT = Pennylane::Observables::Observable; + using ObservablePairType = std::pair, ObsType>; + std::vector observables_{}; + + public: + LightningGPUObsManager() = default; + ~LightningGPUObsManager() = default; + + LightningGPUObsManager(const LightningGPUObsManager &) = delete; + LightningGPUObsManager &operator=(const LightningGPUObsManager &) = delete; + LightningGPUObsManager(LightningGPUObsManager &&) = delete; + LightningGPUObsManager &operator=(LightningGPUObsManager &&) = delete; + + /** + * @brief A helper function to clear constructed observables in the program. + */ + void clear() { this->observables_.clear(); } + + /** + * @brief Check the validity of observable keys. 
+ * + * @param obsKeys The vector of observable keys + * @return bool + */ + [[nodiscard]] auto + isValidObservables(const std::vector &obsKeys) const -> bool { + return std::all_of(obsKeys.begin(), obsKeys.end(), [this](auto i) { + return (i >= 0 && + static_cast(i) < this->observables_.size()); + }); + } + + /** + * @brief Get the constructed observable instance. + * + * @param key The observable key + * @return std::shared_ptr + */ + [[nodiscard]] auto getObservable(ObsIdType key) + -> std::shared_ptr { + RT_FAIL_IF(!this->isValidObservables({key}), "Invalid observable key"); + return std::get<0>(this->observables_[key]); + } + + /** + * @brief Get the number of observables. + * + * @return std::size_t + */ + [[nodiscard]] auto numObservables() const -> std::size_t { + return this->observables_.size(); + } + + /** + * @brief Create and cache a new NamedObs instance. + * + * @param obsId The named observable id of type ObsId + * @param wires The vector of wires the observable acts on + * @return ObsIdType + */ + [[nodiscard]] auto createNamedObs(ObsId obsId, + const std::vector &wires) + -> ObsIdType { + auto &&obs_str = std::string( + Lightning::lookup_obs( + Lightning::simulator_observable_support, obsId)); + + this->observables_.push_back(std::make_pair( + std::make_shared< + Pennylane::LightningGPU::Observables::NamedObs>( + obs_str, wires), + ObsType::Basic)); + return static_cast(this->observables_.size() - 1); + } + + /** + * @brief Create and cache a new HermitianObs instance. + * + * @param matrix The row-wise Hermitian matrix + * @param wires The vector of wires the observable acts on + * @return ObsIdType + */ + [[nodiscard]] auto + createHermitianObs(const std::vector> &matrix, + const std::vector &wires) -> ObsIdType { + std::vector> matrix_k; + matrix_k.reserve(matrix.size()); + for (const auto &elem : matrix) { + matrix_k.push_back(static_cast>(elem)); + } + + this->observables_.push_back(std::make_pair( + std::make_shared>( + Pennylane::LightningGPU::Observables::HermitianObs< + StateVectorT>{matrix_k, wires}), + ObsType::Basic)); + + return static_cast(this->observables_.size() - 1); + } + + /** + * @brief Create and cache a new TensorProd instance. + * + * @param obsKeys The vector of observable keys + * @return ObsIdType + */ + [[nodiscard]] auto + createTensorProdObs(const std::vector &obsKeys) -> ObsIdType { + const auto key_size = obsKeys.size(); + const auto obs_size = this->observables_.size(); + + std::vector> obs_vec; + obs_vec.reserve(key_size); + + for (const auto &key : obsKeys) { + RT_FAIL_IF(static_cast(key) >= obs_size || key < 0, + "Invalid observable key"); + + auto &&[obs, type] = this->observables_[key]; + obs_vec.push_back(obs); + } + + this->observables_.push_back( + std::make_pair(Pennylane::LightningGPU::Observables::TensorProdObs< + StateVectorT>::create(obs_vec), + ObsType::TensorProd)); + + return static_cast(obs_size); + } + + /** + * @brief Create and cache a new HamiltonianObs instance. 
+ * + * @param coeffs The vector of coefficients + * @param obsKeys The vector of observable keys + * @return ObsIdType + */ + [[nodiscard]] auto + createHamiltonianObs(const std::vector &coeffs, + const std::vector &obsKeys) -> ObsIdType { + const auto key_size = obsKeys.size(); + const auto obs_size = this->observables_.size(); + + RT_FAIL_IF( + key_size != coeffs.size(), + "Incompatible list of observables and coefficients; " + "Number of observables and number of coefficients must be equal"); + + std::vector> obs_vec; + obs_vec.reserve(key_size); + + for (auto key : obsKeys) { + RT_FAIL_IF(static_cast(key) >= obs_size || key < 0, + "Invalid observable key"); + + auto &&[obs, type] = this->observables_[key]; + obs_vec.push_back(obs); + } + + this->observables_.push_back(std::make_pair( + std::make_shared>( + Pennylane::LightningGPU::Observables::Hamiltonian( + coeffs, std::move(obs_vec))), + ObsType::Hamiltonian)); + + return static_cast(obs_size); + } +}; +} // namespace Catalyst::Runtime::Simulator diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.cpp b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.cpp new file mode 100644 index 0000000000..0a51284601 --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.cpp @@ -0,0 +1,558 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
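+
+/**
+ * @file LightningGPUSimulator.cpp
+ * Catalyst runtime device implementation backed by the Lightning-GPU
+ * StateVectorCudaManaged state vector; the class is declared in
+ * LightningGPUSimulator.hpp, and the GENERATE_DEVICE_FACTORY call at the end
+ * of this file exports the factory entry point loaded by the Catalyst runtime.
+ */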
+ +#include "LightningGPUSimulator.hpp" + +namespace Catalyst::Runtime::Simulator { + +auto LightningGPUSimulator::AllocateQubit() -> QubitIdType { + const std::size_t num_qubits = this->device_sv->getNumQubits(); + + if (!num_qubits) { + this->device_sv = std::make_unique(1); + return this->qubit_manager.Allocate(num_qubits); + } + + std::vector> data = this->device_sv->getDataVector(); + const std::size_t dsize = data.size(); + data.resize(dsize << 1UL); + + auto src = data.begin(); + std::advance(src, dsize - 1); + + for (auto dst = data.end() - 2; src != data.begin(); + std::advance(src, -1), std::advance(dst, -2)) { + *dst = std::move(*src); + *src = std::complex(.0, .0); + } + + this->device_sv = std::make_unique(data.data(), data.size()); + return this->qubit_manager.Allocate(num_qubits); +} + +auto LightningGPUSimulator::AllocateQubits(std::size_t num_qubits) + -> std::vector { + if (!num_qubits) { + return {}; + } + + // at the first call when num_qubits == 0 + if (!this->GetNumQubits()) { + this->device_sv = std::make_unique(num_qubits); + return this->qubit_manager.AllocateRange(0, num_qubits); + } + + std::vector result(num_qubits); + std::generate_n(result.begin(), num_qubits, + [this]() { return AllocateQubit(); }); + return result; +} + +void LightningGPUSimulator::ReleaseQubit(QubitIdType q) { + this->qubit_manager.Release(q); +} + +void LightningGPUSimulator::ReleaseAllQubits() { + this->qubit_manager.ReleaseAll(); + this->device_sv = std::make_unique(0); // reset the device +} + +auto LightningGPUSimulator::GetNumQubits() const -> std::size_t { + return this->device_sv->getNumQubits(); +} + +void LightningGPUSimulator::StartTapeRecording() { + RT_FAIL_IF(this->tape_recording, "Cannot re-activate the cache manager"); + this->tape_recording = true; + this->cache_manager.Reset(); +} + +void LightningGPUSimulator::StopTapeRecording() { + RT_FAIL_IF(!this->tape_recording, + "Cannot stop an already stopped cache manager"); + this->tape_recording = false; +} + +auto LightningGPUSimulator::CacheManagerInfo() + -> std::tuple, std::vector> { + return {this->cache_manager.getNumOperations(), + this->cache_manager.getNumObservables(), + this->cache_manager.getNumParams(), + this->cache_manager.getOperationsNames(), + this->cache_manager.getObservablesKeys()}; +} + +void LightningGPUSimulator::SetDeviceShots(std::size_t shots) { + this->device_shots = shots; +} + +auto LightningGPUSimulator::GetDeviceShots() const -> std::size_t { + return this->device_shots; +} + +void LightningGPUSimulator::SetDevicePRNG(std::mt19937 *gen) { + this->gen = gen; +} + +/// LCOV_EXCL_START +// TODO: TBD whether to remove this function or add it to coverage test +void LightningGPUSimulator::PrintState() { + using std::cout; + using std::endl; + + const std::size_t num_qubits = this->device_sv->getNumQubits(); + const std::size_t size = Pennylane::Util::exp2(num_qubits); + + std::vector> state(size, {0.0, 0.0}); + + this->device_sv->CopyGpuDataToHost(state.data(), size); + + std::size_t idx = 0; + cout << "*** State-Vector of Size " << size << " ***" << endl; + cout << "["; + for (; idx < size - 1; idx++) { + cout << state[idx] << ", "; + } + cout << state[idx] << "]" << endl; +} +/// LCOV_EXCL_STOP + +void LightningGPUSimulator::SetState(DataView, 1> &data, + std::vector &wires) { + std::size_t expected_wires = static_cast(log2(data.size())); + RT_ASSERT(expected_wires == wires.size()); + std::vector> data_vector(data.begin(), data.end()); + this->device_sv->setStateVector(data_vector.data(), 
data_vector.size(), + getDeviceWires(wires)); +} + +void LightningGPUSimulator::SetBasisState(DataView &data, + std::vector &wires) { + std::vector basis_state(data.begin(), data.end()); + this->device_sv->setBasisState(basis_state, getDeviceWires(wires)); +} + +auto LightningGPUSimulator::Zero() const -> Result { + return const_cast(&GLOBAL_RESULT_FALSE_CONST); +} + +auto LightningGPUSimulator::One() const -> Result { + return const_cast(&GLOBAL_RESULT_TRUE_CONST); +} + +void LightningGPUSimulator::NamedOperation( + const std::string &name, const std::vector ¶ms, + const std::vector &wires, bool inverse, + const std::vector &controlled_wires, + const std::vector &controlled_values) { + RT_FAIL_IF(!controlled_wires.empty() || !controlled_values.empty(), + "LightningGPU does not support native quantum control."); + + // Check the validity of number of qubits and parameters + RT_FAIL_IF(!isValidQubits(wires), "Given wires do not refer to qubits"); + RT_FAIL_IF(!isValidQubits(controlled_wires), + "Given controlled wires do not refer to qubits"); + + // Convert wires to device wires + auto &&dev_wires = getDeviceWires(wires); + + // Update the state-vector + this->device_sv->applyOperation(name, dev_wires, inverse, params); + + // Update tape caching if required + if (this->tape_recording) { + this->cache_manager.addOperation(name, params, dev_wires, inverse, {}, + {/*controlled_wires*/}, + {/*controlled_values*/}); + } +} + +void LightningGPUSimulator::MatrixOperation( + const std::vector> &matrix, + const std::vector &wires, bool inverse, + const std::vector &controlled_wires, + const std::vector &controlled_values) { + // TODO: Remove when controlled wires API is supported + RT_FAIL_IF(!controlled_wires.empty() || !controlled_values.empty(), + "LightningGPU device does not support native quantum control."); + RT_FAIL_IF(!isValidQubits(wires), "Given wires do not refer to qubits"); + RT_FAIL_IF(!isValidQubits(controlled_wires), + "Given controlled wires do not refer to qubits"); + + // Convert wires to device wires + auto &&dev_wires = getDeviceWires(wires); + + this->device_sv->applyMatrix(matrix, dev_wires, inverse); + + // Update tape caching if required + if (this->tape_recording) { + this->cache_manager.addOperation("QubitUnitary", {}, dev_wires, inverse, + matrix, {/*controlled_wires*/}, + {/*controlled_values*/}); + } +} + +auto LightningGPUSimulator::Observable( + ObsId id, const std::vector> &matrix, + const std::vector &wires) -> ObsIdType { + RT_FAIL_IF(wires.size() > this->GetNumQubits(), "Invalid number of wires"); + RT_FAIL_IF(!isValidQubits(wires), "Invalid given wires"); + + auto &&dev_wires = getDeviceWires(wires); + + if (id == ObsId::Hermitian) { + return this->obs_manager.createHermitianObs(matrix, dev_wires); + } + + return this->obs_manager.createNamedObs(id, dev_wires); +} + +auto LightningGPUSimulator::TensorObservable(const std::vector &obs) + -> ObsIdType { + return this->obs_manager.createTensorProdObs(obs); +} + +auto LightningGPUSimulator::HamiltonianObservable( + const std::vector &coeffs, const std::vector &obs) + -> ObsIdType { + return this->obs_manager.createHamiltonianObs(coeffs, obs); +} + +auto LightningGPUSimulator::Expval(ObsIdType obsKey) -> double { + RT_FAIL_IF(!this->obs_manager.isValidObservables({obsKey}), + "Invalid key for cached observables"); + + // update tape caching + if (this->tape_recording) { + cache_manager.addObservable(obsKey, MeasurementsT::Expval); + } + + auto &&obs = this->obs_manager.getObservable(obsKey); + + 
Pennylane::LightningGPU::Measures::Measurements m{ + *(this->device_sv)}; + + return device_shots ? m.expval(*obs, device_shots, {}) : m.expval(*obs); +} + +auto LightningGPUSimulator::Var(ObsIdType obsKey) -> double { + RT_FAIL_IF(!this->obs_manager.isValidObservables({obsKey}), + "Invalid key for cached observables"); + + // update tape caching + if (this->tape_recording) { + this->cache_manager.addObservable(obsKey, MeasurementsT::Var); + } + + auto &&obs = this->obs_manager.getObservable(obsKey); + + Pennylane::LightningGPU::Measures::Measurements m{ + *(this->device_sv)}; + + return device_shots ? m.var(*obs, device_shots) : m.var(*obs); +} + +void LightningGPUSimulator::State(DataView, 1> &state) { + const std::size_t num_qubits = this->device_sv->getNumQubits(); + const std::size_t size = Pennylane::Util::exp2(num_qubits); + RT_FAIL_IF(state.size() != size, + "Invalid size for the pre-allocated state vector"); + + // create a temporary buffer to copy the underlying state-vector to + std::vector> buffer(size); + // copy data from device to host + this->device_sv->CopyGpuDataToHost(buffer.data(), size); + + // move data to state leveraging MemRefIter + std::move(buffer.begin(), buffer.end(), state.begin()); +} + +void LightningGPUSimulator::Probs(DataView &probs) { + Pennylane::LightningGPU::Measures::Measurements m{ + *(this->device_sv)}; + auto &&dv_probs = device_shots ? m.probs(device_shots) : m.probs(); + + RT_FAIL_IF(probs.size() != dv_probs.size(), + "Invalid size for the pre-allocated probabilities"); + + std::move(dv_probs.begin(), dv_probs.end(), probs.begin()); +} + +void LightningGPUSimulator::PartialProbs( + DataView &probs, const std::vector &wires) { + const std::size_t numWires = wires.size(); + const std::size_t numQubits = this->GetNumQubits(); + + RT_FAIL_IF(numWires > numQubits, "Invalid number of wires"); + RT_FAIL_IF(!isValidQubits(wires), "Invalid given wires to measure"); + + auto dev_wires = getDeviceWires(wires); + Pennylane::LightningGPU::Measures::Measurements m{ + *(this->device_sv)}; + auto &&dv_probs = + device_shots ? m.probs(dev_wires, device_shots) : m.probs(dev_wires); + + RT_FAIL_IF(probs.size() != dv_probs.size(), + "Invalid size for the pre-allocated partial-probabilities"); + + std::move(dv_probs.begin(), dv_probs.end(), probs.begin()); +} + +std::vector LightningGPUSimulator::GenerateSamples(size_t shots) { + // generate_samples is a member function of the Measures class. + Pennylane::LightningGPU::Measures::Measurements m{ + *(this->device_sv)}; + + if (this->gen) { + return m.generate_samples(shots, (*(this->gen))()); + } + return m.generate_samples(shots); +} + +void LightningGPUSimulator::Sample(DataView &samples, + std::size_t shots) { + auto li_samples = this->GenerateSamples(shots); + + RT_FAIL_IF(samples.size() != li_samples.size(), + "Invalid size for the pre-allocated samples"); + + const std::size_t numQubits = this->GetNumQubits(); + + // The lightning samples are layed out as a single vector of size + // shots*qubits, where each element represents a single bit. The + // corresponding shape is (shots, qubits). Gather the desired bits + // corresponding to the input wires into a bitstring. 
+ auto samplesIter = samples.begin(); + for (std::size_t shot = 0; shot < shots; shot++) { + for (std::size_t wire = 0; wire < numQubits; wire++) { + *(samplesIter++) = + static_cast(li_samples[shot * numQubits + wire]); + } + } +} +void LightningGPUSimulator::PartialSample(DataView &samples, + const std::vector &wires, + std::size_t shots) { + const std::size_t numWires = wires.size(); + const std::size_t numQubits = this->GetNumQubits(); + + RT_FAIL_IF(numWires > numQubits, "Invalid number of wires"); + RT_FAIL_IF(!isValidQubits(wires), "Invalid given wires to measure"); + RT_FAIL_IF(samples.size() != shots * numWires, + "Invalid size for the pre-allocated partial-samples"); + + // get device wires + auto &&dev_wires = getDeviceWires(wires); + + auto li_samples = this->GenerateSamples(shots); + + // The lightning samples are layed out as a single vector of size + // shots*qubits, where each element represents a single bit. The + // corresponding shape is (shots, qubits). Gather the desired bits + // corresponding to the input wires into a bitstring. + auto samplesIter = samples.begin(); + for (std::size_t shot = 0; shot < shots; shot++) { + for (auto wire : dev_wires) { + *(samplesIter++) = + static_cast(li_samples[shot * numQubits + wire]); + } + } +} + +void LightningGPUSimulator::Counts(DataView &eigvals, + DataView &counts, + std::size_t shots) { + const std::size_t numQubits = this->GetNumQubits(); + const std::size_t numElements = 1U << numQubits; + + RT_FAIL_IF(eigvals.size() != numElements || counts.size() != numElements, + "Invalid size for the pre-allocated counts"); + + auto li_samples = this->GenerateSamples(shots); + + // Fill the eigenvalues with the integer representation of the corresponding + // computational basis bitstring. In the future, eigenvalues can also be + // obtained from an observable, hence the bitstring integer is stored as a + // double. + std::iota(eigvals.begin(), eigvals.end(), 0); + std::fill(counts.begin(), counts.end(), 0); + + // The lightning samples are layed out as a single vector of size + // shots*qubits, where each element represents a single bit. The + // corresponding shape is (shots, qubits). Gather the bits of all qubits + // into a bitstring. + for (std::size_t shot = 0; shot < shots; shot++) { + std::bitset basisState; + std::size_t idx = numQubits; + for (std::size_t wire = 0; wire < numQubits; wire++) { + basisState[--idx] = li_samples[shot * numQubits + wire]; + } + counts(static_cast(basisState.to_ulong())) += 1; + } +} + +void LightningGPUSimulator::PartialCounts(DataView &eigvals, + DataView &counts, + const std::vector &wires, + std::size_t shots) { + const std::size_t numWires = wires.size(); + const std::size_t numQubits = this->GetNumQubits(); + const std::size_t numElements = 1U << numWires; + + RT_FAIL_IF(numWires > numQubits, "Invalid number of wires"); + RT_FAIL_IF(!isValidQubits(wires), "Invalid given wires to measure"); + RT_FAIL_IF((eigvals.size() != numElements || counts.size() != numElements), + "Invalid size for the pre-allocated partial-counts"); + + // get device wires + auto &&dev_wires = getDeviceWires(wires); + + auto li_samples = this->GenerateSamples(shots); + + // Fill the eigenvalues with the integer representation of the corresponding + // computational basis bitstring. In the future, eigenvalues can also be + // obtained from an observable, hence the bitstring integer is stored as a + // double. 
+ std::iota(eigvals.begin(), eigvals.end(), 0); + std::fill(counts.begin(), counts.end(), 0); + + // The lightning samples are layed out as a single vector of size + // shots*qubits, where each element represents a single bit. The + // corresponding shape is (shots, qubits). Gather the desired bits + // corresponding to the input wires into a bitstring. + for (std::size_t shot = 0; shot < shots; shot++) { + std::bitset basisState; + std::size_t idx = dev_wires.size(); + for (auto wire : dev_wires) { + basisState[--idx] = li_samples[shot * numQubits + wire]; + } + counts(static_cast(basisState.to_ulong())) += 1; + } +} + +auto LightningGPUSimulator::Measure(QubitIdType wire, + std::optional postselect) + -> Result { + // get a measurement + std::vector wires = {reinterpret_cast(wire)}; + + std::vector probs(1U << wires.size()); + DataView buffer_view(probs); + auto device_shots = GetDeviceShots(); + SetDeviceShots(0); + PartialProbs(buffer_view, wires); + SetDeviceShots(device_shots); + + // It represents the measured result, true for 1, false for 0 + bool mres = Lightning::simulateDraw(probs, postselect, this->gen); + auto dev_wires = getDeviceWires(wires); + this->device_sv->collapse(dev_wires[0], mres ? 1 : 0); + return mres ? this->One() : this->Zero(); +} + +void LightningGPUSimulator::Gradient( + std::vector> &gradients, + const std::vector &trainParams) { + const bool tp_empty = trainParams.empty(); + const std::size_t num_observables = this->cache_manager.getNumObservables(); + const std::size_t num_params = this->cache_manager.getNumParams(); + const std::size_t num_train_params = + tp_empty ? num_params : trainParams.size(); + const std::size_t jac_size = + num_train_params * this->cache_manager.getNumObservables(); + + if (!jac_size) { + return; + } + + RT_FAIL_IF(gradients.size() != num_observables, + "Invalid number of pre-allocated gradients"); + + auto &&obs_callees = this->cache_manager.getObservablesCallees(); + bool is_valid_measurements = + std::all_of(obs_callees.begin(), obs_callees.end(), + [](const auto &m) { return m == MeasurementsT::Expval; }); + RT_FAIL_IF( + !is_valid_measurements, + "Unsupported measurements to compute gradient; " + "Adjoint differentiation method only supports expectation return type"); + + // Create OpsData + auto &&ops_names = this->cache_manager.getOperationsNames(); + auto &&ops_params = this->cache_manager.getOperationsParameters(); + auto &&ops_wires = this->cache_manager.getOperationsWires(); + auto &&ops_inverses = this->cache_manager.getOperationsInverses(); + auto &&ops_matrices = this->cache_manager.getOperationsMatrices(); + auto &&ops_controlled_wires = + this->cache_manager.getOperationsControlledWires(); + auto &&ops_controlled_values = + this->cache_manager.getOperationsControlledValues(); + + const auto &&ops = Pennylane::Algorithms::OpsData( + ops_names, ops_params, ops_wires, ops_inverses, ops_matrices, + ops_controlled_wires, ops_controlled_values); + + // Create the vector of observables + auto &&obs_keys = this->cache_manager.getObservablesKeys(); + std::vector< + std::shared_ptr>> + obs_vec; + obs_vec.reserve(obs_keys.size()); + for (auto idx : obs_keys) { + obs_vec.emplace_back(this->obs_manager.getObservable(idx)); + } + + std::vector all_params; + if (tp_empty) { + all_params.reserve(num_params); + for (std::size_t i = 0; i < num_params; i++) { + all_params.push_back(i); + } + } + + // construct the Jacobian data + Pennylane::Algorithms::JacobianData tape{ + num_params, + this->device_sv->getLength(), + 
this->device_sv->getData(), + obs_vec, + ops, + tp_empty ? all_params : trainParams}; + + Pennylane::LightningGPU::Algorithms::AdjointJacobian adj; + std::vector jacobian(jac_size, 0); + adj.adjointJacobian(std::span{jacobian}, tape, + /* ref_data */ *this->device_sv, + /* apply_operations */ false); + + std::vector cur_buffer(num_train_params); + auto begin_loc_iter = jacobian.begin(); + for (std::size_t obs_idx = 0; obs_idx < num_observables; obs_idx++) { + RT_ASSERT(begin_loc_iter != jacobian.end()); + RT_ASSERT(num_train_params <= gradients[obs_idx].size()); + std::move(begin_loc_iter, begin_loc_iter + num_train_params, + cur_buffer.begin()); + std::move(cur_buffer.begin(), cur_buffer.end(), + gradients[obs_idx].begin()); + begin_loc_iter += num_train_params; + } +} + +} // namespace Catalyst::Runtime::Simulator + +/// LCOV_EXCL_START +GENERATE_DEVICE_FACTORY(LightningGPUSimulator, + Catalyst::Runtime::Simulator::LightningGPUSimulator); +/// LCOV_EXCL_STOP diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.hpp b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.hpp new file mode 100644 index 0000000000..edf99c9f61 --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/LightningGPUSimulator.hpp @@ -0,0 +1,179 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @file LightningGPUSimulator.hpp + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "AdjointJacobianGPU.hpp" +#include "MeasurementsGPU.hpp" +#include "StateVectorCudaManaged.hpp" + +#include "CacheManager.hpp" +#include "Exception.hpp" +#include "LightningGPUObsManager.hpp" +#include "QuantumDevice.hpp" +#include "QubitManager.hpp" +#include "Utils.hpp" + +namespace Catalyst::Runtime::Simulator { +/** + * @brief LGPU state vector class wrapper for Catalyst. + * This class inherits from the QuantumDevice class defined in Catalyst. 
+ * More info: + * https://github.com/PennyLaneAI/catalyst/blob/main/runtime/include/QuantumDevice.hpp + * + */ +class LightningGPUSimulator final : public Catalyst::Runtime::QuantumDevice { + private: + using StateVectorT = + Pennylane::LightningGPU::StateVectorCudaManaged; + + // static constants for RESULT values + static constexpr bool GLOBAL_RESULT_TRUE_CONST = true; + static constexpr bool GLOBAL_RESULT_FALSE_CONST = false; + + Catalyst::Runtime::QubitManager qubit_manager{}; + Catalyst::Runtime::CacheManager> cache_manager{}; + bool tape_recording{false}; + + std::size_t device_shots; + + std::mt19937 *gen{nullptr}; + + std::unique_ptr device_sv = std::make_unique(0); + LightningGPUObsManager obs_manager{}; + + inline auto isValidQubit(QubitIdType wire) -> bool { + return this->qubit_manager.isValidQubitId(wire); + } + + inline auto isValidQubits(const std::vector &wires) -> bool { + return std::all_of(wires.begin(), wires.end(), [this](QubitIdType w) { + return this->isValidQubit(w); + }); + } + + inline auto isValidQubits(std::size_t numWires, const QubitIdType *wires) + -> bool { + return std::all_of(wires, wires + numWires, [this](QubitIdType w) { + return this->isValidQubit(w); + }); + } + + inline auto getDeviceWires(const std::vector &wires) + -> std::vector { + std::vector res; + res.reserve(wires.size()); + std::transform( + wires.begin(), wires.end(), std::back_inserter(res), + [this](auto w) { return this->qubit_manager.getDeviceId(w); }); + return res; + } + + auto GenerateSamples(size_t shots) -> std::vector; + + public: + explicit LightningGPUSimulator(const std::string &kwargs = "{}") { + auto &&args = Catalyst::Runtime::parse_kwargs(kwargs); + device_shots = args.contains("shots") + ? static_cast(std::stoll(args["shots"])) + : 0; + } + ~LightningGPUSimulator() = default; + + LightningGPUSimulator(const LightningGPUSimulator &) = delete; + LightningGPUSimulator &operator=(const LightningGPUSimulator &) = delete; + LightningGPUSimulator(LightningGPUSimulator &&) = delete; + LightningGPUSimulator &operator=(LightningGPUSimulator &&) = delete; + + auto AllocateQubit() -> QubitIdType override; + auto AllocateQubits(std::size_t num_qubits) + -> std::vector override; + void ReleaseQubit(QubitIdType q) override; + void ReleaseAllQubits() override; + [[nodiscard]] auto GetNumQubits() const -> std::size_t override; + void StartTapeRecording() override; + void StopTapeRecording() override; + void SetDeviceShots(std::size_t shots) override; + void SetDevicePRNG(std::mt19937 *) override; + void SetState(DataView, 1> &, + std::vector &) override; + void SetBasisState(DataView &, + std::vector &) override; + [[nodiscard]] auto GetDeviceShots() const -> std::size_t override; + void PrintState() override; + [[nodiscard]] auto Zero() const -> Result override; + [[nodiscard]] auto One() const -> Result override; + + void + NamedOperation(const std::string &name, const std::vector ¶ms, + const std::vector &wires, bool inverse = false, + const std::vector &controlled_wires = {}, + const std::vector &controlled_values = {}) override; + using Catalyst::Runtime::QuantumDevice::MatrixOperation; + void + MatrixOperation(const std::vector> &matrix, + const std::vector &wires, bool inverse = false, + const std::vector &controlled_wires = {}, + const std::vector &controlled_values = {}) override; + auto Observable(ObsId id, const std::vector> &matrix, + const std::vector &wires) + -> ObsIdType override; + auto TensorObservable(const std::vector &obs) + -> ObsIdType override; + auto 
HamiltonianObservable(const std::vector &coeffs, + const std::vector &obs) + -> ObsIdType override; + auto Expval(ObsIdType obsKey) -> double override; + auto Var(ObsIdType obsKey) -> double override; + void State(DataView, 1> &state) override; + void Probs(DataView &probs) override; + void PartialProbs(DataView &probs, + const std::vector &wires) override; + void Sample(DataView &samples, std::size_t shots) override; + void PartialSample(DataView &samples, + const std::vector &wires, + std::size_t shots) override; + void Counts(DataView &eigvals, DataView &counts, + std::size_t shots) override; + void PartialCounts(DataView &eigvals, + DataView &counts, + const std::vector &wires, + std::size_t shots) override; + auto Measure(QubitIdType wire, + std::optional postselect = std::nullopt) + -> Result override; + void Gradient(std::vector> &gradients, + const std::vector &trainParams) override; + + auto CacheManagerInfo() + -> std::tuple, std::vector>; +}; + +} // namespace Catalyst::Runtime::Simulator diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/CMakeLists.txt b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/CMakeLists.txt new file mode 100644 index 0000000000..eedce4233b --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/CMakeLists.txt @@ -0,0 +1,39 @@ +cmake_minimum_required(VERSION 3.20) + +project(lightning_gpu_catalyst_tests) + +# Default build type for test code is Debug +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Debug) +endif() + +include("${pennylane_lightning_SOURCE_DIR}/cmake/support_tests.cmake") +FetchAndIncludeCatch() + +################################################################################ +# Define library +################################################################################ + +add_library(lightning_gpu_catalyst_tests INTERFACE) +target_link_libraries(lightning_gpu_catalyst_tests INTERFACE Catch2::Catch2 + lightning_gpu_catalyst +) + +ProcessTestOptions(lightning_gpu_catalyst_tests) + +target_sources(lightning_gpu_catalyst_tests INTERFACE runner_lightning_gpu_catalyst.cpp) + +################################################################################ +# Define targets +################################################################################ +set(TEST_SOURCES Test_LightningGPUSimulator.cpp + Test_LightningGPUMeasures.cpp + Test_LightningGPUGradient.cpp +) + +add_executable(lightning_gpu_catalyst_tests_runner ${TEST_SOURCES}) +target_link_libraries(lightning_gpu_catalyst_tests_runner PRIVATE lightning_gpu_catalyst_tests) + +catch_discover_tests(lightning_gpu_catalyst_tests_runner) + +install(TARGETS lightning_gpu_catalyst_tests_runner DESTINATION bin) diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUGradient.cpp b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUGradient.cpp new file mode 100644 index 0000000000..891559ec84 --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUGradient.cpp @@ -0,0 +1,298 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "LightningGPUSimulator.hpp" +#include "catch2/catch.hpp" + +/// @cond DEV +namespace { +// MemRef type definition (Helper) +template struct MemRefT { + T *data_allocated; + T *data_aligned; + std::size_t offset; + std::size_t sizes[R]; + std::size_t strides[R]; +}; +using namespace Catalyst::Runtime::Simulator; +using LGPUSimulator = LightningGPUSimulator; +} // namespace +/// @endcond + +TEST_CASE("Zero qubits. Zero parameters", "[Gradient]") { + std::unique_ptr LGPUsim = std::make_unique(); + + std::vector> gradients; + std::vector Qs = LGPUsim->AllocateQubits(0); + REQUIRE_NOTHROW(LGPUsim->Gradient(gradients, {})); +} + +TEST_CASE("Test Gradient with zero number of obs", "[Gradient]") { + std::unique_ptr sim = std::make_unique(); + + std::vector buffer(1); + std::vector> gradients; + gradients.emplace_back(buffer); + + const std::vector trainParams{0}; + + const auto q = sim->AllocateQubit(); + + sim->StartTapeRecording(); + + sim->NamedOperation("S", {}, {q}, false); + sim->NamedOperation("T", {}, {q}, false); + + REQUIRE_NOTHROW(sim->Gradient(gradients, trainParams)); + + sim->StopTapeRecording(); +} + +TEST_CASE("Test Gradient with Var", "[Gradient]") { + std::unique_ptr sim = std::make_unique(); + + std::vector buffer(1); + std::vector> gradients; + gradients.emplace_back(buffer); + + const std::vector trainParams{0}; + + const auto q = sim->AllocateQubit(); + + sim->StartTapeRecording(); + + sim->NamedOperation("RX", {-M_PI / 7}, {q}, false); + auto pz = sim->Observable(ObsId::PauliZ, {}, {q}); + sim->Var(pz); + + REQUIRE_THROWS_WITH( + sim->Gradient(gradients, trainParams), + Catch::Contains("Unsupported measurements to compute gradient")); + + REQUIRE_THROWS_WITH( + sim->Gradient(gradients, {}), + Catch::Contains("Unsupported measurements to compute gradient")); + + sim->StopTapeRecording(); +} + +TEST_CASE("Test Gradient with Op=RX, Obs=Z", "[Gradient]") { + std::unique_ptr sim = std::make_unique(); + + std::vector buffer(1); + std::vector> gradients; + gradients.emplace_back(buffer); + + const std::vector trainParams{0}; + + const auto q = sim->AllocateQubit(); + + sim->StartTapeRecording(); + + sim->NamedOperation("RX", {-M_PI / 7}, {q}, false); + auto obs = sim->Observable(ObsId::PauliZ, {}, {q}); + sim->Expval(obs); + + sim->Gradient(gradients, trainParams); + CHECK(-sin(-M_PI / 7) == Approx(buffer[0]).margin(1e-5)); + + // Update buffer + buffer[0] = 0.0; + + sim->Gradient(gradients, {}); + CHECK(-sin(-M_PI / 7) == Approx(buffer[0]).margin(1e-5)); + + sim->StopTapeRecording(); +} + +TEST_CASE("Test Gradient with Op=RX, Obs=Hermitian", "[Gradient]") { + std::unique_ptr sim = std::make_unique(); + + std::vector buffer(1); + std::vector> gradients; + gradients.emplace_back(buffer); + + const std::vector trainParams{0}; + + constexpr double expected{0.2169418696}; + + const auto q = sim->AllocateQubit(); + + sim->StartTapeRecording(); + + sim->NamedOperation("RX", {-M_PI / 7}, {q}, false); + + std::vector> mat{ + {1.0, 0.0}, {0.0, 0.0}, {2.0, 0.0}, {0.0, 0.0}}; + + auto obs = sim->Observable(ObsId::Hermitian, mat, {q}); + + sim->Expval(obs); + + 
sim->Gradient(gradients, trainParams); + CHECK(expected == Approx(buffer[0]).margin(1e-5)); + + // Update buffer + buffer[0] = 0.0; + + sim->Gradient(gradients, {}); + CHECK(expected == Approx(buffer[0]).margin(1e-5)); + + sim->StopTapeRecording(); +} + +TEST_CASE("Test Gradient with Op=[RX,RX,RX,CZ], Obs=[Z,Z,Z]", "[Gradient]") { + std::unique_ptr sim = std::make_unique(); + + constexpr std::size_t num_parms = 3; + + std::vector buffer_p0(num_parms); + std::vector buffer_p1(num_parms); + std::vector buffer_p2(num_parms); + std::vector> gradients; + gradients.emplace_back(buffer_p0); + gradients.emplace_back(buffer_p1); + gradients.emplace_back(buffer_p2); + + const std::vector trainParams{0, 1, 2}; + + const std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + const std::vector expected{-sin(param[0]), -sin(param[1]), + -sin(param[2])}; + + const auto Qs = sim->AllocateQubits(num_parms); + + sim->StartTapeRecording(); + + sim->NamedOperation("RX", {param[0]}, {Qs[0]}, false); + sim->NamedOperation("RX", {param[1]}, {Qs[1]}, false); + sim->NamedOperation("RX", {param[2]}, {Qs[2]}, false); + sim->NamedOperation("CZ", {}, {Qs[0], Qs[2]}, false); + + std::vector> mat{ + {1.0, 0.0}, {0.0, 0.0}, {2.0, 0.0}, {0.0, 0.0}}; + + auto obs0 = sim->Observable(ObsId::PauliZ, {}, {Qs[0]}); + auto obs1 = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + auto obs2 = sim->Observable(ObsId::PauliZ, {}, {Qs[2]}); + + sim->Expval(obs0); + sim->Expval(obs1); + sim->Expval(obs2); + + sim->Gradient(gradients, trainParams); + CHECK(expected[0] == Approx(buffer_p0[0]).margin(1e-5)); + CHECK(expected[1] == Approx(buffer_p1[1]).margin(1e-5)); + CHECK(expected[2] == Approx(buffer_p2[2]).margin(1e-5)); + + sim->StopTapeRecording(); +} + +TEST_CASE("Test Gradient with Op=Mixed, Obs=Hamiltonian([Z@Z, H], {0.2, 0.6})", + "[Gradient]") { + std::unique_ptr sim = std::make_unique(); + + constexpr std::size_t num_parms = 6; + + std::vector buffer(num_parms); + std::vector> gradients; + gradients.emplace_back(buffer); + + const std::vector trainParams{0, 1, 2, 3, 4, 5}; + + const std::vector param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3}; + const std::vector expected{0.0, -0.2493761627, 0.0, + 0.0, -0.1175570505, 0.0}; + + const auto Qs = sim->AllocateQubits(3); + + sim->StartTapeRecording(); + + sim->NamedOperation("RZ", {param[0]}, {Qs[0]}, false); + sim->NamedOperation("RY", {param[1]}, {Qs[0]}, false); + sim->NamedOperation("RZ", {param[2]}, {Qs[0]}, false); + sim->NamedOperation("CNOT", {}, {Qs[0], Qs[1]}, false); + sim->NamedOperation("CNOT", {}, {Qs[1], Qs[2]}, false); + sim->NamedOperation("RZ", {param[0]}, {Qs[1]}, false); + sim->NamedOperation("RY", {param[1]}, {Qs[1]}, false); + sim->NamedOperation("RZ", {param[2]}, {Qs[1]}, false); + + std::vector> mat{ + {1.0, 0.0}, {0.0, 0.0}, {2.0, 0.0}, {0.0, 0.0}}; + + auto obs0 = sim->Observable(ObsId::PauliZ, {}, {Qs[0]}); + auto obs1 = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + auto obs2 = sim->TensorObservable({obs0, obs1}); + auto obs3 = sim->Observable(ObsId::Hadamard, {}, {Qs[2]}); + auto obs4 = sim->HamiltonianObservable({0.2, 0.6}, {obs2, obs3}); + + sim->Expval(obs4); + + sim->Gradient(gradients, trainParams); + + for (std::size_t i = 0; i < num_parms; i++) { + CAPTURE(i); + CHECK(expected[i] == Approx(buffer[i]).margin(1e-5)); + buffer[i] = 0.0; + } + + sim->Gradient(gradients, {}); + + for (std::size_t i = 0; i < num_parms; i++) { + CAPTURE(i); + CHECK(expected[i] == Approx(buffer[i]).margin(1e-5)); + } + + sim->StopTapeRecording(); +} + +TEST_CASE("Test Gradient 
with QubitUnitary", "[Gradient]") { + std::unique_ptr sim = std::make_unique(); + + std::vector buffer(1); + std::vector> gradients; + gradients.emplace_back(buffer); + + const std::vector trainParams{0}; + + constexpr double expected{-0.8611041863}; + + const std::vector> matrix{ + {-0.6709485262524046, -0.6304426335363695}, + {-0.14885403153998722, 0.3608498832392019}, + {-0.2376311670004963, 0.3096798175687841}, + {-0.8818365947322423, -0.26456390390903695}, + }; + + const auto Qs = sim->AllocateQubits(1); + + sim->StartTapeRecording(); + + sim->NamedOperation("RX", {-M_PI / 7}, {Qs[0]}, false); + sim->MatrixOperation(matrix, {Qs[0]}, false); + + auto obs = sim->Observable(ObsId::PauliY, {}, {Qs[0]}); + sim->Expval(obs); + + sim->Gradient(gradients, trainParams); + CHECK(expected == Approx(buffer[0]).margin(1e-5)); + + // Update buffer + buffer[0] = 0.0; + + sim->Gradient(gradients, {}); + CHECK(expected == Approx(buffer[0]).margin(1e-5)); + + sim->StopTapeRecording(); +} diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUMeasures.cpp b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUMeasures.cpp new file mode 100644 index 0000000000..93ac7b3a2d --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUMeasures.cpp @@ -0,0 +1,1825 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
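+
+// Catch2 unit tests for the LightningGPUSimulator measurement interface:
+// observable construction and validation, measurement collapse, mid-circuit
+// measurement with postselection, and expectation values (analytic and
+// shot-based), among other measurement routines.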
+
+#include
+
+#include "CacheManager.hpp"
+#include "LightningGPUSimulator.hpp"
+#include "QuantumDevice.hpp"
+#include "Types.h"
+#include "Utils.hpp"
+#include "catch2/catch.hpp"
+#include "cmath"
+
+/// @cond DEV
+namespace {
+// MemRef type definition (Helper)
+// TODO: Move this to a common header file
+template <typename T, std::size_t R> struct MemRefT {
+    T *data_allocated;
+    T *data_aligned;
+    std::size_t offset;
+    std::size_t sizes[R];
+    std::size_t strides[R];
+};
+using namespace Catalyst::Runtime::Simulator;
+using LGPUSimulator = LightningGPUSimulator;
+} // namespace
+/// @endcond
+
+TEST_CASE("NameObs test with invalid number of wires", "[Measures]") {
+    std::unique_ptr<LGPUSimulator> sim = std::make_unique<LGPUSimulator>();
+
+    REQUIRE_THROWS_WITH(sim->Observable(ObsId::PauliX, {}, {1}),
+                        Catch::Contains("Invalid number of wires"));
+}
+
+TEST_CASE("NameObs test with invalid given wires for NamedObs", "[Measures]") {
+    std::unique_ptr<LGPUSimulator> sim = std::make_unique<LGPUSimulator>();
+
+    sim->AllocateQubit();
+
+    REQUIRE_THROWS_WITH(sim->Observable(ObsId::PauliX, {}, {1}),
+                        Catch::Contains("Invalid given wires"));
+}
+
+TEST_CASE("HermitianObs test with invalid number of wires", "[Measures]") {
+    std::unique_ptr<LGPUSimulator> sim = std::make_unique<LGPUSimulator>();
+
+    REQUIRE_THROWS_WITH(sim->Observable(ObsId::Hermitian, {}, {1}),
+                        Catch::Contains("Invalid number of wires"));
+}
+
+TEST_CASE("HermitianObs test with invalid given wires for HermitianObs",
+          "[Measures]") {
+    std::unique_ptr<LGPUSimulator> sim = std::make_unique<LGPUSimulator>();
+    sim->AllocateQubit();
+
+    REQUIRE_THROWS_WITH(sim->Observable(ObsId::Hermitian, {}, {1}),
+                        Catch::Contains("Invalid given wires"));
+}
+
+TEST_CASE("Check an unsupported observable", "[Measures]") {
+    REQUIRE_THROWS_WITH(
+        Lightning::lookup_obs<Lightning::simulator_observable_support.size()>(
+            Lightning::simulator_observable_support, static_cast<ObsId>(10)),
+        Catch::Contains(
+            "The given observable is not supported by the simulator"));
+}
+
+TEST_CASE("Measurement collapse test with 2 wires", "[Measures]") {
+    std::unique_ptr<LGPUSimulator> sim = std::make_unique<LGPUSimulator>();
+
+    constexpr std::size_t n = 2;
+    std::vector<intptr_t> Qs = sim->AllocateQubits(n);
+
+    sim->NamedOperation("Hadamard", {}, {Qs[0]}, false);
+    auto m = sim->Measure(Qs[0]);
+    std::vector<std::complex<double>> state(1U << sim->GetNumQubits());
+    DataView<std::complex<double>, 1> view(state);
+    sim->State(view);
+
+    // LCOV_EXCL_START
+    // This is conditional over the measurement result
+    if (*m) {
+        CHECK(pow(std::abs(std::real(state[2])), 2) +
+                  pow(std::abs(std::imag(state[2])), 2) ==
+              Approx(1.0).margin(1e-5));
+    } else {
+        CHECK(pow(std::abs(std::real(state[0])), 2) +
+                  pow(std::abs(std::imag(state[0])), 2) ==
+              Approx(1.0).margin(1e-5));
+    }
+    // LCOV_EXCL_STOP
+}
+
+TEST_CASE("Measurement collapse concrete logical qubit difference",
+          "[Measures]") {
+    std::unique_ptr<LGPUSimulator> sim = std::make_unique<LGPUSimulator>();
+
+    constexpr std::size_t n = 1;
+    // The first time an array is allocated, logical and concrete qubits
+    // are the same.
+    std::vector<intptr_t> Qs = sim->AllocateQubits(n);
+    sim->ReleaseAllQubits();
+
+    // Now in this the concrete qubits are shifted by n.
+ Qs = sim->AllocateQubits(n); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->Measure(Qs[0]); + std::vector> state(1U << sim->GetNumQubits()); + DataView, 1> view(state); + sim->State(view); + + // LCOV_EXCL_START + bool is_zero = pow(std::abs(std::real(state[0])), 2) + + pow(std::abs(std::imag(state[0])), 2) == + Approx(1.0).margin(1e-5); + bool is_one = pow(std::abs(std::real(state[1])), 2) + + pow(std::abs(std::imag(state[1])), 2) == + Approx(1.0).margin(1e-5); + bool is_valid = is_zero ^ is_one; + CHECK(is_valid); + // LCOV_EXCL_STOP +} + +TEST_CASE("Mid-circuit measurement naive test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + intptr_t q; + + q = sim->AllocateQubit(); + + sim->NamedOperation("PauliX", {}, {q}, false); + + auto m = sim->Measure(q); + + CHECK(*m); +} + +TEST_CASE("Mid-circuit measurement test with postselect = 0", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + intptr_t q; + + q = sim->AllocateQubit(); + + sim->NamedOperation("Hadamard", {}, {q}, false); + + auto m = sim->Measure(q, 0); + + CHECK(*m == 0); +} + +TEST_CASE("Mid-circuit measurement test with postselect = 1", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + intptr_t q; + + q = sim->AllocateQubit(); + + sim->NamedOperation("Hadamard", {}, {q}, false); + + auto m = sim->Measure(q, 1); + + CHECK(*m == 1); +} + +TEST_CASE("Mid-circuit measurement test with invalid postselect value", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + intptr_t q; + + q = sim->AllocateQubit(); + + sim->NamedOperation("Hadamard", {}, {q}, false); + + REQUIRE_THROWS_WITH(sim->Measure(q, 2), + Catch::Contains("Invalid postselect value")); +} + +TEST_CASE("Expval(ObsT) test with invalid key for cached observables", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + REQUIRE_THROWS_WITH(sim->Expval(0), + Catch::Contains("Invalid key for cached observables")); +} + +TEST_CASE("Expval(NamedObs) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + + CHECK(sim->Expval(px) == Approx(1.0).margin(1e-5)); + CHECK(sim->Expval(py) == Approx(.0).margin(1e-5)); + CHECK(sim->Expval(pz) == Approx(-1.0).margin(1e-5)); +} + +TEST_CASE("Expval(NamedObs) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + CHECK(sim->Expval(px) == 
Approx(0.0).margin(5e-2)); + CHECK(sim->Expval(py) == Approx(0.0).margin(5e-2)); + CHECK(sim->Expval(pz) == Approx(-1.0).margin(5e-2)); +} + +TEST_CASE("Expval(HermitianObs) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(16, {0, 0}); + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[0], Qs[1]}); + ObsIdType h2 = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + + CHECK(sim->Expval(h1) == Approx(.0).margin(1e-5)); + CHECK(sim->Expval(h2) == Approx(.0).margin(1e-5)); +} + +TEST_CASE("Expval(HermitianObs) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(16, {0, 0}); + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[0], Qs[1]}); + +#ifndef PL_USE_LAPACK + REQUIRE_THROWS_WITH( + sim->Expval(h1), + Catch::Contains( + "Hermitian observables with shot measurement are not supported")); +#else + CHECK(sim->Expval(h1) == Approx(0.0).margin(1e-5)); +#endif +} + +TEST_CASE("Var(HermitianObs) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(16, {0, 0}); + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[0], Qs[1]}); +#ifndef PL_USE_LAPACK + REQUIRE_THROWS_WITH( + sim->Var(h1), + Catch::Contains( + "Hermitian observables with shot measurement are not supported")); +#else + CHECK(sim->Var(h1) == Approx(0.0).margin(1e-5)); +#endif +} + +TEST_CASE("Expval(TensorProd(NamedObs)) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpx = sim->TensorObservable({px}); + ObsIdType tpy = sim->TensorObservable({py}); + ObsIdType tpz = sim->TensorObservable({pz}); + + CHECK(sim->Expval(tpx) == Approx(1.0).margin(1e-5)); + CHECK(sim->Expval(tpy) == Approx(.0).margin(1e-5)); + CHECK(sim->Expval(tpz) == Approx(-1.0).margin(1e-5)); +} + 
+TEST_CASE("Expval(TensorProd(NamedObs)) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpx = sim->TensorObservable({px}); + ObsIdType tpy = sim->TensorObservable({py}); + ObsIdType tpz = sim->TensorObservable({pz}); + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + CHECK(sim->Expval(tpx) == Approx(1.0).margin(5e-2)); + CHECK(sim->Expval(tpy) == Approx(.0).margin(5e-2)); + CHECK(sim->Expval(tpz) == Approx(-1.0).margin(5e-2)); +} + +TEST_CASE("Expval(TensorProd(NamedObs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpxy = sim->TensorObservable({px, py}); + ObsIdType tpxz = sim->TensorObservable({px, pz}); + + REQUIRE_THROWS_WITH( + sim->TensorObservable({px, py, pz}), + Catch::Contains("All wires in observables must be disjoint.")); + + CHECK(sim->Expval(tpxy) == Approx(0.0).margin(1e-5)); + CHECK(sim->Expval(tpxz) == Approx(-1.0).margin(1e-5)); +} + +TEST_CASE("Expval(TensorProd(NamedObs[])) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + std::unique_ptr sim0 = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + Qs.push_back(sim0->AllocateQubit()); + } + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + sim0->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim0->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim0->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType tpxy = sim->TensorObservable({px, py}); + + ObsIdType px0 = sim0->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py0 = sim0->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType tpxy0 = sim0->TensorObservable({px0, py0}); + + CHECK(sim->Expval(tpxy) == Approx(sim0->Expval(tpxy0)).margin(5e-2)); +} + +TEST_CASE("Expval(TensorProd(HermitianObs))", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; 
+ std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(16, {0, 0}); + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[0], Qs[1]}); + ObsIdType h2 = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType tph1 = sim->TensorObservable({h1}); + ObsIdType tph2 = sim->TensorObservable({h2}); + + CHECK(sim->Expval(tph1) == Approx(.0).margin(1e-5)); + CHECK(sim->Expval(tph2) == Approx(.0).margin(1e-5)); +} + +TEST_CASE("Expval(TensorProd(HermitianObs[]))", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(4, {1.0, 0}); + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[1]}); + ObsIdType h2 = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType tp = sim->TensorObservable({h1, h2}); + + CHECK(sim->Expval(tp) == Approx(.0).margin(1e-5)); +} + +TEST_CASE("Expval(TensorProd(Obs[]))", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType tp = sim->TensorObservable({px, h, pz}); + + CHECK(sim->Expval(tp) == Approx(-3.0).margin(1e-5)); +} + +TEST_CASE("Expval(Tensor(Hamiltonian(NamedObs[]), NamedObs)) test", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[0]}); + ObsIdType hxy = sim->HamiltonianObservable({0.4, 0.8}, {px, py}); + ObsIdType thz = sim->TensorObservable({hxy, pz}); + + CHECK(sim->Expval(thz) == Approx(-0.4).margin(1e-5)); +} + +TEST_CASE("Expval(Tensor(HermitianObs, Hamiltonian()) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 3; + std::vector Qs = sim->AllocateQubits(n); + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + 
sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType her = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[1]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[2]}); + ObsIdType hxy = sim->HamiltonianObservable({0.4, 0.8}, {px, py}); + ObsIdType ten = sim->TensorObservable({her, hxy}); + + CHECK(sim->Expval(ten) == Approx(0.0).margin(1e-5)); +} + +TEST_CASE("Expval(Hamiltonian(NamedObs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType hxyz = sim->HamiltonianObservable({0.4, 0.8, 0.2}, {px, py, pz}); + + CHECK(sim->Expval(hxyz) == Approx(0.2).margin(1e-5)); +} + +TEST_CASE("Expval(Hamiltonian(NamedObs[])) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType hxyz = sim->HamiltonianObservable({0.4, 0.8, 0.2}, {px, py, pz}); + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + CHECK(sim->Expval(hxyz) == Approx(0.2).margin(5e-2)); +} + +TEST_CASE("Expval(Hamiltonian(TensorObs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpxy = sim->TensorObservable({px, py}); + ObsIdType tpxz = sim->TensorObservable({px, pz}); + ObsIdType hxyz = sim->HamiltonianObservable({0.2, 0.6}, {tpxy, tpxz}); + + CHECK(sim->Expval(hxyz) == Approx(-.6).margin(1e-5)); +} + +TEST_CASE("Expval(Hamiltonian(Hermitian[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + 
Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType hxhz = sim->HamiltonianObservable({0.2, 0.3, 0.6}, {px, h, pz}); + + CHECK(sim->Expval(hxhz) == Approx(0.5).margin(1e-5)); +} + +TEST_CASE("Expval(Hamiltonian({TensorProd, Hermitian}[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tp = sim->TensorObservable({px, pz}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType hhtp = sim->HamiltonianObservable({0.5, 0.3}, {h, tp}); + + CHECK(sim->Expval(hhtp) == Approx(1.2).margin(1e-5)); +} + +TEST_CASE("Expval(Hamiltonian({Hamiltonian, Hermitian}[])) test", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType hp = sim->HamiltonianObservable({0.2, 0.6}, {px, pz}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType hhtp = sim->HamiltonianObservable({0.5, 0.3}, {h, hp}); + + CHECK(sim->Expval(hhtp) == Approx(1.38).margin(1e-5)); +} + +TEST_CASE("Expval(Hamiltonian({Hamiltonian(Hamiltonian), Hermitian}[])) test", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType hp = sim->HamiltonianObservable({0.2, 0.6}, {px, pz}); + ObsIdType hhp = sim->HamiltonianObservable({1}, {hp}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, 
mat2, {Qs[0]}); + ObsIdType hhtp = sim->HamiltonianObservable({0.5, 0.3}, {hhp, h}); + + CHECK(sim->Expval(hhtp) == Approx(0.7).margin(1e-5)); +} + +TEST_CASE("Var(NamedObs) test with numWires=4", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[0]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[3]}); + + CHECK(sim->Var(px) == Approx(.0).margin(1e-5)); + CHECK(sim->Var(py) == Approx(1.0).margin(1e-5)); + CHECK(sim->Var(pz) == Approx(.0).margin(1e-5)); +} + +TEST_CASE("Var(NamedObs) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + constexpr std::size_t num_shots = 5000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[0]}); + + CHECK(sim->Var(py) == Approx(1.0).margin(5e-2)); +} + +TEST_CASE("Var(HermitianObs) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(16, {0, 0}); + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[0], Qs[1]}); + ObsIdType h2 = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + + CHECK(sim->Var(h1) == Approx(.0).margin(1e-5)); + CHECK(sim->Var(h2) == Approx(1.0).margin(1e-5)); +} + +TEST_CASE("Var(TensorProd(NamedObs)) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpx = sim->TensorObservable({px}); + ObsIdType tpy = sim->TensorObservable({py}); + ObsIdType tpz = sim->TensorObservable({pz}); + + CHECK(sim->Var(tpx) == Approx(.0).margin(1e-5)); + CHECK(sim->Var(tpy) == Approx(1.0).margin(1e-5)); + CHECK(sim->Var(tpz) == Approx(.0).margin(1e-5)); +} + +TEST_CASE("Var(TensorProd(NamedObs)) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + 
Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpx = sim->TensorObservable({px}); + ObsIdType tpz = sim->TensorObservable({pz}); + + CHECK(sim->Var(tpx) == Approx(.0).margin(5e-2)); + CHECK(sim->Var(tpz) == Approx(.0).margin(5e-2)); +} + +TEST_CASE("Var(TensorProd(NamedObs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpxy = sim->TensorObservable({px, py}); + ObsIdType tpxz = sim->TensorObservable({px, pz}); + + CHECK(sim->Var(tpxy) == Approx(1.0).margin(1e-5)); + CHECK(sim->Var(tpxz) == Approx(0.0).margin(1e-5)); +} + +TEST_CASE("Var(TensorProd(HermitianObs)) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(16, {0, 0}); + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[0], Qs[1]}); + ObsIdType h2 = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType tph1 = sim->TensorObservable({h1}); + ObsIdType tph2 = sim->TensorObservable({h2}); + + CHECK(sim->Var(tph1) == Approx(.0).margin(1e-5)); + CHECK(sim->Var(tph2) == Approx(1.0).margin(1e-5)); +} + +TEST_CASE("Var(TensorProd(HermitianObs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 2; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + std::vector> mat1(4, {1.0, 0}); + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType h1 = sim->Observable(ObsId::Hermitian, mat1, {Qs[1]}); + ObsIdType h2 = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType tp = sim->TensorObservable({h1, h2}); + + CHECK(sim->Var(tp) == Approx(2.0).margin(1e-5)); +} + +TEST_CASE("Var(TensorProd(Obs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + 
sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType tp = sim->TensorObservable({px, h, pz}); + + CHECK(sim->Var(tp) == Approx(4.0).margin(1e-5)); +} + +TEST_CASE("Var(Tensor(Hamiltonian(NamedObs[]), NamedObs)) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[0]}); + ObsIdType hxy = sim->HamiltonianObservable({0.4, 0.8}, {px, py}); + ObsIdType thz = sim->TensorObservable({hxy, pz}); + + CHECK(sim->Var(thz) == Approx(0.64).margin(1e-5)); +} + +TEST_CASE("Var(Tensor(NamedObs[])) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + constexpr std::size_t num_shots = 5000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[0]}); + ObsIdType thz = sim->TensorObservable({px, py, pz}); + + CHECK(sim->Var(thz) == Approx(0.99998976).margin(5e-2)); +} + +TEST_CASE("Var(Tensor(NamedObs[])) shots test without gates " + "(influenced from a bug in Lightning)", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 3; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + constexpr std::size_t num_shots = 5000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[0]}); + ObsIdType thz = sim->TensorObservable({px, py, pz}); + + CHECK(sim->Var(thz) == Approx(0.99966144).margin(5e-2)); +} + +TEST_CASE("Var(Tensor(HermitianObs, Hamiltonian()) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 3; + std::vector Qs = sim->AllocateQubits(n); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, 
{Qs[1]}, false); + + std::vector> mat2{ + {1.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}, {0.0, 0.0}}; + + ObsIdType her = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[1]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[2]}); + ObsIdType hxy = sim->HamiltonianObservable({0.4, 0.8}, {px, py}); + ObsIdType ten = sim->TensorObservable({her, hxy}); + + CHECK(sim->Var(ten) == Approx(0.8).margin(1e-5)); +} + +TEST_CASE("Var(Tensor(HermitianObs, Hamiltonian()) shots test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 3; + std::vector Qs = sim->AllocateQubits(n); + + constexpr std::size_t num_shots = 5000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[1]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[2]}); + ObsIdType hxy = sim->HamiltonianObservable({0.4, 0.8}, {px, py}); + + CHECK(sim->Var(hxy) == Approx(0.8).margin(5e-2)); +} + +TEST_CASE("Var(Hamiltonian(NamedObs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType hxyz = sim->HamiltonianObservable({0.4, 0.8, 0.2}, {px, py, pz}); + + CHECK(sim->Var(hxyz) == Approx(0.64).margin(1e-5)); +} + +TEST_CASE("Var(Hamiltonian(TensorObs[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType py = sim->Observable(ObsId::PauliY, {}, {Qs[1]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tpxy = sim->TensorObservable({px, py}); + ObsIdType tpxz = sim->TensorObservable({px, pz}); + ObsIdType hxyz = sim->HamiltonianObservable({0.2, 0.6}, {tpxy, tpxz}); + + CHECK(sim->Var(hxyz) == Approx(0.04).margin(1e-5)); +} + +TEST_CASE("Var(Hamiltonian(Hermitian[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + + 
std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType hxhz = sim->HamiltonianObservable({0.2, 0.3, 0.6}, {px, h, pz}); + + CHECK(sim->Var(hxhz) == Approx(0.36).margin(1e-5)); +} + +TEST_CASE("Var(Hamiltonian({TensorProd, Hermitian}[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType tp = sim->TensorObservable({px, pz}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType hhtp = sim->HamiltonianObservable({0.5, 0.3}, {h, tp}); + + CHECK(sim->Var(hhtp) == Approx(1.0).margin(1e-5)); +} + +TEST_CASE("Var(Hamiltonian({Hamiltonian, Hermitian}[])) test", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType hp = sim->HamiltonianObservable({0.2, 0.6}, {px, pz}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType hhtp = sim->HamiltonianObservable({0.5, 0.3}, {h, hp}); + + CHECK(sim->Var(hhtp) == Approx(1.0).margin(1e-5)); +} + +TEST_CASE("Var(Hamiltonian({Hamiltonian(Hamiltonian), Hermitian}[])) test", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("PauliX", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + ObsIdType px = sim->Observable(ObsId::PauliX, {}, {Qs[2]}); + ObsIdType pz = sim->Observable(ObsId::PauliZ, {}, {Qs[1]}); + ObsIdType hp = sim->HamiltonianObservable({0.2, 0.6}, {px, pz}); + ObsIdType hhp = sim->HamiltonianObservable({1}, {hp}); + + std::vector> mat2{ + {1.0, 0.0}, {2.0, 0.0}, {-1.0, 0.0}, {3.0, 0.0}}; + ObsIdType h = sim->Observable(ObsId::Hermitian, mat2, {Qs[0]}); + ObsIdType hhtp = sim->HamiltonianObservable({0.5, 0.3}, {hhp, h}); + + CHECK(sim->Var(hhtp) == Approx(0.36).margin(1e-5)); +} + +TEST_CASE("State test with incorrect size", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs = sim->AllocateQubits(n); + + std::vector> state(1U << (n - 1)); + 
DataView, 1> view(state); + REQUIRE_THROWS_WITH( + sim->State(view), + Catch::Contains("Invalid size for the pre-allocated state vector")); +} + +TEST_CASE("State test with numWires=4", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs = sim->AllocateQubits(n); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + std::vector> state(1U << sim->GetNumQubits()); + DataView, 1> view(state); + sim->State(view); + + for (std::size_t i = 0; i < 16; i++) { + if (i == 4 || i == 6 || i == 12 || i == 14) { + CHECK(std::real(state[i]) == Approx(0.).margin(1e-5)); + CHECK(std::imag(state[i]) == Approx(0.5).margin(1e-5)); + } else { + CHECK(std::real(state[i]) == Approx(0.).margin(1e-5)); + CHECK(std::imag(state[i]) == Approx(0.).margin(1e-5)); + } + } +} + +TEST_CASE("PartialProbs test with incorrect numWires and numAlloc", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + std::vector probs_vec(1); + DataView probs_view(probs_vec); + + REQUIRE_THROWS_WITH( + sim->PartialProbs(probs_view, {Qs[0], Qs[1], Qs[2], Qs[3], Qs[0]}), + Catch::Contains("Invalid number of wires")); + + REQUIRE_THROWS_WITH( + sim->PartialProbs(probs_view, {Qs[0]}), + Catch::Contains( + "Invalid size for the pre-allocated partial-probabilities")); + + REQUIRE_THROWS_WITH( + sim->Probs(probs_view), + Catch::Contains("Invalid size for the pre-allocated probabilities")); + + sim->ReleaseQubit(Qs[0]); + + REQUIRE_THROWS_WITH(sim->PartialProbs(probs_view, {Qs[0]}), + Catch::Contains("Invalid given wires to measure")); +} + +TEST_CASE("Probs and PartialProbs tests with numWires=0-4", "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + std::vector probs0(1); + DataView view0(probs0); + sim->PartialProbs(view0, std::vector{}); + + std::vector probs1(2); + DataView view1(probs1); + sim->PartialProbs(view1, std::vector{Qs[2]}); + + std::vector probs2(4); + DataView view2(probs2); + sim->PartialProbs(view2, std::vector{Qs[0], Qs[3]}); + + std::vector probs3(16); + DataView view3(probs3); + sim->PartialProbs(view3, Qs); + + std::vector probs4(16); + DataView view4(probs4); + sim->Probs(view4); + + CHECK(probs0.size() == 1); + CHECK(probs0[0] == Approx(1.0)); + CHECK(probs1[0] == Approx(0.5).margin(1e-5)); + CHECK(probs1[1] == Approx(0.5).margin(1e-5)); + for (std::size_t i = 0; i < 4; i++) { + if (i == 0 || i == 2) { + CHECK(probs2[i] == Approx(0.5).margin(1e-5)); + } else { + CHECK(probs2[i] == Approx(0.).margin(1e-5)); + } + } + for (std::size_t i = 0; i < 16; i++) { + if (i == 4 || i == 6 || i == 12 || i == 14) { + CHECK(probs3[i] == Approx(0.25).margin(1e-5)); + CHECK(probs4[i] == Approx(0.25).margin(1e-5)); + } else { + CHECK(probs3[i] == Approx(0.).margin(1e-5)); + 
CHECK(probs4[i] == Approx(0.).margin(1e-5)); + } + } +} + +TEST_CASE("Probs and PartialProbs shots tests with numWires=0-4", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + constexpr std::size_t num_shots = 10000; + sim->SetDeviceShots(num_shots); + + sim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim->NamedOperation("PauliY", {}, {Qs[1]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[2]}, false); + sim->NamedOperation("PauliZ", {}, {Qs[3]}, false); + + std::vector probs0(1); + DataView view0(probs0); + sim->PartialProbs(view0, std::vector{}); + + std::vector probs1(2); + DataView view1(probs1); + sim->PartialProbs(view1, std::vector{Qs[2]}); + + std::vector probs2(4); + DataView view2(probs2); + sim->PartialProbs(view2, std::vector{Qs[0], Qs[3]}); + + std::vector probs3(16); + DataView view3(probs3); + sim->PartialProbs(view3, Qs); + + std::vector probs4(16); + DataView view4(probs4); + sim->Probs(view4); + + CHECK(probs0.size() == 1); + CHECK(probs0[0] == Approx(1.0).margin(5e-2)); + CHECK(probs1[0] == Approx(0.5).margin(5e-2)); + CHECK(probs1[1] == Approx(0.5).margin(5e-2)); + for (std::size_t i = 0; i < 4; i++) { + if (i == 0 || i == 2) { + CHECK(probs2[i] == Approx(0.5).margin(5e-2)); + } else { + CHECK(probs2[i] == Approx(0.).margin(5e-2)); + } + } + for (std::size_t i = 0; i < 16; i++) { + if (i == 4 || i == 6 || i == 12 || i == 14) { + CHECK(probs3[i] == Approx(0.25).margin(5e-2)); + CHECK(probs4[i] == Approx(0.25).margin(5e-2)); + } else { + CHECK(probs3[i] == Approx(0.).margin(5e-2)); + CHECK(probs4[i] == Approx(0.).margin(5e-2)); + } + } +} + +TEST_CASE("PartialSample test with incorrect numWires and numAlloc", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + std::vector samples_vec(1); + MemRefT samples{samples_vec.data(), + samples_vec.data(), + 0, + {samples_vec.size(), 1}, + {1, 1}}; + DataView view(samples.data_aligned, samples.offset, + samples.sizes, samples.strides); + + REQUIRE_THROWS_WITH( + sim->PartialSample(view, {Qs[0], Qs[1], Qs[2], Qs[3], Qs[0]}, 4), + Catch::Contains("Invalid number of wires")); + + REQUIRE_THROWS_WITH( + sim->PartialSample(view, {Qs[0], Qs[1]}, 2), + Catch::Contains("Invalid size for the pre-allocated partial-samples")); + + REQUIRE_THROWS_WITH( + sim->Sample(view, 2), + Catch::Contains("Invalid size for the pre-allocated samples")); + + sim->ReleaseQubit(Qs[0]); + + REQUIRE_THROWS_WITH(sim->PartialSample(view, {Qs[0]}, 4), + Catch::Contains("Invalid given wires to measure")); +} + +TEST_CASE("PartialCounts test with incorrect numWires and numAlloc", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + std::vector eigvals_vec(1); + DataView eigvals_view(eigvals_vec); + + std::vector counts_vec(1); + DataView counts_view(counts_vec); + + REQUIRE_THROWS_WITH(sim->PartialCounts(eigvals_view, counts_view, + {Qs[0], Qs[1], Qs[2], Qs[3], Qs[0]}, + 4), + Catch::Contains("Invalid number of wires")); + + REQUIRE_THROWS_WITH( + 
sim->PartialCounts(eigvals_view, counts_view, {Qs[0]}, 1), + Catch::Contains("Invalid size for the pre-allocated partial-counts")); + + REQUIRE_THROWS_WITH( + sim->Counts(eigvals_view, counts_view, 1), + Catch::Contains("Invalid size for the pre-allocated counts")); + + sim->ReleaseQubit(Qs[0]); + + REQUIRE_THROWS_WITH( + sim->PartialCounts(eigvals_view, counts_view, {Qs[0]}, 4), + Catch::Contains("Invalid given wires to measure")); +} + +TEST_CASE("Sample and PartialSample tests with numWires=0-4 shots=100", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("RX", {0.5}, {Qs[0]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[1]}, false); + sim->NamedOperation("CNOT", {}, {Qs[0], Qs[1]}, false); + + std::size_t shots = 100; + + std::vector samples1(shots * 1); + MemRefT buffer1{ + samples1.data(), samples1.data(), 0, {shots, 1}, {1, 1}}; + DataView view1(buffer1.data_aligned, buffer1.offset, + buffer1.sizes, buffer1.strides); + sim->PartialSample(view1, std::vector{Qs[2]}, shots); + + std::vector samples2(shots * 2); + MemRefT buffer2{ + samples2.data(), samples2.data(), 0, {shots, 2}, {1, 1}}; + DataView view2(buffer2.data_aligned, buffer2.offset, + buffer2.sizes, buffer2.strides); + sim->PartialSample(view2, std::vector{Qs[0], Qs[3]}, shots); + + std::vector samples3(shots * 4); + MemRefT buffer3{ + samples3.data(), samples3.data(), 0, {shots, 4}, {1, 1}}; + DataView view3(buffer3.data_aligned, buffer3.offset, + buffer3.sizes, buffer3.strides); + sim->PartialSample(view3, Qs, shots); + + std::vector samples4(shots * 4); + MemRefT buffer4{ + samples4.data(), samples4.data(), 0, {shots, 4}, {1, 1}}; + DataView view4(buffer4.data_aligned, buffer4.offset, + buffer4.sizes, buffer4.strides); + sim->Sample(view4, shots); + + for (std::size_t i = 0; i < shots * 1; i++) + CHECK((samples1[i] == 0. || samples1[i] == 1.)); + for (std::size_t i = 0; i < shots * 2; i++) + CHECK((samples2[i] == 0. || samples2[i] == 1.)); + for (std::size_t i = 0; i < shots * 4; i++) + CHECK((samples3[i] == 0. || samples3[i] == 1.)); + for (std::size_t i = 0; i < shots * 4; i++) + CHECK((samples4[i] == 0. 
|| samples4[i] == 1.)); +} + +TEST_CASE("Sample and PartialSample tests with numWires=0-4 " + "shots=1000 mcmc=True num_burnin=200", + "[Measures]") { + std::unique_ptr sim = + std::make_unique("{mcmc : True, num_burnin : 200}"); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs; + Qs.reserve(n); + for (std::size_t i = 0; i < n; i++) { + Qs.push_back(sim->AllocateQubit()); + } + + sim->NamedOperation("RX", {0.5}, {Qs[0]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[1]}, false); + sim->NamedOperation("CNOT", {}, {Qs[0], Qs[1]}, false); + + std::size_t shots = 100; + + std::vector samples1(shots * 1); + MemRefT buffer1{ + samples1.data(), samples1.data(), 0, {shots, 1}, {1, 1}}; + DataView view1(buffer1.data_aligned, buffer1.offset, + buffer1.sizes, buffer1.strides); + sim->PartialSample(view1, std::vector{Qs[2]}, shots); + + std::vector samples2(shots * 2); + MemRefT buffer2{ + samples2.data(), samples2.data(), 0, {shots, 2}, {1, 1}}; + DataView view2(buffer2.data_aligned, buffer2.offset, + buffer2.sizes, buffer2.strides); + sim->PartialSample(view2, std::vector{Qs[0], Qs[3]}, shots); + + std::vector samples3(shots * 4); + MemRefT buffer3{ + samples3.data(), samples3.data(), 0, {shots, 4}, {1, 1}}; + DataView view3(buffer3.data_aligned, buffer3.offset, + buffer3.sizes, buffer3.strides); + sim->PartialSample(view3, Qs, shots); + + std::vector samples4(shots * 4); + MemRefT buffer4{ + samples4.data(), samples4.data(), 0, {shots, 4}, {1, 1}}; + DataView view4(buffer4.data_aligned, buffer4.offset, + buffer4.sizes, buffer4.strides); + sim->Sample(view4, shots); + + for (std::size_t i = 0; i < shots * 1; i++) + CHECK((samples1[i] == 0. || samples1[i] == 1.)); + for (std::size_t i = 0; i < shots * 2; i++) + CHECK((samples2[i] == 0. || samples2[i] == 1.)); + for (std::size_t i = 0; i < shots * 4; i++) + CHECK((samples3[i] == 0. || samples3[i] == 1.)); + for (std::size_t i = 0; i < shots * 4; i++) + CHECK((samples4[i] == 0. || samples4[i] == 1.)); +} + +TEST_CASE("Counts and PartialCounts tests with numWires=0-4 shots=100", + "[Measures]") { + std::unique_ptr sim = std::make_unique(); + + // state-vector with #qubits = n + constexpr std::size_t n = 4; + std::vector Qs = sim->AllocateQubits(n); + + sim->NamedOperation("RX", {0.5}, {Qs[0]}, false); + sim->NamedOperation("Hadamard", {}, {Qs[1]}, false); + sim->NamedOperation("CNOT", {}, {Qs[0], Qs[1]}, false); + + std::size_t shots = 100; + + std::vector eigvals0(1); + std::vector counts0(1); + DataView eview0(eigvals0); + DataView cview0(counts0); + sim->PartialCounts(eview0, cview0, std::vector{}, shots); + + std::vector eigvals1(2); + std::vector counts1(2); + DataView eview1(eigvals1); + DataView cview1(counts1); + sim->PartialCounts(eview1, cview1, std::vector{Qs[2]}, shots); + + std::vector eigvals2(4); + std::vector counts2(4); + DataView eview2(eigvals2); + DataView cview2(counts2); + sim->PartialCounts(eview2, cview2, std::vector{Qs[0], Qs[3]}, + shots); + + std::vector eigvals3(16); + std::vector counts3(16); + DataView eview3(eigvals3); + DataView cview3(counts3); + sim->PartialCounts(eview3, cview3, Qs, shots); + + std::vector eigvals4(16); + std::vector counts4(16); + DataView eview4(eigvals4); + DataView cview4(counts4); + sim->Counts(eview4, cview4, shots); + + CHECK(eigvals0.size() == 1); + CHECK(eigvals0[0] == 0.0); + CHECK(counts0.size() == 1); + CHECK(counts0[0] == static_cast(shots)); + CHECK((eigvals1[0] == 0. && eigvals1[1] == 1.)); + CHECK((eigvals2[0] == 0. && eigvals2[1] == 1. 
&& eigvals2[2] == 2. && + eigvals2[3] == 3.)); + for (std::size_t i = 0; i < 16; i++) { + CHECK(eigvals3[i] == static_cast(i)); + CHECK(eigvals4[i] == static_cast(i)); + } + + CHECK(counts1[0] + counts1[1] == static_cast(shots)); + CHECK(counts2[0] + counts2[1] + counts2[2] + counts2[3] == + static_cast(shots)); + std::size_t sum3 = 0, sum4 = 0; + for (std::size_t i = 0; i < 16; i++) { + sum3 += counts3[i]; + sum4 += counts4[i]; + } + CHECK(sum3 == shots); + CHECK(sum4 == shots); +} + +TEST_CASE("Measurement with a seeded device", "[Measures]") { + std::array, 2> sims; + std::vector gens{std::mt19937{37}, std::mt19937{37}}; + + auto circuit = [](LGPUSimulator &sim, std::mt19937 &gen) { + sim.SetDevicePRNG(&gen); + std::vector Qs; + Qs.reserve(1); + Qs.push_back(sim.AllocateQubit()); + sim.NamedOperation("Hadamard", {}, {Qs[0]}, false); + auto m = sim.Measure(Qs[0]); + return m; + }; + + for (std::size_t trial = 0; trial < 5; trial++) { + sims[0] = std::make_unique(); + sims[1] = std::make_unique(); + + auto m0 = circuit(*(sims[0]), gens[0]); + auto m1 = circuit(*(sims[1]), gens[1]); + + CHECK(*m0 == *m1); + } +} + +TEST_CASE("Sample with a seeded device", "[Measures]") { + std::size_t shots = 100; + std::array, 2> sims; + std::vector> sample_vec(2, + std::vector(shots * 4)); + + std::vector> buffers{ + MemRefT{ + sample_vec[0].data(), sample_vec[0].data(), 0, {shots, 1}, {1, 1}}, + MemRefT{ + sample_vec[1].data(), sample_vec[1].data(), 0, {shots, 1}, {1, 1}}, + }; + std::vector> views{ + DataView(buffers[0].data_aligned, buffers[0].offset, + buffers[0].sizes, buffers[0].strides), + DataView(buffers[1].data_aligned, buffers[1].offset, + buffers[1].sizes, buffers[1].strides)}; + + std::vector gens{std::mt19937{37}, std::mt19937{37}}; + + auto circuit = [shots](LGPUSimulator &sim, DataView &view, + std::mt19937 &gen) { + sim.SetDevicePRNG(&gen); + std::vector Qs; + Qs.reserve(1); + Qs.push_back(sim.AllocateQubit()); + sim.NamedOperation("Hadamard", {}, {Qs[0]}, false); + sim.NamedOperation("RX", {0.5}, {Qs[0]}, false); + sim.Sample(view, shots); + }; + + for (std::size_t trial = 0; trial < 5; trial++) { + sims[0] = std::make_unique(); + sims[1] = std::make_unique(); + + for (std::size_t sim_idx = 0; sim_idx < sims.size(); sim_idx++) { + circuit(*(sims[sim_idx]), views[sim_idx], gens[sim_idx]); + } + + for (std::size_t i = 0; i < sample_vec[0].size(); i++) { + CHECK((sample_vec[0][i] == sample_vec[1][i])); + } + } +} diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUSimulator.cpp b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUSimulator.cpp new file mode 100644 index 0000000000..a5068f3f7a --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/Test_LightningGPUSimulator.cpp @@ -0,0 +1,724 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an AS IS BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "LightningGPUSimulator.hpp" +#include "QuantumDevice.hpp" +#include "TestHelpers.hpp" + +/// @cond DEV +namespace { +using namespace Catalyst::Runtime::Simulator; +using namespace Pennylane::Util; +using LGPUSimulator = LightningGPUSimulator; +using QDevice = Catalyst::Runtime::QuantumDevice; + +GENERATE_DEVICE_FACTORY(LightningGPUSimulator, + Catalyst::Runtime::Simulator::LightningGPUSimulator); +} // namespace +/// @endcond + +/** + * @brief Tests the LightningGPUSimulator class. + * + */ +TEST_CASE("LightningGPUSimulator::constructor", "[constructibility]") { + SECTION("LightningGPUSimulator") { + REQUIRE(std::is_constructible::value); + } + SECTION("LightningGPUSimulator(string))") { + REQUIRE(std::is_constructible::value); + } +} + +TEST_CASE("Test the device factory method", "[constructibility]") { + std::unique_ptr LGPUsim(LightningGPUSimulatorFactory("")); + REQUIRE(LGPUsim->GetNumQubits() == 0); +} + +TEST_CASE("LightningGPUSimulator::unit_tests", "[unit tests]") { + SECTION("Managing Qubits") { + std::unique_ptr LGPUsim = + std::make_unique(); + std::vector Qs = LGPUsim->AllocateQubits(0); + REQUIRE(LGPUsim->GetNumQubits() == 0); + LGPUsim->AllocateQubits(2); + REQUIRE(LGPUsim->GetNumQubits() == 2); + LGPUsim->AllocateQubits(2); + REQUIRE(LGPUsim->GetNumQubits() == 4); + LGPUsim->ReleaseQubit(0); + REQUIRE( + LGPUsim->GetNumQubits() == + 4); // releasing only one qubit does not change the total number. + LGPUsim->ReleaseAllQubits(); + REQUIRE(LGPUsim->GetNumQubits() == + 0); // releasing all qubits resets the simulator. + } + SECTION("Tape recording") { + std::unique_ptr LGPUsim = + std::make_unique(); + std::vector Qs = LGPUsim->AllocateQubits(1); + REQUIRE_NOTHROW(LGPUsim->StartTapeRecording()); + REQUIRE_THROWS_WITH( + LGPUsim->StartTapeRecording(), + Catch::Matchers::Contains("Cannot re-activate the cache manager")); + REQUIRE_NOTHROW(LGPUsim->StopTapeRecording()); + REQUIRE_THROWS_WITH( + LGPUsim->StopTapeRecording(), + Catch::Matchers::Contains( + "Cannot stop an already stopped cache manager")); + } +} + +TEST_CASE("LightningGPUSimulator::GateSet", "[GateSet]") { + SECTION("Identity gate") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 10; + std::vector Qs; + Qs.reserve(n_qubits); + for (std::size_t ind = 0; ind < n_qubits; ind++) { + Qs[ind] = LGPUsim->AllocateQubit(); + } + + for (std::size_t ind = 0; ind < n_qubits; ind += 2) { + LGPUsim->NamedOperation("Identity", {}, {Qs[ind]}, false); + } + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state.at(0) == std::complex{1, 0}); + + std::complex sum{0, 0}; + for (std::size_t ind = 1; ind < state.size(); ind++) { + sum += state[ind]; + } + + CHECK(sum == std::complex{0, 0}); + } + + SECTION("PauliX gate") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 3; + std::vector Qs; + Qs.reserve(n_qubits); + for (std::size_t ind = 0; ind < n_qubits; ind++) { + Qs[ind] = LGPUsim->AllocateQubit(); + } + + for (std::size_t ind = 0; ind < n_qubits; ind++) { + LGPUsim->NamedOperation("PauliX", {}, {Qs[ind]}, false); + } + for (std::size_t ind = n_qubits; ind > 0; ind--) { + LGPUsim->NamedOperation("PauliX", {}, {Qs[ind - 1]}, false); + } + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state.at(0) == 
std::complex{1, 0}); + + std::complex sum{0, 0}; + for (std::size_t ind = 1; ind < state.size(); ind++) { + sum += state[ind]; + } + + CHECK(sum == std::complex{0, 0}); + } + + SECTION("PauliY gate") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs; + Qs.reserve(n_qubits); + for (std::size_t ind = 0; ind < n_qubits; ind++) { + Qs[ind] = LGPUsim->AllocateQubit(); + } + + for (std::size_t ind = 0; ind < n_qubits; ind++) { + LGPUsim->NamedOperation("PauliY", {}, {Qs[ind]}, false); + } + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state.at(0) == std::complex{0, 0}); + CHECK(state.at(1) == std::complex{0, 0}); + CHECK(state.at(2) == std::complex{0, 0}); + CHECK(state.at(3) == std::complex{-1, 0}); + } + + SECTION("PauliY and PauliZ gates") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs; + Qs.reserve(n_qubits); + for (std::size_t ind = 0; ind < n_qubits; ind++) { + Qs[ind] = LGPUsim->AllocateQubit(); + } + + LGPUsim->NamedOperation("PauliY", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("PauliZ", {}, {Qs[1]}, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state.at(0) == std::complex{0, 0}); + CHECK(state.at(1) == std::complex{0, 0}); + CHECK(state.at(2) == std::complex{0, 1}); + CHECK(state.at(3) == std::complex{0, 0}); + } + + SECTION("Hadamard gate") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs; + Qs.reserve(n_qubits); + for (std::size_t ind = 0; ind < n_qubits; ind++) { + Qs[ind] = LGPUsim->AllocateQubit(); + } + + for (std::size_t ind = 0; ind < n_qubits; ind++) { + LGPUsim->NamedOperation("Hadamard", {}, {Qs[ind]}, false); + } + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state[0] == + PLApproxComplex(std::complex{0.5, 0}).epsilon(1e-5)); + CHECK(state.at(1) == state.at(0)); + CHECK(state.at(2) == state.at(0)); + CHECK(state.at(3) == state.at(0)); + } + + SECTION("R(X, Y, Z) and PauliX gates") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 4; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("PauliX", {}, {Qs[0]}, false); + + LGPUsim->NamedOperation("RX", {0.123}, {Qs[1]}, false); + LGPUsim->NamedOperation("RY", {0.456}, {Qs[2]}, false); + LGPUsim->NamedOperation("RZ", {0.789}, {Qs[3]}, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + // calculated by pennylane. 
+ CHECK(state.at(0) == std::complex{0, 0}); + CHECK(state.at(1) == std::complex{0, 0}); + CHECK(state.at(2) == std::complex{0, 0}); + CHECK(state.at(3) == std::complex{0, 0}); + CHECK(state.at(4) == std::complex{0, 0}); + CHECK(state.at(5) == std::complex{0, 0}); + CHECK(state.at(6) == std::complex{0, 0}); + CHECK(state.at(7) == std::complex{0, 0}); + CHECK(state[8] == + PLApproxComplex( + std::complex{0.8975969498074641, -0.3736920921192206}) + .epsilon(1e-5)); + CHECK(state.at(9) == std::complex{0, 0}); + CHECK(state[10] == + PLApproxComplex(std::complex{0.20827363966052723, + -0.08670953277495183}) + .epsilon(1e-5)); + CHECK(state.at(11) == std::complex{0, 0}); + CHECK(state[12] == + PLApproxComplex(std::complex{-0.023011082205037697, + -0.055271914055973925}) + .epsilon(1e-5)); + CHECK(state.at(13) == std::complex{0, 0}); + CHECK(state[14] == + PLApproxComplex(std::complex{-0.005339369573836912, + -0.012825002038956146}) + .epsilon(1e-5)); + CHECK(state.at(15) == std::complex{0, 0}); + } + + SECTION("Hadamard, RX, PhaseShift with cache manager") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs; + Qs.reserve(n_qubits); + + Qs[0] = LGPUsim->AllocateQubit(); + Qs[1] = LGPUsim->AllocateQubit(); + + LGPUsim->StartTapeRecording(); + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("RX", {0.123}, {Qs[1]}, false); + LGPUsim->NamedOperation("PhaseShift", {0.456}, {Qs[0]}, false); + LGPUsim->StopTapeRecording(); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + // calculated by pennylane. + CHECK(state[0] == PLApproxComplex(std::complex{0.7057699753, 0}) + .epsilon(1e-5)); + CHECK(state[1] == PLApproxComplex(std::complex{0, -0.04345966}) + .epsilon(1e-5)); + CHECK(state[2] == + PLApproxComplex(std::complex{0.63365519, 0.31079312}) + .epsilon(1e-5)); + CHECK(state[3] == + PLApproxComplex(std::complex{0.01913791, -0.039019}) + .epsilon(1e-5)); + + std::tuple, std::vector> + expected{3, 0, 2, {"Hadamard", "RX", "PhaseShift"}, {}}; + REQUIRE(LGPUsim->CacheManagerInfo() == expected); + } + + // ============= 2-qubit operations ============= + + SECTION("PauliX and CNOT") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs; + Qs.reserve(n_qubits); + + for (std::size_t i = 0; i < n_qubits; i++) { + Qs[i] = LGPUsim->AllocateQubit(); + } + + LGPUsim->NamedOperation("PauliX", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("CNOT", {}, {Qs[0], Qs[1]}, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state.at(0) == std::complex{0, 0}); + CHECK(state.at(1) == std::complex{0, 0}); + CHECK(state.at(2) == std::complex{0, 0}); + CHECK(state.at(3) == std::complex{1, 0}); + } + + SECTION("Hadamard and CR(X, Y, Z)") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 4; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("CRX", {0.123}, {Qs[0], Qs[1]}, false); + LGPUsim->NamedOperation("CRY", {0.456}, {Qs[0], Qs[2]}, false); + LGPUsim->NamedOperation("CRZ", {0.789}, {Qs[0], Qs[3]}, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + // calculated by pennylane. 
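        // Illustrative note (not taken from this patch): the reference values
        // checked below can be regenerated with PennyLane, assuming a build
        // with the default.qubit device available, e.g.
        //
        //     import pennylane as qml
        //     dev = qml.device("default.qubit", wires=4)
        //
        //     @qml.qnode(dev)
        //     def circuit():
        //         qml.Hadamard(wires=0)
        //         qml.CRX(0.123, wires=[0, 1])
        //         qml.CRY(0.456, wires=[0, 2])
        //         qml.CRZ(0.789, wires=[0, 3])
        //         return qml.state()
        //
        //     print(circuit())  # nonzero entries at indices 0, 8, 10, 12, 14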
+ CHECK( + state[0] == + PLApproxComplex(std::complex{M_SQRT1_2, 0}).epsilon(1e-5)); + CHECK(state.at(1) == std::complex{0, 0}); + CHECK(state.at(2) == std::complex{0, 0}); + CHECK(state.at(3) == std::complex{0, 0}); + CHECK(state.at(4) == std::complex{0, 0}); + CHECK(state.at(5) == std::complex{0, 0}); + CHECK(state.at(6) == std::complex{0, 0}); + CHECK(state.at(7) == std::complex{0, 0}); + CHECK(state[8] == + PLApproxComplex( + std::complex{0.6346968899812189, -0.2642402124132889}) + .epsilon(1e-5)); + CHECK(state.at(9) == std::complex{0, 0}); + CHECK(state[10] == + PLApproxComplex(std::complex{0.14727170294636227, + -0.061312898618685635}) + .epsilon(1e-5)); + CHECK(state.at(11) == std::complex{0, 0}); + CHECK(state[12] == + PLApproxComplex(std::complex{-0.016271292269623247, + -0.03908314523813921}) + .epsilon(1e-5)); + CHECK(state.at(13) == std::complex{0, 0}); + CHECK(state[14] == + PLApproxComplex(std::complex{-0.0037755044329212074, + -0.009068645910477189}) + .epsilon(1e-5)); + CHECK(state.at(15) == std::complex{0, 0}); + } + + SECTION("Hadamard and CRot") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("CRot", {M_PI, M_PI_2, 0.5}, {Qs[0], Qs[1]}, + false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK( + state[0] == + PLApproxComplex(std::complex{M_SQRT1_2, 0}).epsilon(1e-5)); + + CHECK(state[1] == + PLApproxComplex(std::complex{0, 0}).epsilon(1e-5)); + + CHECK(state[2] == PLApproxComplex(std::complex{-0.1237019796, + -0.4844562109}) + .epsilon(1e-5)); + CHECK(state[3] == + PLApproxComplex(std::complex{0.1237019796, -0.4844562109}) + .epsilon(1e-5)); + } + + SECTION("Hadamard, PauliZ, IsingXY, SWAP") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("PauliZ", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("IsingXY", {0.2}, {Qs[1], Qs[0]}, false); + LGPUsim->NamedOperation("SWAP", {}, {Qs[0], Qs[1]}, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK( + state[0] == + PLApproxComplex(std::complex{M_SQRT1_2, 0}).epsilon(1e-5)); + CHECK(state[1] == PLApproxComplex(std::complex{-0.70357419, 0}) + .epsilon(1e-5)); + CHECK(state[2] == PLApproxComplex(std::complex{0, -0.07059289}) + .epsilon(1e-5)); + CHECK(state[3] == + PLApproxComplex(std::complex{0, 0}).epsilon(1e-5)); + } + + SECTION("Hadamard, PauliX and Toffoli") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 3; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("PauliX", {}, {Qs[1]}, false); + LGPUsim->NamedOperation("Toffoli", {}, {Qs[0], Qs[1], Qs[2]}, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state.at(0) == std::complex{0, 0}); + CHECK(state.at(1) == std::complex{0, 0}); + CHECK( + state[2] == + PLApproxComplex(std::complex{M_SQRT1_2, 0}).epsilon(1e-5)); + CHECK(state.at(3) == std::complex{0, 0}); + CHECK(state.at(4) == std::complex{0, 0}); + CHECK(state.at(5) == std::complex{0, 0}); + CHECK(state.at(6) == 
std::complex{0, 0}); + CHECK( + state[7] == + PLApproxComplex(std::complex{M_SQRT1_2, 0}).epsilon(1e-5)); + } + + SECTION("RX, Hadamard and MultiRZ") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("RX", {M_PI}, {Qs[1]}, false); + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("Hadamard", {}, {Qs[1]}, false); + LGPUsim->NamedOperation("MultiRZ", {M_PI}, {Qs[0], Qs[1]}, false); + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("Hadamard", {}, {Qs[1]}, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state[2] == + PLApproxComplex(std::complex{-1, 0}).margin(1e-5)); + } + + SECTION("Hadamard, CNOT and Matrix") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("CNOT", {}, {Qs[0], Qs[1]}, false); + + const std::vector wires = {Qs[0]}; + std::vector> matrix{ + {-0.6709485262524046, -0.6304426335363695}, + {-0.14885403153998722, 0.3608498832392019}, + {-0.2376311670004963, 0.3096798175687841}, + {-0.8818365947322423, -0.26456390390903695}, + }; + LGPUsim->MatrixOperation(matrix, wires, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state[0] == + PLApproxComplex(std::complex{-0.474432, -0.44579}) + .epsilon(1e-5)); + CHECK(state[1] == + PLApproxComplex(std::complex{-0.105256, 0.255159}) + .epsilon(1e-5)); + CHECK(state[2] == + PLApproxComplex(std::complex{-0.168031, 0.218977}) + .epsilon(1e-5)); + CHECK(state[3] == + PLApproxComplex(std::complex{-0.623553, -0.187075}) + .epsilon(1e-5)); + } + + SECTION("Hadamard, CR(X, Y, Z) and Matrix") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 4; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("CRX", {0.123}, {Qs[0], Qs[1]}, false); + LGPUsim->NamedOperation("CRY", {0.456}, {Qs[0], Qs[2]}, false); + LGPUsim->NamedOperation("CRZ", {0.789}, {Qs[0], Qs[3]}, false); + + const std::vector wires = {Qs[0], Qs[1], Qs[2]}; + std::vector> matrix{ + {-0.14601911598243822, -0.18655250647340088}, + {-0.03917826201290317, -0.031161687050443518}, + {0.11497626236175404, 0.38310733543366354}, + {-0.0929691815340695, 0.1219804125497268}, + {0.07306514883467692, 0.017445444816725875}, + {-0.27330866098918355, -0.6007032759764033}, + {0.4530754397715841, -0.08267189625512258}, + {0.32125201986075, -0.036845158875036116}, + {0.032317572838307884, 0.02292755555300329}, + {-0.18775945295623664, -0.060215004737844156}, + {-0.3093351335745536, -0.2061961962889725}, + {0.4216087567144761, 0.010534488410902099}, + {0.2769943541718527, -0.26016137877135465}, + {0.18727884147867532, 0.02830415812286322}, + {0.3367562196770689, -0.5250999173939218}, + {0.05770014289220745, 0.26595514845958573}, + {0.37885720163317027, 0.3110931426403546}, + {0.13436510737129648, -0.4083415934958021}, + {-0.5443665467635203, 0.2458343977310266}, + {-0.050346912365833024, 0.08709833123617361}, + {0.11505259829552131, 0.010155858056939438}, + {-0.2930849061531229, 0.019339259194141145}, + {0.011825409829453282, 0.011597907736881019}, + 
{-0.10565527258356637, -0.3113689446440079}, + {0.0273191284561944, -0.2479498526173881}, + {-0.5528072425836249, -0.06114469689935285}, + {-0.20560364740746587, -0.3800208994544297}, + {-0.008236143958221483, 0.3017421511504845}, + {0.04817188123334976, 0.08550951191632741}, + {-0.24081054643565586, -0.3412671345149831}, + {-0.38913538197001885, 0.09288402897806938}, + {-0.07937578245883717, 0.013979426755633685}, + {0.22246583652015395, -0.18276674810033927}, + {0.22376666162382491, 0.2995723155125488}, + {-0.1727191441070097, -0.03880522034607489}, + {0.075780203819001, 0.2818783673816625}, + {-0.6161322400651016, 0.26067347179217193}, + {-0.021161519614267765, -0.08430919051054794}, + {0.1676500381348944, -0.30645601624407504}, + {-0.28858251997285883, 0.018089595494883842}, + {-0.19590767481842053, -0.12844366632033652}, + {0.18707834504831794, -0.1363932722670649}, + {-0.07224221779769334, -0.11267803536286894}, + {-0.23897684826459387, -0.39609971967853685}, + {-0.0032110880452929555, -0.29294331305690136}, + {-0.3188741682462722, -0.17338979346647143}, + {0.08194395032821632, -0.002944814673179825}, + {-0.5695791830944521, 0.33299548924055095}, + {-0.4983660307441444, -0.4222358493977972}, + {0.05533914327048402, -0.42575842134560576}, + {-0.2187623521182678, -0.03087596187054778}, + {0.11278255885846857, 0.07075886163492914}, + {-0.3054684775292515, -0.1739796870866232}, + {0.14151567663565712, 0.20399935744127418}, + {0.06720165377364941, 0.07543463072363207}, + {0.08019665306716581, -0.3473013434358584}, + {-0.2600167605995786, -0.08795704036197827}, + {0.125680477777759, 0.266342700305046}, + {-0.1586772594600269, 0.187360909108502}, + {-0.4653314704208982, 0.4048609954619629}, + {0.39992560380733094, -0.10029244177901954}, + {0.2533527906886461, 0.05222114898540775}, + {-0.15840033949128557, -0.2727320427534386}, + {-0.21590866323269536, -0.1191163626522938}, + }; + LGPUsim->MatrixOperation(matrix, wires, false); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + CHECK(state[0] == + PLApproxComplex(std::complex{-0.141499, -0.230993}) + .epsilon(1e-5)); + CHECK(state[2] == + PLApproxComplex(std::complex{0.135423, -0.235563}) + .epsilon(1e-5)); + CHECK(state[4] == + PLApproxComplex(std::complex{0.299458, 0.218321}) + .epsilon(1e-5)); + CHECK(state[6] == + PLApproxComplex(std::complex{0.0264869, -0.154913}) + .epsilon(1e-5)); + CHECK(state[8] == + PLApproxComplex(std::complex{-0.186607, 0.188884}) + .epsilon(1e-5)); + CHECK(state[10] == + PLApproxComplex(std::complex{-0.271843, -0.281136}) + .epsilon(1e-5)); + CHECK(state[12] == + PLApproxComplex(std::complex{-0.560499, -0.310176}) + .epsilon(1e-5)); + CHECK(state[14] == + PLApproxComplex(std::complex{0.0756372, -0.226334}) + .epsilon(1e-5)); + } + + SECTION("Hadamard and IsingZZ and cache manager") { + std::unique_ptr LGPUsim = + std::make_unique(); + + constexpr std::size_t n_qubits = 2; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + LGPUsim->StartTapeRecording(); + LGPUsim->NamedOperation("Hadamard", {}, {Qs[0]}, false); + LGPUsim->NamedOperation("Hadamard", {}, {Qs[1]}, false); + LGPUsim->NamedOperation("IsingZZ", {M_PI_4}, {Qs[0], Qs[1]}, false); + LGPUsim->StopTapeRecording(); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + std::complex c1{0.4619397663, -0.1913417162}; + std::complex c2{0.4619397663, 0.1913417162}; + + CHECK(state[0] == PLApproxComplex(c1).epsilon(1e-5)); + CHECK(state[1] == 
PLApproxComplex(c2).epsilon(1e-5)); + CHECK(state[2] == PLApproxComplex(c2).epsilon(1e-5)); + CHECK(state[3] == PLApproxComplex(c1).epsilon(1e-5)); + + std::tuple, std::vector> + expected{3, 0, 1, {"Hadamard", "Hadamard", "IsingZZ"}, {}}; + REQUIRE(LGPUsim->CacheManagerInfo() == expected); + } + + SECTION("Test setStateVector") { + std::unique_ptr LGPUsim = + std::make_unique(); + constexpr std::size_t n_qubits = 2; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + std::vector> data = {{0.5, 0.5}, {0.0, 0.0}}; + DataView, 1> data_view(data); + std::vector wires = {1}; + LGPUsim->SetState(data_view, wires); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + std::complex c1{0.5, 0.5}; + std::complex c2{0.0, 0.0}; + CHECK(state[0] == PLApproxComplex(c1).epsilon(1e-5)); + CHECK(state[1] == PLApproxComplex(c2).epsilon(1e-5)); + CHECK(state[2] == PLApproxComplex(c2).epsilon(1e-5)); + CHECK(state[3] == PLApproxComplex(c2).epsilon(1e-5)); + } + + SECTION("Test setBasisState") { + std::unique_ptr LGPUsim = + std::make_unique(); + constexpr std::size_t n_qubits = 1; + std::vector Qs = LGPUsim->AllocateQubits(n_qubits); + + std::vector data = {0}; + DataView data_view(data); + std::vector wires = {0}; + LGPUsim->SetBasisState(data_view, wires); + + std::vector> state(1U << LGPUsim->GetNumQubits()); + DataView, 1> view(state); + LGPUsim->State(view); + + std::complex c1{1.0, 0.0}; + std::complex c2{0.0, 0.0}; + CHECK(state[0] == PLApproxComplex(c1).epsilon(1e-5)); + CHECK(state[1] == PLApproxComplex(c2).epsilon(1e-5)); + } +} diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/runner_lightning_gpu_catalyst.cpp b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/runner_lightning_gpu_catalyst.cpp new file mode 100644 index 0000000000..4ed06df1f7 --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/catalyst/tests/runner_lightning_gpu_catalyst.cpp @@ -0,0 +1,2 @@ +#define CATCH_CONFIG_MAIN +#include diff --git a/pennylane_lightning/core/src/simulators/lightning_kokkos/catalyst/CMakeLists.txt b/pennylane_lightning/core/src/simulators/lightning_kokkos/catalyst/CMakeLists.txt index cdf0570904..433e16d992 100644 --- a/pennylane_lightning/core/src/simulators/lightning_kokkos/catalyst/CMakeLists.txt +++ b/pennylane_lightning/core/src/simulators/lightning_kokkos/catalyst/CMakeLists.txt @@ -7,71 +7,8 @@ add_library(lightning_kokkos_catalyst SHARED ${LK_CATALYST_FILES}) include(FetchContent) -if(LIGHTNING_CATALYST_SRC_PATH) - if(NOT IS_ABSOLUTE ${LIGHTNING_CATALYST_SRC_PATH}) - message(FATAL_ERROR " LIGHTNING_CATALYST_SRC_PATH=${LIGHTNING_CATALYST_SRC_PATH} must be set to an absolute path") - endif() - if(CATALYST_GIT_TAG) - message(WARN " Setting `LIGHTNING_CATALYST_SRC_PATH=${LIGHTNING_CATALYST_SRC_PATH}` overrides `CATALYST_GIT_TAG=${CATALYST_GIT_TAG}`") - endif() - - # Acquire local git hash and use for CATALYST_GIT_TAG - execute_process(COMMAND git rev-parse --short HEAD - WORKING_DIRECTORY ${LIGHTNING_CATALYST_SRC_PATH} - OUTPUT_VARIABLE CATALYST_GIT_TAG - ) - message(INFO " Building against local Catalyst - path: ${LIGHTNING_CATALYST_SRC_PATH} - GIT TAG: ${CATALYST_GIT_TAG}") - - target_include_directories(lightning_kokkos_catalyst PUBLIC ${LIGHTNING_CATALYST_SRC_PATH}/runtime/lib/backend/common) - target_include_directories(lightning_kokkos_catalyst PUBLIC ${LIGHTNING_CATALYST_SRC_PATH}/runtime/include) - -else() - if(NOT CATALYST_GIT_TAG) - set(CATALYST_GIT_TAG "main" 
CACHE STRING "GIT_TAG value to build Catalyst") - endif() - message(INFO " Building against Catalyst GIT TAG ${CATALYST_GIT_TAG}") - - # Fetching /lib/backend/common hpp headers - set(LIB_BACKEND_COMMON_HEADERS CacheManager.hpp - QubitManager.hpp - Utils.hpp - ) - - foreach(HEADER ${LIB_BACKEND_COMMON_HEADERS}) - string(REGEX REPLACE "\\.[^.]*$" "" HEADER_NAME ${HEADER}) - FetchContent_Declare( - ${HEADER_NAME} - URL https://raw.githubusercontent.com/PennyLaneAI/catalyst/${CATALYST_GIT_TAG}/runtime/lib/backend/common/${HEADER} - DOWNLOAD_NO_EXTRACT True - SOURCE_DIR include - ) - - FetchContent_MakeAvailable(${HEADER_NAME}) - endforeach() - - # Fetching include hpp headers - set(INCLUDE_HEADERS DataView.hpp - Exception.hpp - QuantumDevice.hpp - RuntimeCAPI.h - Types.h - ) - - foreach(HEADER ${INCLUDE_HEADERS}) - string(REGEX REPLACE "\\.[^.]*$" "" HEADER_NAME ${HEADER}) - FetchContent_Declare( - ${HEADER_NAME} - URL https://raw.githubusercontent.com/PennyLaneAI/catalyst/${CATALYST_GIT_TAG}/runtime/include/${HEADER} - DOWNLOAD_NO_EXTRACT True - SOURCE_DIR include - ) - - FetchContent_MakeAvailable(${HEADER_NAME}) - endforeach() - - target_include_directories(lightning_kokkos_catalyst PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/include) - -endif() +include("${pennylane_lightning_SOURCE_DIR}/cmake/support_catalyst.cmake") +FindCatalyst(lightning_kokkos_catalyst) target_include_directories(lightning_kokkos_catalyst INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(lightning_kokkos_catalyst PUBLIC lightning_compile_options diff --git a/pennylane_lightning/lightning_gpu/lightning_gpu.py b/pennylane_lightning/lightning_gpu/lightning_gpu.py index 56454613cc..7dbf02ce28 100644 --- a/pennylane_lightning/lightning_gpu/lightning_gpu.py +++ b/pennylane_lightning/lightning_gpu/lightning_gpu.py @@ -18,6 +18,8 @@ """ from __future__ import annotations +import os +import sys from ctypes.util import find_library from dataclasses import replace from importlib import util as imp_util @@ -152,6 +154,7 @@ "Sum", "Prod", "SProd", + "Exp", } ) @@ -537,3 +540,43 @@ def simulate( state.reset_state() final_state = state.get_final_state(circuit) return self.LightningMeasurements(final_state).measure_final_state(circuit) + + @staticmethod + def get_c_interface(): + """Returns a tuple consisting of the device name, and + the location to the shared object with the C/C++ device implementation. + """ + + # The shared object file extension varies depending on the underlying operating system + file_extension = "" + OS = sys.platform + if OS == "linux": + file_extension = ".so" + else: + raise RuntimeError( + f"'LightningGPUSimulator' shared library not available for '{OS}' platform" + ) # pragma: no cover + + lib_name = "liblightning_gpu_catalyst" + file_extension + package_root = Path(__file__).parent + + # The absolute path of the plugin shared object varies according to the installation mode. + + # Wheel mode: + # Fixed location at the root of the project + wheel_mode_location = package_root.parent / lib_name + if wheel_mode_location.is_file(): + return "LightningGPUSimulator", wheel_mode_location.as_posix() + + # Editable mode: + # The build directory contains a folder which varies according to the platform: + # lib.--" + # To avoid mismatching the folder name, we search for the shared object instead. + # TODO: locate where the naming convention of the folder is decided and replicate it here. 
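        # Illustrative note (not taken from this patch): with this entry point in
        # place, Catalyst can locate the shared library and QJIT-compile hybrid
        # programs against Lightning-GPU. A minimal sketch, assuming `catalyst`
        # and `pennylane-lightning-gpu` are both installed:
        #
        #     import pennylane as qml
        #     from catalyst import qjit
        #
        #     dev = qml.device("lightning.gpu", wires=2)
        #
        #     @qjit
        #     @qml.qnode(dev)
        #     def circuit(x):
        #         qml.RX(x, wires=0)
        #         qml.CNOT(wires=[0, 1])
        #         return qml.expval(qml.PauliZ(1))
        #
        #     circuit(0.5)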
+ editable_mode_path = package_root.parent.parent / "build_lightning_gpu" + for path, _, files in os.walk(editable_mode_path): + if lib_name in files: + lib_location = (Path(path) / lib_name).as_posix() + return "LightningGPUSimulator", lib_location + + raise RuntimeError("'LightningGPUSimulator' shared library not found") # pragma: no cover diff --git a/pennylane_lightning/lightning_gpu/lightning_gpu.toml b/pennylane_lightning/lightning_gpu/lightning_gpu.toml index b18470da6b..d4a4373754 100644 --- a/pennylane_lightning/lightning_gpu/lightning_gpu.toml +++ b/pennylane_lightning/lightning_gpu/lightning_gpu.toml @@ -1,46 +1,45 @@ -# TODO: verify the contents of this file against what the device reports. schema = 2 # The union of all gate types listed in this section must match what # the device considers "supported" through PennyLane's device API. [operators.gates.native] -Identity = { properties = [ "differentiable" ] } -PauliX = { properties = [ "differentiable" ] } -PauliY = { properties = [ "differentiable" ] } -PauliZ = { properties = [ "differentiable" ] } -Hadamard = { properties = [ "differentiable" ] } -S = { properties = [ "differentiable" ] } -T = { properties = [ "differentiable" ] } -PhaseShift = { properties = [ "differentiable" ] } -RX = { properties = [ "differentiable" ] } -RY = { properties = [ "differentiable" ] } -RZ = { properties = [ "differentiable" ] } -Rot = { properties = [ "differentiable" ] } -CNOT = { properties = [ "differentiable" ] } -CY = { properties = [ "differentiable" ] } -CZ = { properties = [ "differentiable" ] } -SWAP = { properties = [ "differentiable" ] } -CSWAP = { properties = [ "differentiable" ] } -Toffoli = { properties = [ "differentiable" ] } -IsingXX = { properties = [ "differentiable" ] } -IsingXY = { properties = [ "differentiable" ] } -IsingYY = { properties = [ "differentiable" ] } -IsingZZ = { properties = [ "differentiable" ] } -ControlledPhaseShift = { properties = [ "differentiable" ] } -CRX = { properties = [ "differentiable" ] } -CRY = { properties = [ "differentiable" ] } -CRZ = { properties = [ "differentiable" ] } -CRot = { properties = [ "differentiable" ] } -SingleExcitation = { properties = [ "differentiable" ] } -SingleExcitationPlus = { properties = [ "differentiable" ] } -SingleExcitationMinus = { properties = [ "differentiable" ] } -DoubleExcitation = { properties = [ "differentiable" ] } -DoubleExcitationPlus = { properties = [ "differentiable" ] } -DoubleExcitationMinus = { properties = [ "differentiable" ] } -MultiRZ = { properties = [ "differentiable" ] } -QubitUnitary = { properties = [ "differentiable" ] } -GlobalPhase = { properties = [ "differentiable" ] } +Identity = { properties = [ "invertible", "differentiable" ] } +PauliX = { properties = [ "invertible", "differentiable" ] } +PauliY = { properties = [ "invertible", "differentiable" ] } +PauliZ = { properties = [ "invertible", "differentiable" ] } +Hadamard = { properties = [ "invertible", "differentiable" ] } +S = { properties = [ "invertible", "differentiable" ] } +T = { properties = [ "invertible", "differentiable" ] } +PhaseShift = { properties = [ "invertible", "differentiable" ] } +RX = { properties = [ "invertible", "differentiable" ] } +RY = { properties = [ "invertible", "differentiable" ] } +RZ = { properties = [ "invertible", "differentiable" ] } +Rot = { properties = [ "invertible", ] } +CNOT = { properties = [ "invertible", "differentiable" ] } +CY = { properties = [ "invertible", "differentiable" ] } +CZ = { properties = [ "invertible", 
"differentiable" ] } +SWAP = { properties = [ "invertible", "differentiable" ] } +CSWAP = { properties = [ "invertible", "differentiable" ] } +Toffoli = { properties = [ "invertible", "differentiable" ] } +IsingXX = { properties = [ "invertible", "differentiable" ] } +IsingXY = { properties = [ "invertible", "differentiable" ] } +IsingYY = { properties = [ "invertible", "differentiable" ] } +IsingZZ = { properties = [ "invertible", "differentiable" ] } +ControlledPhaseShift = { properties = [ "invertible", "differentiable" ] } +CRX = { properties = [ "invertible", "differentiable" ] } +CRY = { properties = [ "invertible", "differentiable" ] } +CRZ = { properties = [ "invertible", "differentiable" ] } +CRot = { properties = [ "invertible", ] } +SingleExcitation = { properties = [ "invertible", "differentiable" ] } +SingleExcitationPlus = { properties = [ "invertible", "differentiable" ] } +SingleExcitationMinus = { properties = [ "invertible", "differentiable" ] } +DoubleExcitation = { properties = [ "invertible", "differentiable" ] } +DoubleExcitationPlus = { properties = [ "invertible", "differentiable" ] } +DoubleExcitationMinus = { properties = [ "invertible", "differentiable" ] } +MultiRZ = { properties = [ "invertible", "differentiable" ] } +QubitUnitary = { properties = [ "invertible", ] } +GlobalPhase = { properties = [ "invertible", "differentiable" ] } # Operators that should be decomposed according to the algorithm used # by PennyLane's device API. @@ -74,14 +73,19 @@ DiagonalQubitUnitary = {} # Observables supported by the device [operators.observables] -Identity = {} -PauliX = {} -PauliY = {} -PauliZ = {} -Hadamard = {} -Hermitian = {} -Hamiltonian = {} -SparseHamiltonian = {} +Identity = { properties = [ "differentiable" ] } +PauliX = { properties = [ "differentiable" ] } +PauliY = { properties = [ "differentiable" ] } +PauliZ = { properties = [ "differentiable" ] } +Hadamard = { properties = [ "differentiable" ] } +Hermitian = { properties = [ "differentiable" ] } +Hamiltonian = { properties = [ "differentiable" ] } +SparseHamiltonian = { properties = [ "differentiable" ] } +Sum = { properties = [ "differentiable" ] } +SProd = { properties = [ "differentiable" ] } +Prod = { properties = [ "differentiable" ] } +Exp = { properties = [ "differentiable" ] } +LinearCombination = { properties = [ "differentiable" ] } [measurement_processes] @@ -90,11 +94,11 @@ Var = {} Probs = {} State = { condition = [ "analytic" ] } Sample = { condition = [ "finiteshots" ] } -Counts = { condition = [ "finiteshots" ] } +Counts = { condition = [ "finiteshots" ] } [compilation] # If the device is compatible with qjit -qjit_compatible = false +qjit_compatible = true # If the device requires run time generation of the quantum circuit. 
runtime_code_generation = false # If the device supports mid circuit measurements natively diff --git a/setup.py b/setup.py index 048219ed2b..e0bf04ecc9 100644 --- a/setup.py +++ b/setup.py @@ -157,6 +157,11 @@ def build_extension(self, ext: CMakeExtension): ) # Ensure that catalyst shared object is copied to the build directory for pip editable install + if backend in ("lightning_gpu"): + source = os.path.join(f"{extdir}", f"lib{backend}_catalyst.so") + destination = os.path.join(os.getcwd(), f"build_{backend}") + shutil.copy(source, destination) + if backend in ("lightning_kokkos"): if platform.system() in ["Linux", "Darwin"]: shared_lib_ext = {"Linux": ".so", "Darwin": ".dylib"}[platform.system()] diff --git a/tests/test_device.py b/tests/test_device.py index 178a0e020a..8474deec9d 100644 --- a/tests/test_device.py +++ b/tests/test_device.py @@ -140,6 +140,21 @@ def test_supported_linux_platform_kokkos(): assert "liblightning_kokkos_catalyst.so" in shared_lib_name +@pytest.mark.skipif( + (device_name != "lightning.gpu" or sys.platform != "linux"), + reason="This test is for LGPU under Linux only.", +) +def test_supported_linux_platform_gpu(): + """Test supported Linux platform for LGPU.""" + + dev = qml.device(device_name, wires=1) + + dev_name, shared_lib_name = dev.get_c_interface() + + assert dev_name == "LightningGPUSimulator" + assert "liblightning_gpu_catalyst.so" in shared_lib_name + + @pytest.mark.skipif( (device_name != "lightning.kokkos" or sys.platform != "darwin"), reason="This test is for Kokkos under MacOS only.", From a3564a1cde7c8a3480a9fd38ab9ff791a0b9865d Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Fri, 1 Nov 2024 16:00:03 +0000 Subject: [PATCH 04/20] update version numbers --- .github/workflows/compat-docker-release.yml | 4 ++-- pennylane_lightning/core/lightning_base.py | 2 +- requirements-dev.txt | 2 +- requirements-tests.txt | 2 +- scripts/create_lightning_rc.sh | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/compat-docker-release.yml b/.github/workflows/compat-docker-release.yml index d63f715b95..0fc8146ff3 100644 --- a/.github/workflows/compat-docker-release.yml +++ b/.github/workflows/compat-docker-release.yml @@ -14,7 +14,7 @@ jobs: name: Docker release - Linux::x86_64 uses: ./.github/workflows/docker_linux_x86_64.yml with: - lightning-version: v0.38.0_rc - pennylane-version: v0.38.0-rc0 + lightning-version: v0.39.0_rc + pennylane-version: v0.39.0-rc0 push-to-dockerhub: false secrets: inherit # pass all secrets diff --git a/pennylane_lightning/core/lightning_base.py b/pennylane_lightning/core/lightning_base.py index dd355e6a0c..3011a62233 100644 --- a/pennylane_lightning/core/lightning_base.py +++ b/pennylane_lightning/core/lightning_base.py @@ -50,7 +50,7 @@ class LightningBase(QubitDevice): OpenMP. """ - pennylane_requires = ">=0.37" + pennylane_requires = ">=0.38" version = __version__ author = "Xanadu Inc." 
short_name = "lightning.base" diff --git a/requirements-dev.txt b/requirements-dev.txt index 5ce2c0b9d8..ac8f7b4897 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -18,4 +18,4 @@ cmake custatevec-cu12 cutensornet-cu12 pylint==2.7.4 -git+https://github.com/PennyLaneAI/pennylane.git@master +git+https://github.com/PennyLaneAI/pennylane.git@v0.39.0-rc0 diff --git a/requirements-tests.txt b/requirements-tests.txt index 4643390dfd..5b66eb4222 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -4,4 +4,4 @@ pytest-cov>=3.0.0 pytest-mock>=3.7.0 pytest-xdist>=2.5.0 flaky>=3.7.0 -git+https://github.com/PennyLaneAI/pennylane.git@master +git+https://github.com/PennyLaneAI/pennylane.git@v0.39.0-rc0 diff --git a/scripts/create_lightning_rc.sh b/scripts/create_lightning_rc.sh index 13dee995d3..cc2681f248 100644 --- a/scripts/create_lightning_rc.sh +++ b/scripts/create_lightning_rc.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -OLDVER=0.37.0 -LVER=0.38.0 +OLDVER=0.38.0 +LVER=0.39.0 rreplace(){ grep -rl "$1" . | xargs sed -i "s|$1|$2|g" } From 35772431c71a8103a74f1122e363f89b8a0d6dc7 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Fri, 1 Nov 2024 16:04:04 +0000 Subject: [PATCH 05/20] update PR orders in the changelog --- .github/CHANGELOG.md | 60 ++++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 3bc1c174cd..ad0271b5cf 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -2,21 +2,11 @@ ### New features since last release -* Integrate Lightning-GPU with Catalyst. - [(#928)](https://github.com/PennyLaneAI/pennylane-lightning/pull/928) - * Add `mid-circuit measurements` support to `lightning.gpu`'s single-GPU backend. [(#931)](https://github.com/PennyLaneAI/pennylane-lightning/pull/931) -* Add Matrix Product Operator (MPO) for all gates support to `lightning.tensor`. Note current C++ implementation only works for MPO sites data provided by users. - [(#859)](https://github.com/PennyLaneAI/pennylane-lightning/pull/859) - -* Add shot measurement support to `lightning.tensor`. - [(#852)](https://github.com/PennyLaneAI/pennylane-lightning/pull/852) - -* Build and upload Lightning-Tensor wheels (x86_64, AARCH64) to PyPI. - [(#862)](https://github.com/PennyLaneAI/pennylane-lightning/pull/862) - [(#905)](https://github.com/PennyLaneAI/pennylane-lightning/pull/905) +* Integrate Lightning-GPU with Catalyst. + [(#928)](https://github.com/PennyLaneAI/pennylane-lightning/pull/928) * Add `Projector` observable support via diagonalization to Lightning-GPU. [(#894)](https://github.com/PennyLaneAI/pennylane-lightning/pull/894) @@ -24,12 +14,22 @@ * Add 1-target wire controlled gate support to `lightning.tensor`. Note that `cutensornet` only supports 1-target wire controlled gate as of `v24.08`. A controlled gate with more than 1 target wire should be converted to dense matrix. [(#880)](https://github.com/PennyLaneAI/pennylane-lightning/pull/880) -* Lightning-Kokkos migrated to the new device API. - [(#810)](https://github.com/PennyLaneAI/pennylane-lightning/pull/810) +* Build and upload Lightning-Tensor wheels (x86_64, AARCH64) to PyPI. + [(#862)](https://github.com/PennyLaneAI/pennylane-lightning/pull/862) + [(#905)](https://github.com/PennyLaneAI/pennylane-lightning/pull/905) + +* Add Matrix Product Operator (MPO) for all gates support to `lightning.tensor`. Note current C++ implementation only works for MPO sites data provided by users. 
+ [(#859)](https://github.com/PennyLaneAI/pennylane-lightning/pull/859) * Lightning-GPU migrated to the new device API. [(#853)](https://github.com/PennyLaneAI/pennylane-lightning/pull/853) +* Add shot measurement support to `lightning.tensor`. + [(#852)](https://github.com/PennyLaneAI/pennylane-lightning/pull/852) + +* Lightning-Kokkos migrated to the new device API. + [(#810)](https://github.com/PennyLaneAI/pennylane-lightning/pull/810) + ### Breaking changes * Deprecate `initSV()` and add `resetStateVector()` to `lightning.gpu`. @@ -38,12 +38,12 @@ * Deprecate PI gates implementation. [(#925)](https://github.com/PennyLaneAI/pennylane-lightning/pull/925) -* Update MacOS wheel builds to require Monterey (12.0) or greater for x86_64 and ARM. - [(#901)](https://github.com/PennyLaneAI/pennylane-lightning/pull/901) - * Remove PowerPC wheel build recipe for Lightning-Qubit. [(#902)](https://github.com/PennyLaneAI/pennylane-lightning/pull/902) +* Update MacOS wheel builds to require Monterey (12.0) or greater for x86_64 and ARM. + [(#901)](https://github.com/PennyLaneAI/pennylane-lightning/pull/901) + * Remove support for Python 3.9. [(#891)](https://github.com/PennyLaneAI/pennylane-lightning/pull/891) @@ -55,45 +55,45 @@ * Update `README.rst` installation instructions for `lightning.gpu` and `lightning.tensor`. [(#957)](https://github.com/PennyLaneAI/pennylane-lightning/pull/957) -* Optimize `GlobalPhase` and `C(GlobalPhase)` gate implementation in `lightning.gpu`. - [(#946)](https://github.com/PennyLaneAI/pennylane-lightning/pull/946) - * Add joint check for the N-controlled wires support in `lightning.qubit`. [(#949)](https://github.com/PennyLaneAI/pennylane-lightning/pull/949) +* Optimize `GlobalPhase` and `C(GlobalPhase)` gate implementation in `lightning.gpu`. + [(#946)](https://github.com/PennyLaneAI/pennylane-lightning/pull/946) + * Optimize the cartesian product to reduce the amount of memory necessary to set the StatePrep with LightningTensor. [(#943)](https://github.com/PennyLaneAI/pennylane-lightning/pull/943) * The `prob` data return `lightning.gpu` C++ layer is aligned with other state-vector backends and `lightning.gpu` supports out-of-order `qml.prob`. - [(#941)](https://github.com/PennyLaneAI/pennylane-lightning/pull/941) - -* Add `setStateVector(state, wire)` support to the `lightning.gpu` C++ layer. - [(#930)](https://github.com/PennyLaneAI/pennylane-lightning/pull/930) + [(#941)](https://github.com/PennyLaneAI/pennylane-lightning/pull/941) * Add zero-state initialization to both `StateVectorCudaManaged` and `StateVectorCudaMPI` constructors to remove the `reset_state` in the python layer ctor and refactor `setBasisState(state, wires)` in the C++ layer. [(#933)](https://github.com/PennyLaneAI/pennylane-lightning/pull/933) + +* Add `setStateVector(state, wire)` support to the `lightning.gpu` C++ layer. + [(#930)](https://github.com/PennyLaneAI/pennylane-lightning/pull/930) * The `generate_samples` methods of lightning.{qubit/kokkos} can now take in a seed number to make the generated samples deterministic. This can be useful when, among other things, fixing flaky tests in CI. [(#927)](https://github.com/PennyLaneAI/pennylane-lightning/pull/927) +* Remove dynamic decomposition rules in Lightning. + [(#926)](https://github.com/PennyLaneAI/pennylane-lightning/pull/926) + * Always decompose `qml.QFT` in Lightning. [(#924)](https://github.com/PennyLaneAI/pennylane-lightning/pull/924) * Uniform Python format to adhere PennyLane style. 
[(#924)](https://github.com/PennyLaneAI/pennylane-lightning/pull/924) -* Remove dynamic decomposition rules in Lightning. - [(#926)](https://github.com/PennyLaneAI/pennylane-lightning/pull/926) - * Add the `ci:use-gpu-runner` GitHub label to `lightning.kokkos` GPU Testing CIs. [(#916)](https://github.com/PennyLaneAI/pennylane-lightning/pull/916) -* Merge `lightning.gpu` and `lightning.tensor` GPU tests in single Python and C++ CIs controlled by the `ci:use-gpu-runner` label. - [(#911)](https://github.com/PennyLaneAI/pennylane-lightning/pull/911) - * Update the test suite to remove deprecated code. [(#912)](https://github.com/PennyLaneAI/pennylane-lightning/pull/912) +* Merge `lightning.gpu` and `lightning.tensor` GPU tests in single Python and C++ CIs controlled by the `ci:use-gpu-runner` label. + [(#911)](https://github.com/PennyLaneAI/pennylane-lightning/pull/911) + * Skip the compilation of Lightning simulators and development requirements to boost the build of public docs up to 5x. [(#904)](https://github.com/PennyLaneAI/pennylane-lightning/pull/904) From 22664aa6a17d92c96a4e404b36b4646d0510d4d4 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Fri, 1 Nov 2024 16:17:05 +0000 Subject: [PATCH 06/20] upload wheels --- .github/workflows/wheel_linux_aarch64.yml | 4 ++-- .github/workflows/wheel_linux_aarch64_cuda.yml | 4 ++-- .github/workflows/wheel_linux_x86_64.yml | 4 ++-- .github/workflows/wheel_linux_x86_64_cuda.yml | 4 ++-- .github/workflows/wheel_macos_arm64.yml | 4 ++-- .github/workflows/wheel_macos_x86_64.yml | 4 ++-- .github/workflows/wheel_noarch.yml | 4 ++-- .github/workflows/wheel_win_x86_64.yml | 4 ++-- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/wheel_linux_aarch64.yml b/.github/workflows/wheel_linux_aarch64.yml index bc21a56822..dca8b650e2 100644 --- a/.github/workflows/wheel_linux_aarch64.yml +++ b/.github/workflows/wheel_linux_aarch64.yml @@ -184,7 +184,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' with: @@ -202,7 +202,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_linux_aarch64_cuda.yml b/.github/workflows/wheel_linux_aarch64_cuda.yml index 4864fa0167..fb590f19ec 100644 --- a/.github/workflows/wheel_linux_aarch64_cuda.yml +++ b/.github/workflows/wheel_linux_aarch64_cuda.yml @@ -107,7 +107,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' with: @@ -128,7 +128,7 @@ jobs: permissions: id-token: write if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index 92832fd0eb..30c82fb9fe 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -198,7 +198,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || 
steps.rc_build.outputs.match != '' @@ -217,7 +217,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_linux_x86_64_cuda.yml b/.github/workflows/wheel_linux_x86_64_cuda.yml index abd30c9c65..c76be71da8 100644 --- a/.github/workflows/wheel_linux_x86_64_cuda.yml +++ b/.github/workflows/wheel_linux_x86_64_cuda.yml @@ -119,7 +119,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != '' @@ -141,7 +141,7 @@ jobs: permissions: id-token: write if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_macos_arm64.yml b/.github/workflows/wheel_macos_arm64.yml index ef50adb314..36f7676ab0 100644 --- a/.github/workflows/wheel_macos_arm64.yml +++ b/.github/workflows/wheel_macos_arm64.yml @@ -126,7 +126,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' with: @@ -144,7 +144,7 @@ jobs: cibw_build: ${{ fromJson(needs.mac-set-matrix-arm.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_macos_x86_64.yml b/.github/workflows/wheel_macos_x86_64.yml index edd631ceff..c81162f43d 100644 --- a/.github/workflows/wheel_macos_x86_64.yml +++ b/.github/workflows/wheel_macos_x86_64.yml @@ -184,7 +184,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != '' @@ -203,7 +203,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_noarch.yml b/.github/workflows/wheel_noarch.yml index 0414fcd7b8..50bf7f1d5a 100644 --- a/.github/workflows/wheel_noarch.yml +++ b/.github/workflows/wheel_noarch.yml @@ -86,7 +86,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | matrix.pl_backend == 'lightning_qubit' && - (github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master') + (github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master') with: name: pure-python-wheels-${{ matrix.pl_backend }}.zip path: dist/*.whl @@ -102,7 +102,7 @@ jobs: steps: - uses: actions/download-artifact@v4 - if: ${{ matrix.pl_backend == 'lightning_qubit' && github.event_name == 'release' }} + if: ${{ matrix.pl_backend == 'lightning_qubit' && github.event_name == 'pull_request' }} with: name: pure-python-wheels-${{ matrix.pl_backend }}.zip path: dist diff --git a/.github/workflows/wheel_win_x86_64.yml b/.github/workflows/wheel_win_x86_64.yml index d2464ce9cf..4c6c5371a1 100644 --- a/.github/workflows/wheel_win_x86_64.yml +++ 
b/.github/workflows/wheel_win_x86_64.yml @@ -206,7 +206,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != '' @@ -225,7 +225,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'release' || + github.event_name == 'pull_request' || github.ref == 'refs/heads/master' steps: From 976baf684a4487f1a29f9d524b794d02d1e76997 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Fri, 1 Nov 2024 16:18:32 +0000 Subject: [PATCH 07/20] Trigger CIs From 449579bb6147a069ec9a8e285c0dd40c940271da Mon Sep 17 00:00:00 2001 From: Shuli Shu <31480676+multiphaseCFD@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:51:23 -0500 Subject: [PATCH 08/20] Fix qml.state() support for LT (#971) ### Before submitting Please complete the following checklist when submitting a PR: - [ ] All new features must include a unit test. If you've fixed a bug or added code that should be tested, add a test to the [`tests`](../tests) directory! - [ ] All new functions and code must be clearly commented and documented. If you do make documentation changes, make sure that the docs build and render correctly by running `make docs`. - [ ] Ensure that the test suite passes, by running `make test`. - [ ] Add a new entry to the `.github/CHANGELOG.md` file, summarizing the change, and including a link back to the PR. - [ ] Ensure that code is properly formatted by running `make format`. When all the above are checked, delete everything above the dashed line and fill in the pull request template. ------------------------------------------------------------------------------------------------------------ **Context:** **Description of the Change:** **Benefits:** **Possible Drawbacks:** **Related GitHub Issues:** --------- Co-authored-by: Ali Asadi <10773383+maliasadi@users.noreply.github.com> --- .github/CHANGELOG.md | 6 ++++++ doc/lightning_tensor/device.rst | 1 + tests/test_gates.py | 36 ++++++++++++++++++++++++++++++++- 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index ad0271b5cf..7b591fe79b 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -49,6 +49,9 @@ ### Improvements +* Update the `lightning.tensor` Python layer unit tests, as `lightning.tensor` cannot be cleaned up like other state-vector devices because the data is attached to the graph. It is recommended to use one device per circuit for `lightning.tensor`. + [(#971)](https://github.com/PennyLaneAI/pennylane-lightning/pull/971) + * Fix PTM stable-latest. [(#961)](https://github.com/PennyLaneAI/pennylane-lightning/pull/961) @@ -129,6 +132,9 @@ ### Documentation +* Update `lightning.tensor` usage suggestions. + [(#971)](https://github.com/PennyLaneAI/pennylane-lightning/pull/971) + * Update ``lightning.tensor`` documentation to include all the new features added since pull request #756. The new features are: 1, Finite-shot measurements; 2. Expval-base quantities; 3. Support for ``qml.state()`` and ``qml.stateprep()``; 4. Support for all gates support via Matrix Product Operator (MPO). 
[(#909)](https://github.com/PennyLaneAI/pennylane-lightning/pull/909) diff --git a/doc/lightning_tensor/device.rst b/doc/lightning_tensor/device.rst index cf125c07cc..7a364f0245 100644 --- a/doc/lightning_tensor/device.rst +++ b/doc/lightning_tensor/device.rst @@ -53,6 +53,7 @@ Check out the :doc:`/lightning_tensor/installation` guide for more information. .. seealso:: `DefaultTensor `__ for a CPU only tensor network simulator device. +Note that as `lightning.tensor` cannot be cleaned up like other state-vector devices because the data is attached to the graph. It is recommended to create a new ``lightning.tensor`` device per circuit to ensure resources are correctly handled. Operations and observables support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/tests/test_gates.py b/tests/test_gates.py index da414c3789..c3d361836c 100644 --- a/tests/test_gates.py +++ b/tests/test_gates.py @@ -104,7 +104,7 @@ def test_gate_unitary_correct(op, op_name): if op_name == "QubitUnitary" and device_name == "lightning.tensor": pytest.skip( - "Skipping QubitUnitary on lightning.tensor. It can't be decomposed into 1-wire or 2-wire gates" + "Skipping QubitUnitary on lightning.tensor. as `lightning.tensor` cannot be cleaned up like other state-vector devices because the data is attached to the graph. It is recommended to use one device per circuit for `lightning.tensor`." ) dev = qml.device(device_name, wires=wires) @@ -153,6 +153,40 @@ def output(input): assert np.allclose(unitary, unitary_expected) +@pytest.mark.parametrize("op_name", ld.operations) +def test_gate_unitary_correct_lt(op, op_name): + """Test if lightning device correctly applies gates by reconstructing the unitary matrix and + comparing to the expected version""" + + if op_name in ("BasisState", "QubitStateVector", "StatePrep"): + pytest.skip("Skipping operation because it is a state preparation") + if op == None: + pytest.skip("Skipping operation.") + + wires = len(op[2]["wires"]) + + if wires == 1 and device_name == "lightning.tensor": + pytest.skip("Skipping single wire device on lightning.tensor.") + + unitary = np.zeros((2**wires, 2**wires), dtype=np.complex128) + + for i, input in enumerate(itertools.product([0, 1], repeat=wires)): + dev = qml.device(device_name, wires=wires) + + @qml.qnode(dev) + def output(input): + qml.BasisState(input, wires=range(wires)) + op[0](*op[1], **op[2]) + return qml.state() + + out = output(np.array(input)) + unitary[:, i] = out + + unitary_expected = qml.matrix(op[0](*op[1], **op[2])) + + assert np.allclose(unitary, unitary_expected) + + @pytest.mark.parametrize("op_name", ld.operations) def test_inverse_unitary_correct(op, op_name): """Test if lightning device correctly applies inverse gates by reconstructing the unitary matrix From ab41159dcd97557a9da00e64e1d0c2931336a5ca Mon Sep 17 00:00:00 2001 From: Ali Asadi <10773383+maliasadi@users.noreply.github.com> Date: Mon, 4 Nov 2024 11:50:03 -0500 Subject: [PATCH 09/20] Update changelog (#973) ### Before submitting Please complete the following checklist when submitting a PR: - [ ] All new features must include a unit test. If you've fixed a bug or added code that should be tested, add a test to the [`tests`](../tests) directory! - [ ] All new functions and code must be clearly commented and documented. If you do make documentation changes, make sure that the docs build and render correctly by running `make docs`. - [ ] Ensure that the test suite passes, by running `make test`. 
- [ ] Add a new entry to the `.github/CHANGELOG.md` file, summarizing the change, and including a link back to the PR. - [ ] Ensure that code is properly formatted by running `make format`. When all the above are checked, delete everything above the dashed line and fill in the pull request template. ------------------------------------------------------------------------------------------------------------ **Context:** **Description of the Change:** **Benefits:** **Possible Drawbacks:** **Related GitHub Issues:** --- .github/CHANGELOG.md | 99 ++++++++++++++++----------------- doc/lightning_tensor/device.rst | 2 + 2 files changed, 50 insertions(+), 51 deletions(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 7b591fe79b..4b9522820b 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -2,13 +2,16 @@ ### New features since last release -* Add `mid-circuit measurements` support to `lightning.gpu`'s single-GPU backend. +* Add support for out-of-order `qml.probs` in `lightning.gpu`. + [(#941)](https://github.com/PennyLaneAI/pennylane-lightning/pull/941) + +* Add mid-circuit measurements support to `lightning.gpu`'s single-GPU backend. [(#931)](https://github.com/PennyLaneAI/pennylane-lightning/pull/931) -* Integrate Lightning-GPU with Catalyst. +* Integrate Lightning-GPU with Catalyst so that hybrid programs can be seamlessly QJIT-compiled and executed on this device following `pip install pennylane-lightning-gpu`. [(#928)](https://github.com/PennyLaneAI/pennylane-lightning/pull/928) -* Add `Projector` observable support via diagonalization to Lightning-GPU. +* Add `qml.Projector` observable support via diagonalization to Lightning-GPU. [(#894)](https://github.com/PennyLaneAI/pennylane-lightning/pull/894) * Add 1-target wire controlled gate support to `lightning.tensor`. Note that `cutensornet` only supports 1-target wire controlled gate as of `v24.08`. A controlled gate with more than 1 target wire should be converted to dense matrix. @@ -21,30 +24,28 @@ * Add Matrix Product Operator (MPO) for all gates support to `lightning.tensor`. Note current C++ implementation only works for MPO sites data provided by users. [(#859)](https://github.com/PennyLaneAI/pennylane-lightning/pull/859) -* Lightning-GPU migrated to the new device API. - [(#853)](https://github.com/PennyLaneAI/pennylane-lightning/pull/853) - -* Add shot measurement support to `lightning.tensor`. +* Add shots measurement support to `lightning.tensor`. [(#852)](https://github.com/PennyLaneAI/pennylane-lightning/pull/852) -* Lightning-Kokkos migrated to the new device API. +* Lightning-GPU and Lightning-Kokkos migrated to the new device API. + [(#853)](https://github.com/PennyLaneAI/pennylane-lightning/pull/853) [(#810)](https://github.com/PennyLaneAI/pennylane-lightning/pull/810) ### Breaking changes -* Deprecate `initSV()` and add `resetStateVector()` to `lightning.gpu`. +* Deprecate `initSV()` and add `resetStateVector()` from the C++ API Lightning-GPU. This is to remove the `reset_state` additional call in the Python layer. [(#933)](https://github.com/PennyLaneAI/pennylane-lightning/pull/933) -* Deprecate PI gates implementation. +* Deprecate PI gates implementation in Lightning-Qubit. The PI gates were the first implementation of gate kernels in `lightning.qubit` using pre-computed indices, prior to the development of LM (less memory) and AVX kernels. 
This deprecation is in favour of reducing compilation time and ensuring that Lightning-Qubit only relies on LM kernels in the dynamic dispatcher across all platforms. [(#925)](https://github.com/PennyLaneAI/pennylane-lightning/pull/925) * Remove PowerPC wheel build recipe for Lightning-Qubit. [(#902)](https://github.com/PennyLaneAI/pennylane-lightning/pull/902) -* Update MacOS wheel builds to require Monterey (12.0) or greater for x86_64 and ARM. +* Update MacOS wheel builds to require Monterey (12.0) or greater for x86_64 and ARM. This was required to update Pybind11 to the latest release (2.13.5) for enabling Numpy 2.0 support in Lightning. [(#901)](https://github.com/PennyLaneAI/pennylane-lightning/pull/901) -* Remove support for Python 3.9. +* Remove support for Python 3.9 for all Lightning simulators. [(#891)](https://github.com/PennyLaneAI/pennylane-lightning/pull/891) ### Improvements @@ -52,37 +53,34 @@ * Update the `lightning.tensor` Python layer unit tests, as `lightning.tensor` cannot be cleaned up like other state-vector devices because the data is attached to the graph. It is recommended to use one device per circuit for `lightning.tensor`. [(#971)](https://github.com/PennyLaneAI/pennylane-lightning/pull/971) -* Fix PTM stable-latest. - [(#961)](https://github.com/PennyLaneAI/pennylane-lightning/pull/961) - -* Update `README.rst` installation instructions for `lightning.gpu` and `lightning.tensor`. - [(#957)](https://github.com/PennyLaneAI/pennylane-lightning/pull/957) - * Add joint check for the N-controlled wires support in `lightning.qubit`. [(#949)](https://github.com/PennyLaneAI/pennylane-lightning/pull/949) * Optimize `GlobalPhase` and `C(GlobalPhase)` gate implementation in `lightning.gpu`. [(#946)](https://github.com/PennyLaneAI/pennylane-lightning/pull/946) -* Optimize the cartesian product to reduce the amount of memory necessary to set the StatePrep with LightningTensor. +* Add missing `liblightning_kokkos_catalyst.so` when building Lightning-Kokkos in editable installation. + [(#945)](https://github.com/PennyLaneAI/pennylane-lightning/pull/945) + +* Optimize the cartesian product to reduce the amount of memory necessary to set the `StatePrep` in Lightning-Tensor. [(#943)](https://github.com/PennyLaneAI/pennylane-lightning/pull/943) -* The `prob` data return `lightning.gpu` C++ layer is aligned with other state-vector backends and `lightning.gpu` supports out-of-order `qml.prob`. +* Update the `qml.probs` data-return in Lightning-GPU C++ API to align with other state-vector devices. [(#941)](https://github.com/PennyLaneAI/pennylane-lightning/pull/941) -* Add zero-state initialization to both `StateVectorCudaManaged` and `StateVectorCudaMPI` constructors to remove the `reset_state` in the python layer ctor and refactor `setBasisState(state, wires)` in the C++ layer. +* Add zero-state initialization to both `StateVectorCudaManaged` and `StateVectorCudaMPI` constructors to remove the `reset_state` in the Python layer ctor and refactor `setBasisState(state, wires)` in the C++ API. [(#933)](https://github.com/PennyLaneAI/pennylane-lightning/pull/933) -* Add `setStateVector(state, wire)` support to the `lightning.gpu` C++ layer. +* Add `setStateVector(state, wire)` support to the Lightning-GPU C++ API. [(#930)](https://github.com/PennyLaneAI/pennylane-lightning/pull/930) - -* The `generate_samples` methods of lightning.{qubit/kokkos} can now take in a seed number to make the generated samples deterministic. 
This can be useful when, among other things, fixing flaky tests in CI. + +* The `generate_samples` methods of `lightning.qubit` and `lightning.kokkos` can now take in a seed number to make the generated samples deterministic. This can be useful when, among other things, fixing flaky tests in CI. [(#927)](https://github.com/PennyLaneAI/pennylane-lightning/pull/927) -* Remove dynamic decomposition rules in Lightning. +* Remove dynamic decomposition rules for all Lightning devices. [(#926)](https://github.com/PennyLaneAI/pennylane-lightning/pull/926) -* Always decompose `qml.QFT` in Lightning. +* Always decompose `qml.QFT` in all Lightning devices. [(#924)](https://github.com/PennyLaneAI/pennylane-lightning/pull/924) * Uniform Python format to adhere PennyLane style. @@ -100,7 +98,7 @@ * Skip the compilation of Lightning simulators and development requirements to boost the build of public docs up to 5x. [(#904)](https://github.com/PennyLaneAI/pennylane-lightning/pull/904) -* Build Lightning wheels in `Release` mode. +* Build Lightning wheels in `Release` mode to reduce the binary sizes. [(#903)](https://github.com/PennyLaneAI/pennylane-lightning/pull/903) * Update Pybind11 to 2.13.5. @@ -115,10 +113,10 @@ * Optimize and simplify controlled kernels in Lightning-Qubit. [(#882)](https://github.com/PennyLaneAI/pennylane-lightning/pull/882) -* Optimize gate cache recording for `lightning.tensor` C++ layer. +* Optimize gate cache recording for Lightning-Tensor C++ API. [(#879)](https://github.com/PennyLaneAI/pennylane-lightning/pull/879) -* Unify Lightning-Kokkos device and Lightning-Qubit device under a Lightning Base device. +* Unify Lightning-Kokkos and Lightning-Qubit devices under a Lightning-Base abstracted class. [(#876)](https://github.com/PennyLaneAI/pennylane-lightning/pull/876) * Smarter defaults for the `split_obs` argument in the serializer. The serializer splits linear combinations into chunks instead of all their terms. @@ -127,7 +125,7 @@ * Prefer `tomlkit` over `toml` for building Lightning wheels, and choose `tomli` and `tomllib` over `toml` when installing the package. [(#857)](https://github.com/PennyLaneAI/pennylane-lightning/pull/857) -* LightningKokkos gains native support for the `PauliRot` gate. +* Lightning-Kokkos gains native support for the `PauliRot` gate. [(#855)](https://github.com/PennyLaneAI/pennylane-lightning/pull/855) ### Documentation @@ -135,33 +133,32 @@ * Update `lightning.tensor` usage suggestions. [(#971)](https://github.com/PennyLaneAI/pennylane-lightning/pull/971) -* Update ``lightning.tensor`` documentation to include all the new features added since pull request #756. The new features are: 1, Finite-shot measurements; 2. Expval-base quantities; 3. Support for ``qml.state()`` and ``qml.stateprep()``; 4. Support for all gates support via Matrix Product Operator (MPO). +* Update `README.rst` installation instructions for `lightning.gpu` and `lightning.tensor`. + [(#957)](https://github.com/PennyLaneAI/pennylane-lightning/pull/957) + +* Update `lightning.tensor` documentation to include all the new features added since pull request #756. The new features are: 1. Finite-shot measurements; 2. Expval-base quantities; 3. Support for `qml.state()` and `qml.stateprep()`; 4. Support for all gates support via Matrix Product Operator (MPO). [(#909)](https://github.com/PennyLaneAI/pennylane-lightning/pull/909) ### Bug fixes -* Fix `liblightning_kokkos_catalyst.so` not copied to correct build path for editable installation. 
- [(#968)](https://github.com/PennyLaneAI/pennylane-lightning/pull/968) - -* Fix PTM stable latest related to `default.qubit.legacy`. +* Fix PTM stable-latest related to `default.qubit.legacy` and the `latest` flag usage. + [(#961)](https://github.com/PennyLaneAI/pennylane-lightning/pull/961) [(#966)](https://github.com/PennyLaneAI/pennylane-lightning/pull/966) -* Fix build failure for Lightning-Kokkos editable installation on MacOS due to `liblightning_kokkos_catalyst.so` copy. +* Fix build failure for Lightning-Kokkos editable installation on MacOS due to `liblightning_kokkos_catalyst.so` copy and `liblightning_kokkos_catalyst.so` not copied to correct build path for editable installation. [(#947)](https://github.com/PennyLaneAI/pennylane-lightning/pull/947) - -* Fix missing `liblightning_kokkos_catalyst.so` in Lightning-Kokkos editable installation. - [(#945)](https://github.com/PennyLaneAI/pennylane-lightning/pull/945) + [(#968)](https://github.com/PennyLaneAI/pennylane-lightning/pull/968) * Add concept restriction to ensure `ConstMult` inline function only hit with arithmetic-values times complex values. Fixes build failures with the test suite when enabling OpenMP, and disabling BLAS and Python under clang. [(#936)](https://github.com/PennyLaneAI/pennylane-lightning/pull/936) -* Bug fix for `applyMatrix` in `lightning.tensor`. Matrix operator data is not stored in the `cuGateCache` object to support `TensorProd` obs with multiple `Hermitian` obs. +* Bug fix for `applyMatrix` in Lightning-Tensor. Matrix operator data is not stored in the `cuGateCache` object to support `TensorProd` obs with multiple `Hermitian` obs. [(#932)](https://github.com/PennyLaneAI/pennylane-lightning/pull/932) * Bug fix for `_pauli_word` of `QuantumScriptSerializer`. `_pauli_word` can process `PauliWord` object: `I`. [(#919)](https://github.com/PennyLaneAI/pennylane-lightning/pull/919) -* Bug fix for analytic `probs` in the `lightning.tensor` C++ layer. +* Bug fix for analytic `qml.probs` in the Lightning-Tensor C++ API. [(#906)](https://github.com/PennyLaneAI/pennylane-lightning/pull/906) ### Contributors @@ -219,7 +216,7 @@ Ali Asadi, Amintor Dusko, Joseph Lee, Luis Alfredo Nuñez Meneses, Vincent Micha * Update Lightning tests to support the generalization of basis state preparation. [(#864)](https://github.com/PennyLaneAI/pennylane-lightning/pull/864) -* Add `SetState` and `SetBasisState` to `LightningKokkosSimulator`. +* Add `SetState` and `SetBasisState` to `Lightning-KokkosSimulator`. [(#861)](https://github.com/PennyLaneAI/pennylane-lightning/pull/861) * Remove use of the deprecated `Operator.expand` in favour of `Operator.decomposition`. @@ -231,7 +228,7 @@ Ali Asadi, Amintor Dusko, Joseph Lee, Luis Alfredo Nuñez Meneses, Vincent Micha * Move `setBasisState`, `setStateVector` and `resetStateVector` from `StateVectorLQubitManaged` to `StateVectorLQubit`. [(#841)](https://github.com/PennyLaneAI/pennylane-lightning/pull/841) -* Update `generate_samples` in `LightningKokkos` and `LightningGPU` to support `qml.measurements.Shots` type instances. +* Update `generate_samples` in Lightning-Kokkos and Lightning-GPU to support `qml.measurements.Shots` type instances. [(#839)](https://github.com/PennyLaneAI/pennylane-lightning/pull/839) * Add a Catalyst-specific wrapping class for Lightning Kokkos. @@ -636,19 +633,19 @@ Ali Asadi, Amintor Dusko, Lillian Frederiksen, Pietropaolo Frisoni, David Ittah, ### Bug fixes -* Fix wire order permutations when using `qml.probs` with out-of-order wires. 
+* Fix wire order permutations when using `qml.probs` with out-of-order wires in Lightning-Qubit. [(#707)](https://github.com/PennyLaneAI/pennylane-lightning/pull/707) -* Lightning Qubit once again respects the wire order specified on device instantiation. +* Lightning-Qubit once again respects the wire order specified on device instantiation. [(#705)](https://github.com/PennyLaneAI/pennylane-lightning/pull/705) -* `dynamic_one_shot` was refactored to use `SampleMP` measurements as a way to return the mid-circuit measurement samples. `LightningQubit`'s `simulate` is modified accordingly. +* `dynamic_one_shot` was refactored to use `SampleMP` measurements as a way to return the mid-circuit measurement samples. `LightningQubit's `simulate` is modified accordingly. [(#694)](https://github.com/PennyLaneAI/pennylane-lightning/pull/694) -* `LightningQubit` correctly decomposes state prep operations when used in the middle of a circuit. +* Lightning-Qubit correctly decomposes state prep operations when used in the middle of a circuit. [(#687)](https://github.com/PennyLaneAI/pennylane-lightning/pull/687) -* `LightningQubit` correctly decomposes `qml.QFT` and `qml.GroverOperator` if `len(wires)` is greater than 9 and 12 respectively. +* Lightning-Qubit correctly decomposes `qml.QFT` and `qml.GroverOperator` if `len(wires)` is greater than 9 and 12 respectively. [(#687)](https://github.com/PennyLaneAI/pennylane-lightning/pull/687) * Specify `isort` `--py` (Python version) and `-l` (max line length) to stabilize `isort` across Python versions and environments. @@ -663,7 +660,7 @@ Ali Asadi, Amintor Dusko, Lillian Frederiksen, Pietropaolo Frisoni, David Ittah, * Fix the failed observable serialization unit tests. [(#683)](https://github.com/PennyLaneAI/pennylane-lightning/pull/683) -* Update the `LightningQubit` new device API to work with Catalyst. +* Update the Lightning-Qubit new device API to work with Catalyst. [(#665)](https://github.com/PennyLaneAI/pennylane-lightning/pull/665) * Update the version of `codecov-action` to v4 and fix the CodeCov issue with the PL-Lightning check-compatibility actions. @@ -1076,7 +1073,7 @@ Ali Asadi, Amintor Dusko, Vincent Michaud-Rioux, Lee J. O'Riordan, Shuli Shu ### Breaking changes -* Rename `QubitStateVector` to `StatePrep` in the `LightningQubit` and `LightningKokkos` classes. +* Rename `QubitStateVector` to `StatePrep` in the Lightning-Qubit and `Lightning-Kokkos` classes. [(#486)](https://github.com/PennyLaneAI/pennylane-lightning/pull/486) * Modify `adjointJacobian` methods to accept a (maybe unused) reference `StateVectorT`, allowing device-backed simulators to directly access state vector data for adjoint differentiation instead of copying it back-and-forth into `JacobianData` (host memory). @@ -2094,7 +2091,7 @@ Thomas Bromley, Theodor Isacsson, Christina Lee, Thomas Loke, Antal Száva. ### Bug fixes -* Fixes a bug where the `QNode` would swap `LightningQubit` to +* Fixes a bug where the `QNode` would swap Lightning-Qubit to `DefaultQubitAutograd` on device execution due to the inherited `passthru_devices` entry of the `capabilities` dictionary. [(#61)](https://github.com/PennyLaneAI/pennylane-lightning/pull/61) diff --git a/doc/lightning_tensor/device.rst b/doc/lightning_tensor/device.rst index 7a364f0245..68e3b3122d 100644 --- a/doc/lightning_tensor/device.rst +++ b/doc/lightning_tensor/device.rst @@ -54,6 +54,8 @@ Check out the :doc:`/lightning_tensor/installation` guide for more information. .. 
seealso:: `DefaultTensor `__ for a CPU only tensor network simulator device. Note that as `lightning.tensor` cannot be cleaned up like other state-vector devices because the data is attached to the graph. It is recommended to create a new ``lightning.tensor`` device per circuit to ensure resources are correctly handled. + + Operations and observables support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From 035b63de739fd1973244c9942b721711b6ecdd5a Mon Sep 17 00:00:00 2001 From: Ali Asadi <10773383+maliasadi@users.noreply.github.com> Date: Mon, 4 Nov 2024 11:55:02 -0500 Subject: [PATCH 10/20] Update rc version --- pennylane_lightning/core/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index 85c5379edb..9b88a6af81 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc0" +__version__ = "0.39.0-rc1" From b830ceeed721a349e5506e80fd73895658af44ec Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 4 Nov 2024 18:01:57 +0000 Subject: [PATCH 11/20] update docs --- doc/lightning_tensor/device.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/lightning_tensor/device.rst b/doc/lightning_tensor/device.rst index 68e3b3122d..641a7f7622 100644 --- a/doc/lightning_tensor/device.rst +++ b/doc/lightning_tensor/device.rst @@ -53,7 +53,7 @@ Check out the :doc:`/lightning_tensor/installation` guide for more information. .. seealso:: `DefaultTensor `__ for a CPU only tensor network simulator device. -Note that as `lightning.tensor` cannot be cleaned up like other state-vector devices because the data is attached to the graph. It is recommended to create a new ``lightning.tensor`` device per circuit to ensure resources are correctly handled. +Note that as ``lightning.tensor`` cannot be cleaned up like other state-vector devices because the data is attached to the graph. It is recommended to create a new ``lightning.tensor`` device per circuit to ensure resources are correctly handled. Operations and observables support From 181a24656205bd5dea89674abaf55d39c34def6c Mon Sep 17 00:00:00 2001 From: Diego <67476785+DSGuala@users.noreply.github.com> Date: Mon, 4 Nov 2024 13:49:05 -0500 Subject: [PATCH 12/20] Update Lightning-Tensor install docs (#972) ### Before submitting **Context:** Lightning tensor docs did not describe installation via `pip install` **Description of the Change:** - Added instruction to `pip install cutensornet-cu12` and `pip install pennylane-lightning-tensor` - Added warning about installing from source. **Benefits:** Clearer installation docs. **Possible Drawbacks:** Duplication of information in docs and pennylane.ai/install **Related GitHub Issues:** --------- Co-authored-by: ANTH0NY <39093564+AntonNI8@users.noreply.github.com> Co-authored-by: Ali Asadi <10773383+maliasadi@users.noreply.github.com> --- .github/CHANGELOG.md | 5 +++-- README.rst | 12 ++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 4b9522820b..d610d3a965 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -130,8 +130,9 @@ ### Documentation -* Update `lightning.tensor` usage suggestions. +* Update Lightning-Tensor installation docs and usage suggestions. 
[(#971)](https://github.com/PennyLaneAI/pennylane-lightning/pull/971) + [(#972)](https://github.com/PennyLaneAI/pennylane-lightning/pull/971) * Update `README.rst` installation instructions for `lightning.gpu` and `lightning.tensor`. [(#957)](https://github.com/PennyLaneAI/pennylane-lightning/pull/957) @@ -165,7 +166,7 @@ This release contains contributions from (in alphabetical order): -Ali Asadi, Amintor Dusko, Joseph Lee, Luis Alfredo Nuñez Meneses, Vincent Michaud-Rioux, Lee J. O'Riordan, Mudit Pandey, Shuli Shu, Haochen Paul Wang +Ali Asadi, Amintor Dusko, Diego Guala, Joseph Lee, Luis Alfredo Nuñez Meneses, Vincent Michaud-Rioux, Lee J. O'Riordan, Mudit Pandey, Shuli Shu, Haochen Paul Wang --- diff --git a/README.rst b/README.rst index f7f09d5fdf..8fa3eb6066 100644 --- a/README.rst +++ b/README.rst @@ -383,8 +383,20 @@ Lightning-Tensor requires CUDA 12 and the `cuQuantum SDK `_ install guide for more information. +Lightning-Tensor and ``cutensornet-cu12`` can be installed via: + +.. code-block:: bash + + pip install cutensornet-cu12 + pip install pennylane-lightning-tensor + Install Lightning-Tensor from source ==================================== + +.. note:: + + The below contains instructions for installing Lightning-Tensor ***from source***. For most cases, *this is not required* and one can simply use the installation instructions at `pennylane.ai/install `__. If those instructions do not work for you, or you have a more complex build environment that requires building from source, then consider reading on. + Lightning-Qubit should be installed before Lightning-Tensor (compilation is not necessary): .. code-block:: bash From 3bb6d5fe934cac34499f152d9b6bbd7f7ed1e937 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 4 Nov 2024 18:50:20 +0000 Subject: [PATCH 13/20] update rc version --- pennylane_lightning/core/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index 9b88a6af81..97de166686 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc1" +__version__ = "0.39.0-rc2" From d5e064dc8e95d4e58ba3bf7001783925c6c5eac5 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 4 Nov 2024 19:56:07 +0000 Subject: [PATCH 14/20] macos target from 12.0 to 13.0 --- .github/workflows/wheel_macos_arm64.yml | 2 +- .github/workflows/wheel_macos_x86_64.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheel_macos_arm64.yml b/.github/workflows/wheel_macos_arm64.yml index 36f7676ab0..3e42c4b939 100644 --- a/.github/workflows/wheel_macos_arm64.yml +++ b/.github/workflows/wheel_macos_arm64.yml @@ -23,7 +23,7 @@ env: ARCHS: 'arm64' PYTHON3_MIN_VERSION: "10" PYTHON3_MAX_VERSION: "12" - MACOSX_DEPLOYMENT_TARGET: 12.0 + MACOSX_DEPLOYMENT_TARGET: 13.0 concurrency: group: wheel_macos_arm64-${{ github.ref }} diff --git a/.github/workflows/wheel_macos_x86_64.yml b/.github/workflows/wheel_macos_x86_64.yml index c81162f43d..4bd3d98544 100644 --- a/.github/workflows/wheel_macos_x86_64.yml +++ b/.github/workflows/wheel_macos_x86_64.yml @@ -20,7 +20,7 @@ on: workflow_dispatch: env: - MACOSX_DEPLOYMENT_TARGET: 12.0 + MACOSX_DEPLOYMENT_TARGET: 13.0 concurrency: group: wheel_macos_x86_64-${{ github.ref }} From 956b2bde59b794b7c9e51424ad7bb4b2892801f1 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> 
Date: Mon, 4 Nov 2024 19:57:25 +0000 Subject: [PATCH 15/20] update to rc3 --- pennylane_lightning/core/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index 97de166686..6bdfb91d20 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc2" +__version__ = "0.39.0-rc3" From c6d5a2859e93ad3be445e2f5a5d9a641ec64e47f Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 4 Nov 2024 20:02:40 +0000 Subject: [PATCH 16/20] test macos14.0 --- .github/workflows/wheel_macos_arm64.yml | 2 +- .github/workflows/wheel_macos_x86_64.yml | 2 +- pennylane_lightning/core/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/wheel_macos_arm64.yml b/.github/workflows/wheel_macos_arm64.yml index 3e42c4b939..337f3771b0 100644 --- a/.github/workflows/wheel_macos_arm64.yml +++ b/.github/workflows/wheel_macos_arm64.yml @@ -23,7 +23,7 @@ env: ARCHS: 'arm64' PYTHON3_MIN_VERSION: "10" PYTHON3_MAX_VERSION: "12" - MACOSX_DEPLOYMENT_TARGET: 13.0 + MACOSX_DEPLOYMENT_TARGET: 14.0 concurrency: group: wheel_macos_arm64-${{ github.ref }} diff --git a/.github/workflows/wheel_macos_x86_64.yml b/.github/workflows/wheel_macos_x86_64.yml index 4bd3d98544..eac609913d 100644 --- a/.github/workflows/wheel_macos_x86_64.yml +++ b/.github/workflows/wheel_macos_x86_64.yml @@ -20,7 +20,7 @@ on: workflow_dispatch: env: - MACOSX_DEPLOYMENT_TARGET: 13.0 + MACOSX_DEPLOYMENT_TARGET: 14.0 concurrency: group: wheel_macos_x86_64-${{ github.ref }} diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index 6bdfb91d20..1181e55ffc 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc3" +__version__ = "0.39.0-rc4" From 84dd86ada85e8533bca98eda5ecaa0c191f99863 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 4 Nov 2024 20:06:42 +0000 Subject: [PATCH 17/20] update macos-12 wheel labels --- .github/workflows/wheel_macos_arm64.yml | 4 ++-- .github/workflows/wheel_macos_x86_64.yml | 6 +++--- pennylane_lightning/core/_version.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/wheel_macos_arm64.yml b/.github/workflows/wheel_macos_arm64.yml index 337f3771b0..281e910891 100644 --- a/.github/workflows/wheel_macos_arm64.yml +++ b/.github/workflows/wheel_macos_arm64.yml @@ -23,7 +23,7 @@ env: ARCHS: 'arm64' PYTHON3_MIN_VERSION: "10" PYTHON3_MAX_VERSION: "12" - MACOSX_DEPLOYMENT_TARGET: 14.0 + MACOSX_DEPLOYMENT_TARGET: 13.0 concurrency: group: wheel_macos_arm64-${{ github.ref }} @@ -63,7 +63,7 @@ jobs: strategy: fail-fast: false matrix: - os: [macos-12] + os: [macos-13] arch: [arm64] pl_backend: ["lightning_kokkos", "lightning_qubit"] cibw_build: ${{fromJson(needs.mac-set-matrix-arm.outputs.python_version)}} diff --git a/.github/workflows/wheel_macos_x86_64.yml b/.github/workflows/wheel_macos_x86_64.yml index eac609913d..30343ada19 100644 --- a/.github/workflows/wheel_macos_x86_64.yml +++ b/.github/workflows/wheel_macos_x86_64.yml @@ -20,7 +20,7 @@ on: workflow_dispatch: env: - MACOSX_DEPLOYMENT_TARGET: 14.0 + MACOSX_DEPLOYMENT_TARGET: 13.0 concurrency: group: wheel_macos_x86_64-${{ github.ref }} @@ -40,7 +40,7 @@ jobs: needs: [set_wheel_build_matrix] 
strategy: matrix: - os: [macos-12] + os: [macos-13] arch: [x86_64] exec_model: ${{ fromJson(needs.set_wheel_build_matrix.outputs.exec_model) }} kokkos_version: ${{ fromJson(needs.set_wheel_build_matrix.outputs.kokkos_version) }} @@ -97,7 +97,7 @@ jobs: strategy: fail-fast: false matrix: - os: [macos-12] + os: [macos-13] arch: [x86_64] pl_backend: ["lightning_kokkos", "lightning_qubit"] cibw_build: ${{fromJson(needs.set_wheel_build_matrix.outputs.python_version)}} diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index 1181e55ffc..040f9a87b6 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc4" +__version__ = "0.39.0-rc5" From 06043d8173d94d9ee1a60f4490668a246c689b14 Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 4 Nov 2024 20:39:01 +0000 Subject: [PATCH 18/20] add changelog entry for the macos-12 deprecation --- .github/CHANGELOG.md | 3 +++ pennylane_lightning/core/_version.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index d610d3a965..17065b3d5b 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -33,6 +33,9 @@ ### Breaking changes +* Update MacOS wheel build to 13.0 for X86_64 and ARM due to the deprecation of MacOS-12 CI runners. + [(#969)](https://github.com/PennyLaneAI/pennylane-lightning/pull/969) + * Deprecate `initSV()` and add `resetStateVector()` from the C++ API Lightning-GPU. This is to remove the `reset_state` additional call in the Python layer. [(#933)](https://github.com/PennyLaneAI/pennylane-lightning/pull/933) diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index 040f9a87b6..e3612df3c2 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc5" +__version__ = "0.39.0-rc6" From cf828ac9b736b000ff12752f8e76b019fc220742 Mon Sep 17 00:00:00 2001 From: Joseph Lee <40768758+josephleekl@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:35:54 -0500 Subject: [PATCH 19/20] Lightning Kokkos arg test fix for MacOS (#974) ### Before submitting Please complete the following checklist when submitting a PR: - [ ] All new features must include a unit test. If you've fixed a bug or added code that should be tested, add a test to the [`tests`](../tests) directory! - [ ] All new functions and code must be clearly commented and documented. If you do make documentation changes, make sure that the docs build and render correctly by running `make docs`. - [ ] Ensure that the test suite passes, by running `make test`. - [x] Add a new entry to the `.github/CHANGELOG.md` file, summarizing the change, and including a link back to the PR. - [ ] Ensure that code is properly formatted by running `make format`. When all the above are checked, delete everything above the dashed line and fill in the pull request template. 
------------------------------------------------------------------------------------------------------------ **Context:** **Description of the Change:** **Benefits:** **Possible Drawbacks:** **Related GitHub Issues:** --- .github/CHANGELOG.md | 3 +++ pennylane_lightning/core/_version.py | 2 +- tests/test_device.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 17065b3d5b..3376120050 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -145,6 +145,9 @@ ### Bug fixes +* Fix Lightning Kokkos test_device for `kokkos_args` fail for MacOS due to `np.complex256` + [(#974)](https://github.com/PennyLaneAI/pennylane-lightning/pull/974) + * Fix PTM stable-latest related to `default.qubit.legacy` and the `latest` flag usage. [(#961)](https://github.com/PennyLaneAI/pennylane-lightning/pull/961) [(#966)](https://github.com/PennyLaneAI/pennylane-lightning/pull/966) diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index e3612df3c2..b5d5a365df 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc6" +__version__ = "0.39.0-rc7" diff --git a/tests/test_device.py b/tests/test_device.py index 8474deec9d..50b631862c 100644 --- a/tests/test_device.py +++ b/tests/test_device.py @@ -50,7 +50,7 @@ def test_create_device_with_unsupported_dtype(): ) def test_create_device_with_unsupported_kokkos_args(): with pytest.raises(TypeError, match="Argument kokkos_args must be of type .* but it is of .*."): - dev = qml.device(device_name, wires=1, kokkos_args=np.complex256) + dev = qml.device(device_name, wires=1, kokkos_args=np.complex128) @pytest.mark.skipif( From dcea959bafd1b01518dbd7746f718343757c68fd Mon Sep 17 00:00:00 2001 From: Shuli Shu <08cnbj@gmail.com> Date: Mon, 4 Nov 2024 22:49:29 +0000 Subject: [PATCH 20/20] Forked as v0.39.0_release to be released with tag v0.39.0. 
--- .github/workflows/wheel_linux_aarch64.yml | 4 ++-- .github/workflows/wheel_linux_aarch64_cuda.yml | 4 ++-- .github/workflows/wheel_linux_x86_64.yml | 4 ++-- .github/workflows/wheel_linux_x86_64_cuda.yml | 4 ++-- .github/workflows/wheel_macos_arm64.yml | 4 ++-- .github/workflows/wheel_macos_x86_64.yml | 4 ++-- .github/workflows/wheel_noarch.yml | 4 ++-- .github/workflows/wheel_win_x86_64.yml | 4 ++-- pennylane_lightning/core/_version.py | 2 +- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/wheel_linux_aarch64.yml b/.github/workflows/wheel_linux_aarch64.yml index dca8b650e2..bc21a56822 100644 --- a/.github/workflows/wheel_linux_aarch64.yml +++ b/.github/workflows/wheel_linux_aarch64.yml @@ -184,7 +184,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' with: @@ -202,7 +202,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_linux_aarch64_cuda.yml b/.github/workflows/wheel_linux_aarch64_cuda.yml index fb590f19ec..4864fa0167 100644 --- a/.github/workflows/wheel_linux_aarch64_cuda.yml +++ b/.github/workflows/wheel_linux_aarch64_cuda.yml @@ -107,7 +107,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' with: @@ -128,7 +128,7 @@ jobs: permissions: id-token: write if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index 30c82fb9fe..92832fd0eb 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -198,7 +198,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != '' @@ -217,7 +217,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_linux_x86_64_cuda.yml b/.github/workflows/wheel_linux_x86_64_cuda.yml index c76be71da8..abd30c9c65 100644 --- a/.github/workflows/wheel_linux_x86_64_cuda.yml +++ b/.github/workflows/wheel_linux_x86_64_cuda.yml @@ -119,7 +119,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != '' @@ -141,7 +141,7 @@ jobs: permissions: id-token: write if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_macos_arm64.yml b/.github/workflows/wheel_macos_arm64.yml index 281e910891..0a6945c3c6 100644 --- a/.github/workflows/wheel_macos_arm64.yml +++ b/.github/workflows/wheel_macos_arm64.yml @@ -126,7 +126,7 @@ jobs: - 
uses: actions/upload-artifact@v4 if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' with: @@ -144,7 +144,7 @@ jobs: cibw_build: ${{ fromJson(needs.mac-set-matrix-arm.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_macos_x86_64.yml b/.github/workflows/wheel_macos_x86_64.yml index 30343ada19..ce247522a4 100644 --- a/.github/workflows/wheel_macos_x86_64.yml +++ b/.github/workflows/wheel_macos_x86_64.yml @@ -184,7 +184,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != '' @@ -203,7 +203,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.ref == 'refs/heads/master' steps: diff --git a/.github/workflows/wheel_noarch.yml b/.github/workflows/wheel_noarch.yml index 50bf7f1d5a..0414fcd7b8 100644 --- a/.github/workflows/wheel_noarch.yml +++ b/.github/workflows/wheel_noarch.yml @@ -86,7 +86,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | matrix.pl_backend == 'lightning_qubit' && - (github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master') + (github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master') with: name: pure-python-wheels-${{ matrix.pl_backend }}.zip path: dist/*.whl @@ -102,7 +102,7 @@ jobs: steps: - uses: actions/download-artifact@v4 - if: ${{ matrix.pl_backend == 'lightning_qubit' && github.event_name == 'pull_request' }} + if: ${{ matrix.pl_backend == 'lightning_qubit' && github.event_name == 'release' }} with: name: pure-python-wheels-${{ matrix.pl_backend }}.zip path: dist diff --git a/.github/workflows/wheel_win_x86_64.yml b/.github/workflows/wheel_win_x86_64.yml index 4c6c5371a1..d2464ce9cf 100644 --- a/.github/workflows/wheel_win_x86_64.yml +++ b/.github/workflows/wheel_win_x86_64.yml @@ -206,7 +206,7 @@ jobs: - uses: actions/upload-artifact@v4 if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || steps.rc_build.outputs.match != '' @@ -225,7 +225,7 @@ jobs: cibw_build: ${{ fromJson(needs.set_wheel_build_matrix.outputs.python_version) }} runs-on: ubuntu-latest if: | - github.event_name == 'pull_request' || + github.event_name == 'release' || github.ref == 'refs/heads/master' steps: diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index b5d5a365df..ce245aae35 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.39.0-rc7" +__version__ = "0.39.0"
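
A short usage sketch to accompany the `lightning.tensor` guidance introduced in this release series (the new `device.rst` note and the updated Python-layer unit tests): because `lightning.tensor` cannot be cleaned up like the state-vector devices (its data stays attached to the tensor-network graph), the docs now recommend creating one device per circuit. The snippet below is a minimal illustration of that pattern, not part of the patches above; it assumes PennyLane and the `pennylane-lightning-tensor` package are installed, and the circuit itself is a hypothetical example.

```python
import numpy as np
import pennylane as qml

def run_circuit(theta):
    # Following the 0.39.0 docs note, instantiate a fresh `lightning.tensor`
    # device for each circuit instead of reusing one device across circuits.
    dev = qml.device("lightning.tensor", wires=2)

    @qml.qnode(dev)
    def circuit(x):
        qml.RX(x, wires=0)
        qml.CNOT(wires=[0, 1])
        return qml.expval(qml.PauliZ(1))

    return circuit(theta)

# <Z> on wire 1 equals cos(theta) for this circuit; ~0.7071 for theta = pi/4.
print(run_circuit(np.pi / 4))
```

The updated `test_gate_unitary_correct_lt` test in this series follows the same pattern, constructing a new device inside the loop over basis states rather than reusing a single device.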