diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md
index 35c945f62b..7411cd98f2 100644
--- a/.github/CHANGELOG.md
+++ b/.github/CHANGELOG.md
@@ -15,6 +15,9 @@
 
 ### Improvements
 
+* Refactor CUDA utils Python bindings to a separate module.
+  [(#801)](https://github.com/PennyLaneAI/pennylane-lightning/pull/801)
+
 * Parallelize Lightning-Qubit `probs` with OpenMP when using the `-DLQ_ENABLE_KERNEL_OMP=1` CMake argument.
   [(#800)](https://github.com/PennyLaneAI/pennylane-lightning/pull/800)
 
@@ -47,7 +50,7 @@
 
 This release contains contributions from (in alphabetical order):
 
-Amintor Dusko, Vincent Michaud-Rioux
+Amintor Dusko, Vincent Michaud-Rioux, Shuli Shu
 
 ---
 
diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py
index 02e7041e1a..0ddab39d02 100644
--- a/pennylane_lightning/core/_version.py
+++ b/pennylane_lightning/core/_version.py
@@ -16,4 +16,4 @@
 Version number (major.minor.patch[-label])
 """
 
-__version__ = "0.38.0-dev8"
+__version__ = "0.38.0-dev9"
diff --git a/pennylane_lightning/core/src/bindings/BindingsCudaUtils.hpp b/pennylane_lightning/core/src/bindings/BindingsCudaUtils.hpp
new file mode 100644
index 0000000000..2a04ad4d63
--- /dev/null
+++ b/pennylane_lightning/core/src/bindings/BindingsCudaUtils.hpp
@@ -0,0 +1,101 @@
+// Copyright 2024 Xanadu Quantum Technologies Inc.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @file BindingsCudaUtils.hpp
+ * Defines CUDA device-specific operations to export to Python, other
+ * utility functions interfacing with Pybind11, and support for
+ * backend-agnostic bindings.
+ */
+
+#pragma once
+
+#include "BindingsBase.hpp"
+#include "DevTag.hpp"
+#include "DevicePool.hpp"
+#include "cuda_helpers.hpp"
+
+/// @cond DEV
+namespace {
+using namespace Pennylane;
+using namespace Pennylane::Bindings;
+} // namespace
+/// @endcond
+
+namespace py = pybind11;
+
+namespace Pennylane::LightningGPU::Util {
+/**
+ * @brief Register bindings for CUDA utils.
+ *
+ * @param m Pybind11 module.
+ */
+void registerCudaUtils(py::module_ &m) {
+    m.def("device_reset", &deviceReset, "Reset all GPU devices and contexts.");
+    m.def("allToAllAccess", []() {
+        for (int i = 0; i < static_cast<int>(getGPUCount()); i++) {
+            cudaDeviceEnablePeerAccess(i, 0);
+        }
+    });
+
+    m.def("is_gpu_supported", &isCuQuantumSupported,
+          py::arg("device_number") = 0,
+          "Checks if the given GPU device meets the minimum architecture "
+          "support for the PennyLane-Lightning-GPU device.");
+
+    m.def("get_gpu_arch", &getGPUArch, py::arg("device_number") = 0,
+          "Returns the given GPU major and minor GPU support.");
+    py::class_<DevicePool<int>>(m, "DevPool")
+        .def(py::init<>())
+        .def("getActiveDevices", &DevicePool<int>::getActiveDevices)
+        .def("isActive", &DevicePool<int>::isActive)
+        .def("isInactive", &DevicePool<int>::isInactive)
+        .def("acquireDevice", &DevicePool<int>::acquireDevice)
+        .def("releaseDevice", &DevicePool<int>::releaseDevice)
+        .def("syncDevice", &DevicePool<int>::syncDevice)
+        .def("refresh", &DevicePool<int>::refresh)
+        .def_static("getTotalDevices", &DevicePool<int>::getTotalDevices)
+        .def_static("getDeviceUIDs", &DevicePool<int>::getDeviceUIDs)
+        .def_static("setDeviceID", &DevicePool<int>::setDeviceIdx)
+        .def(py::pickle(
+            []([[maybe_unused]] const DevicePool<int> &self) { // __getstate__
+                return py::make_tuple();
+            },
+            [](py::tuple &t) { // __setstate__
+                if (t.size() != 0) {
+                    throw std::runtime_error("Invalid state!");
+                }
+                return DevicePool<int>{};
+            }));
+
+    py::class_<DevTag<int>>(m, "DevTag")
+        .def(py::init<>())
+        .def(py::init<int>())
+        .def(py::init([](int device_id, void *stream_id) {
+            // Note, streams must be handled externally for now.
+            // Binding support provided through void* conversion to cudaStream_t
+            return new DevTag<int>(device_id,
+                                   static_cast<cudaStream_t>(stream_id));
+        }))
+        .def(py::init<const DevTag<int> &>())
+        .def("getDeviceID", &DevTag<int>::getDeviceID)
+        .def("getStreamID",
+             [](DevTag<int> &dev_tag) {
+                 // default stream points to nullptr, so just return void* as
+                 // type
+                 return static_cast<void *>(dev_tag.getStreamID());
+             })
+        .def("refresh", &DevTag<int>::refresh);
+}
+
+} // namespace Pennylane::LightningGPU::Util
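For context, here is a minimal sketch of how the utils registered above look from the Python side once the extension is built. The `pennylane_lightning.lightning_gpu_ops` module name follows the repository's compiled-extension naming, and the exact return convention of `acquireDevice` is an assumption, not part of this diff:

```python
# Minimal usage sketch (module name and acquireDevice return value assumed).
from pennylane_lightning.lightning_gpu_ops import (
    DevPool,
    DevTag,
    get_gpu_arch,
    is_gpu_supported,
)

if is_gpu_supported(device_number=0):
    print("GPU arch support:", get_gpu_arch(device_number=0))

pool = DevPool()
print("Total devices:", DevPool.getTotalDevices())  # static method
dev_id = pool.acquireDevice()  # assumed to return the acquired device index
tag = DevTag(dev_id)           # device id plus the default (NULL) stream
print(tag.getDeviceID())
pool.releaseDevice(dev_id)
```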
diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/bindings/LGPUBindings.hpp b/pennylane_lightning/core/src/simulators/lightning_gpu/bindings/LGPUBindings.hpp
index 4ebb907791..5bd92b5520 100644
--- a/pennylane_lightning/core/src/simulators/lightning_gpu/bindings/LGPUBindings.hpp
+++ b/pennylane_lightning/core/src/simulators/lightning_gpu/bindings/LGPUBindings.hpp
@@ -20,6 +20,7 @@
 #include "cuda.h"
 
 #include "BindingsBase.hpp"
+#include "BindingsCudaUtils.hpp"
 #include "Constant.hpp"
 #include "ConstantUtil.hpp" // lookup
 #include "DevTag.hpp"
@@ -362,62 +363,7 @@ auto getBackendInfo() -> py::dict {
  */
 void registerBackendSpecificInfo(py::module_ &m) {
     m.def("backend_info", &getBackendInfo, "Backend-specific information.");
-    m.def("device_reset", &deviceReset, "Reset all GPU devices and contexts.");
-    m.def("allToAllAccess", []() {
-        for (int i = 0; i < static_cast<int>(getGPUCount()); i++) {
-            cudaDeviceEnablePeerAccess(i, 0);
-        }
-    });
-
-    m.def("is_gpu_supported", &isCuQuantumSupported,
-          py::arg("device_number") = 0,
-          "Checks if the given GPU device meets the minimum architecture "
-          "support for the PennyLane-Lightning-GPU device.");
-
-    m.def("get_gpu_arch", &getGPUArch, py::arg("device_number") = 0,
-          "Returns the given GPU major and minor GPU support.");
-    py::class_<DevicePool<int>>(m, "DevPool")
-        .def(py::init<>())
-        .def("getActiveDevices", &DevicePool<int>::getActiveDevices)
-        .def("isActive", &DevicePool<int>::isActive)
-        .def("isInactive", &DevicePool<int>::isInactive)
-        .def("acquireDevice", &DevicePool<int>::acquireDevice)
-        .def("releaseDevice", &DevicePool<int>::releaseDevice)
-        .def("syncDevice", &DevicePool<int>::syncDevice)
-        .def("refresh", &DevicePool<int>::refresh)
-        .def_static("getTotalDevices", &DevicePool<int>::getTotalDevices)
-        .def_static("getDeviceUIDs", &DevicePool<int>::getDeviceUIDs)
-        .def_static("setDeviceID", &DevicePool<int>::setDeviceIdx)
-        .def(py::pickle(
-            []([[maybe_unused]] const DevicePool<int> &self) { // __getstate__
-                return py::make_tuple();
-            },
-            [](py::tuple &t) { // __setstate__
-                if (t.size() != 0) {
-                    throw std::runtime_error("Invalid state!");
-                }
-                return DevicePool<int>{};
-            }));
-
-    py::class_<DevTag<int>>(m, "DevTag")
-        .def(py::init<>())
-        .def(py::init<int>())
-        .def(py::init([](int device_id, void *stream_id) {
-            // Note, streams must be handled externally for now.
-            // Binding support provided through void* conversion to cudaStream_t
-            return new DevTag<int>(device_id,
-                                   static_cast<cudaStream_t>(stream_id));
-        }))
-        .def(py::init<const DevTag<int> &>())
-        .def("getDeviceID", &DevTag<int>::getDeviceID)
-        .def("getStreamID",
-             [](DevTag<int> &dev_tag) {
-                 // default stream points to nullptr, so just return void* as
-                 // type
-                 return static_cast<void *>(dev_tag.getStreamID());
-             })
-        .def("refresh", &DevTag<int>::refresh);
+    registerCudaUtils(m);
 }
 
 } // namespace Pennylane::LightningGPU
-
 /// @endcond
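The `py::pickle` pair moved into the shared module gives `DevPool` a deliberately state-free pickle contract: `__getstate__` returns an empty tuple and `__setstate__` rejects anything else, so unpickling always yields a freshly constructed pool rather than restoring acquired-device state. A short sketch, assuming the same `lightning_gpu_ops` module name as above:

```python
# DevPool pickle round-trip: no device state is serialized, so the
# restored object is a brand-new pool.
import pickle

from pennylane_lightning.lightning_gpu_ops import DevPool

restored = pickle.loads(pickle.dumps(DevPool()))
print(restored.getActiveDevices())  # fresh pool; nothing carried over
```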
.def("acquireDevice", &DevicePool::acquireDevice) - .def("releaseDevice", &DevicePool::releaseDevice) - .def("syncDevice", &DevicePool::syncDevice) - .def("refresh", &DevicePool::refresh) - .def_static("getTotalDevices", &DevicePool::getTotalDevices) - .def_static("getDeviceUIDs", &DevicePool::getDeviceUIDs) - .def_static("setDeviceID", &DevicePool::setDeviceIdx) - .def(py::pickle( - []([[maybe_unused]] const DevicePool &self) { // __getstate__ - return py::make_tuple(); - }, - [](py::tuple &t) { // __setstate__ - if (t.size() != 0) { - throw std::runtime_error("Invalid state!"); - } - return DevicePool{}; - })); - - py::class_>(m, "DevTag") - .def(py::init<>()) - .def(py::init()) - .def(py::init([](int device_id, void *stream_id) { - // Note, streams must be handled externally for now. - // Binding support provided through void* conversion to cudaStream_t - return new DevTag(device_id, - static_cast(stream_id)); - })) - .def(py::init &>()) - .def("getDeviceID", &DevTag::getDeviceID) - .def("getStreamID", - [](DevTag &dev_tag) { - // default stream points to nullptr, so just return void* as - // type - return static_cast(dev_tag.getStreamID()); - }) - .def("refresh", &DevTag::refresh); + registerCudaUtils(m); } } // namespace Pennylane::LightningGPU - /// @endcond diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp index 83f2187dc0..39bb9fa61c 100644 --- a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp @@ -24,6 +24,7 @@ #include "cuda.h" #include "BindingsBase.hpp" +#include "BindingsCudaUtils.hpp" #include "DevTag.hpp" #include "DevicePool.hpp" #include "Error.hpp" @@ -35,6 +36,7 @@ namespace { using namespace Pennylane; using namespace Pennylane::Bindings; +using namespace Pennylane::LightningGPU::Util; using Pennylane::LightningTensor::TNCuda::MPSTNCuda; } // namespace /// @endcond @@ -88,54 +90,9 @@ auto getBackendInfo() -> py::dict { * * @param m Pybind11 module. */ -// TODO Move this method to a separate module for both LGPU and LTensor usage. 
void registerBackendSpecificInfo(py::module_ &m) { m.def("backend_info", &getBackendInfo, "Backend-specific information."); - m.def("device_reset", &deviceReset, "Reset all GPU devices and contexts."); - m.def("allToAllAccess", []() { - for (int i = 0; i < static_cast(getGPUCount()); i++) { - cudaDeviceEnablePeerAccess(i, 0); - } - }); - - m.def("is_gpu_supported", &isCuQuantumSupported, - py::arg("device_number") = 0, - "Checks if the given GPU device meets the minimum architecture " - "support for the PennyLane-Lightning-Tensor device."); - - m.def("get_gpu_arch", &getGPUArch, py::arg("device_number") = 0, - "Returns the given GPU major and minor GPU support."); - py::class_>(m, "DevPool") - .def(py::init<>()) - .def("getActiveDevices", &DevicePool::getActiveDevices) - .def("isActive", &DevicePool::isActive) - .def("isInactive", &DevicePool::isInactive) - .def("acquireDevice", &DevicePool::acquireDevice) - .def("releaseDevice", &DevicePool::releaseDevice) - .def("syncDevice", &DevicePool::syncDevice) - .def_static("getTotalDevices", &DevicePool::getTotalDevices) - .def_static("getDeviceUIDs", &DevicePool::getDeviceUIDs) - .def_static("setDeviceID", &DevicePool::setDeviceIdx); - - py::class_>(m, "DevTag") - .def(py::init<>()) - .def(py::init()) - .def(py::init([](int device_id, void *stream_id) { - // Note, streams must be handled externally for now. - // Binding support provided through void* conversion to cudaStream_t - return new DevTag(device_id, - static_cast(stream_id)); - })) - .def(py::init &>()) - .def("getDeviceID", &DevTag::getDeviceID) - .def("getStreamID", - [](DevTag &dev_tag) { - // default stream points to nullptr, so just return void* as - // type - return static_cast(dev_tag.getStreamID()); - }) - .def("refresh", &DevTag::refresh); + registerCudaUtils(m); } } // namespace Pennylane::LightningTensor::TNCuda - /// @endcond
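Since both backends now route through `registerCudaUtils`, the two compiled extensions should expose identical CUDA-utility symbols. A hedged smoke test follows; the `lightning_tensor_ops` module name is inferred from the `lightning_gpu_ops` naming and is an assumption:

```python
# Smoke test: the shared CUDA utils should appear in both extensions.
import importlib

for name in (
    "pennylane_lightning.lightning_gpu_ops",
    "pennylane_lightning.lightning_tensor_ops",  # assumed module name
):
    mod = importlib.import_module(name)
    for attr in ("device_reset", "is_gpu_supported", "DevPool", "DevTag"):
        assert hasattr(mod, attr), f"{name} missing {attr}"
```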