Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix Lightning fallbacks to support PL 0.29 changes #416

Merged
merged 19 commits into from
Feb 21, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .github/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@

### Bug fixes

* Ensure an error is raised when asking for out-of-order marginal probabilities. Prevents the return of incorrect results.
[(#416)](https://github.com/PennyLaneAI/pennylane-lightning/pull/416)

* Fix Github shields in README.
[(#402)](https://github.com/PennyLaneAI/pennylane-lightning/pull/402)

Expand Down
6 changes: 5 additions & 1 deletion pennylane_lightning/_serialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
BasisState,
Hadamard,
Projector,
Hamiltonian,
QubitStateVector,
Rot,
)
Expand Down Expand Up @@ -65,10 +66,13 @@ def _obs_has_kernel(ob: Observable) -> bool:
"""
if is_pauli_word(ob):
return True
if isinstance(ob, (Hadamard, Projector)):
if isinstance(ob, (Hadamard)):
return True
if isinstance(ob, Hamiltonian):
return all(_obs_has_kernel(o) for o in ob.ops)
if isinstance(ob, Tensor):
return all(_obs_has_kernel(o) for o in ob.obs)

return False


Expand Down
2 changes: 1 addition & 1 deletion pennylane_lightning/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.29.0-dev9"
__version__ = "0.29.0-dev10"
63 changes: 49 additions & 14 deletions pennylane_lightning/lightning_qubit.py
Original file line number Diff line number Diff line change
Expand Up @@ -753,9 +753,19 @@ def probability(self, wires=None, shot_range=None, bin_size=None):
# translate to wire labels used by device
device_wires = self.map_wires(wires)

if (
device_wires
and len(device_wires) > 1
and (not np.all(np.array(device_wires)[:-1] <= np.array(device_wires)[1:]))
):
raise RuntimeError(
"Lightning does not currently support out-of-order indices for probabilities"
)

# To support np.complex64 based on the type of self._state
dtype = self._state.dtype
ket = np.ravel(self._state)

state_vector = StateVectorC64(ket) if self.use_csingle else StateVectorC128(ket)
M = MeasuresC64(state_vector) if self.use_csingle else MeasuresC128(state_vector)
return M.probs(device_wires)
Expand Down Expand Up @@ -789,15 +799,10 @@ def expval(self, observable, shot_range=None, bin_size=None):
Returns:
Expectation value of the observable
"""
if (
(observable.arithmetic_depth > 0)
or isinstance(observable.name, List)
or observable.name
in [
"Identity",
"Projector",
]
):
if observable.name in [
"Identity",
"Projector",
]:
return super().expval(observable, shot_range=shot_range, bin_size=bin_size)

if self.shots is not None:
Expand All @@ -813,8 +818,11 @@ def expval(self, observable, shot_range=None, bin_size=None):
M = MeasuresC64(state_vector) if self.use_csingle else MeasuresC128(state_vector)
if observable.name == "SparseHamiltonian":
if Kokkos_info()["USE_KOKKOS"] == True:
# converting COO to CSR sparse representation.
CSR_SparseHamiltonian = observable.data[0].tocsr(copy=False)
# ensuring CSR sparse representation.

CSR_SparseHamiltonian = observable.sparse_matrix(wire_order=self.wires).tocsr(
copy=False
)
return M.expval(
CSR_SparseHamiltonian.indptr,
CSR_SparseHamiltonian.indices,
Expand All @@ -824,7 +832,11 @@ def expval(self, observable, shot_range=None, bin_size=None):
"The expval of a SparseHamiltonian requires Kokkos and Kokkos Kernels."
)

if observable.name in ["Hamiltonian", "Hermitian"]:
if (
observable.name in ["Hamiltonian", "Hermitian"]
or (observable.arithmetic_depth > 0)
or isinstance(observable.name, List)
):
ob_serialized = _serialize_ob(observable, self.wire_map, use_csingle=self.use_csingle)
return M.expval(ob_serialized)

Expand All @@ -847,10 +859,9 @@ def var(self, observable, shot_range=None, bin_size=None):
Returns:
Variance of the observable
"""
if isinstance(observable.name, List) or observable.name in [
if observable.name in [
"Identity",
"Projector",
"Hermitian",
]:
return super().var(observable, shot_range=shot_range, bin_size=bin_size)

Expand All @@ -866,6 +877,30 @@ def var(self, observable, shot_range=None, bin_size=None):
state_vector = StateVectorC64(ket) if self.use_csingle else StateVectorC128(ket)
M = MeasuresC64(state_vector) if self.use_csingle else MeasuresC128(state_vector)

if observable.name == "SparseHamiltonian":
if Kokkos_info()["USE_KOKKOS"] == True:
# ensuring CSR sparse representation.

CSR_SparseHamiltonian = observable.sparse_matrix(wire_order=self.wires).tocsr(
copy=False
)
return M.var(
CSR_SparseHamiltonian.indptr,
CSR_SparseHamiltonian.indices,
CSR_SparseHamiltonian.data,
)
raise NotImplementedError(
"The expval of a SparseHamiltonian requires Kokkos and Kokkos Kernels."
)

if (
observable.name in ["Hamiltonian", "Hermitian"]
or (observable.arithmetic_depth > 0)
or isinstance(observable.name, List)
):
ob_serialized = _serialize_ob(observable, self.wire_map, use_csingle=self.use_csingle)
return M.var(ob_serialized)

# translate to wire labels used by device
observable_wires = self.map_wires(observable.wires)

Expand Down
41 changes: 34 additions & 7 deletions pennylane_lightning/src/bindings/Bindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -87,11 +87,12 @@ void lightning_class_bindings(py::module_ &m) {
.def(py::init<const StateVectorRawCPU<PrecisionT> &>())
.def("probs",
[](Measures<PrecisionT> &M, const std::vector<size_t> &wires) {
if (wires.empty()) {
return py::array_t<ParamT>(py::cast(M.probs()));
}
return py::array_t<ParamT>(py::cast(M.probs(wires)));
})
.def("probs",
[](Measures<PrecisionT> &M) {
return py::array_t<ParamT>(py::cast(M.probs()));
})
.def("expval",
static_cast<PrecisionT (Measures<PrecisionT>::*)(
const std::string &, const std::vector<size_t> &)>(
Expand Down Expand Up @@ -134,10 +135,36 @@ void lightning_class_bindings(py::module_ &m) {
strides /* strides for each axis */
));
})
.def("var", [](Measures<PrecisionT> &M, const std::string &operation,
const std::vector<size_t> &wires) {
return M.var(operation, wires);
});
.def("var",
[](Measures<PrecisionT> &M, const std::string &operation,
const std::vector<size_t> &wires) {
return M.var(operation, wires);
})
.def("var",
static_cast<PrecisionT (Measures<PrecisionT>::*)(
const std::string &, const std::vector<size_t> &)>(
&Measures<PrecisionT>::var),
"Variance of an operation by name.")
.def(
"var",
[](Measures<PrecisionT> &M,
const std::shared_ptr<Observable<PrecisionT>> &ob) {
return M.var(*ob);
},
"Variance of an operation object.")
.def(
"var",
[](Measures<PrecisionT> &M, const np_arr_sparse_ind row_map,
const np_arr_sparse_ind entries, const np_arr_c values) {
return M.var(
static_cast<sparse_index_type *>(row_map.request().ptr),
static_cast<sparse_index_type>(row_map.request().size),
static_cast<sparse_index_type *>(entries.request().ptr),
static_cast<std::complex<PrecisionT> *>(
values.request().ptr),
static_cast<sparse_index_type>(values.request().size));
},
"Expected value of a sparse Hamiltonian.");
}

template <class PrecisionT, class ParamT>
Expand Down
61 changes: 60 additions & 1 deletion pennylane_lightning/src/simulator/Measures.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,9 @@ class Measures {
* @return Floating point std::vector with probabilities.
* The basis columns are rearranged according to wires.
*/
std::vector<fp_t> probs(const std::vector<size_t> &wires) {
std::vector<fp_t>
probs(const std::vector<size_t> &wires,
[[maybe_unused]] const std::vector<size_t> &device_wires = {}) {
// Determining index that would sort the vector.
// This information is needed later.
const auto sorted_ind_wires = Util::sorting_indices(wires);
Expand Down Expand Up @@ -232,6 +234,26 @@ class Measures {
return std::real(inner_prod);
}

/**
 * @brief Variance of a general Observable: Var[O] = <O^2> - <O>^2.
 *
 * @param ob Observable to compute the variance of.
 * @return fp_t Variance value.
 */
auto var(const Observable<fp_t> &ob) -> fp_t {
    // Copying the original state vector, so the observable can be
    // applied without mutating the measured state.
    StateVectorManagedCPU<fp_t> op_sv(original_statevector);
    ob.applyInPlace(op_sv);

    // <psi|O^t O|psi>, i.e. <O^2> for a Hermitian observable.
    const fp_t mean_square = std::real(Util::innerProdC(
        op_sv.getData(), op_sv.getData(), op_sv.getLength()));
    // <psi|O|psi>; squared by multiplication rather than std::pow(x, 2),
    // which promotes to double and would require a narrowing cast back.
    const fp_t mean =
        std::real(Util::innerProdC(original_statevector.getData(),
                                   op_sv.getData(), op_sv.getLength()));
    return mean_square - mean * mean;
}

/**
* @brief Variance of an observable.
*
Expand Down Expand Up @@ -309,6 +331,43 @@ class Measures {
return expected_value_list;
};

/**
 * @brief Variance of a Sparse Hamiltonian (CSR format).
 *
 * @tparam index_type integer type used as indices of the sparse matrix.
 * @param row_map_ptr row_map array pointer.
 *                    The j element encodes the number of non-zeros above
 *                    row j.
 * @param row_map_size row_map array size.
 * @param entries_ptr pointer to an array with column indices of the
 *                    non-zero elements.
 * @param values_ptr pointer to an array with the non-zero elements.
 * @param numNNZ number of non-zero elements.
 * @return fp_t Variance value.
 */
template <class index_type>
fp_t var(const index_type *row_map_ptr, const index_type row_map_size,
         const index_type *entries_ptr, const CFP_t *values_ptr,
         const index_type numNNZ) {
    // A CSR row_map of length n+1 describes an n x n matrix, which must
    // match the state-vector length.
    PL_ABORT_IF((original_statevector.getLength() !=
                 (static_cast<size_t>(row_map_size) - 1)),
                "Statevector and Hamiltonian have incompatible sizes.");
    auto operator_vector = Util::apply_Sparse_Matrix(
        original_statevector.getData(),
        static_cast<index_type>(original_statevector.getLength()),
        row_map_ptr, row_map_size, entries_ptr, values_ptr, numNNZ);

    // <psi|H^t H|psi>, i.e. <H^2> for a Hermitian Hamiltonian.
    const fp_t mean_square = std::real(
        Util::innerProdC(operator_vector.data(), operator_vector.data(),
                         operator_vector.size()));
    // <psi|H|psi>; squared by multiplication rather than std::pow(x, 2),
    // which promotes to double and would require a narrowing cast back.
    const fp_t mean = std::real(
        Util::innerProdC(operator_vector.data(),
                         original_statevector.getData(),
                         operator_vector.size()));
    return mean_square - mean * mean;
}

/**
* @brief Generate samples using the alias method.
* Reference: https://en.wikipedia.org/wiki/Alias_method
Expand Down
Loading