Fix Lightning fallbacks to support PL 0.29 changes #416

Merged · 19 commits · Feb 21, 2023
Changes from 8 commits
6 changes: 5 additions & 1 deletion pennylane_lightning/_serialize.py
@@ -21,6 +21,7 @@
BasisState,
Hadamard,
Projector,
Hamiltonian,
QubitStateVector,
Rot,
)
@@ -65,10 +66,13 @@ def _obs_has_kernel(ob: Observable) -> bool:
"""
if is_pauli_word(ob):
return True
if isinstance(ob, (Hadamard, Projector)):
if isinstance(ob, (Hadamard)):
return True
if isinstance(ob, Hamiltonian):
return all(_obs_has_kernel(o) for o in ob.ops)
if isinstance(ob, Tensor):
return all(_obs_has_kernel(o) for o in ob.obs)

return False


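Note: in PL 0.29, `Projector` loses its dedicated kernel while `Hamiltonian` gains one whenever every term does. A minimal sketch of the new behaviour, assuming this branch of `pennylane_lightning` is installed:

```python
import pennylane as qml
from pennylane_lightning._serialize import _obs_has_kernel

# A Hamiltonian whose terms are all Pauli words is now handled by a device kernel.
H = qml.Hamiltonian([0.5, 1.5], [qml.PauliZ(0), qml.PauliX(0) @ qml.PauliY(1)])
assert _obs_has_kernel(H)

# Projector now takes the generic (serialized) fallback path instead.
assert not _obs_has_kernel(qml.Projector([0], wires=0))
```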
2 changes: 1 addition & 1 deletion pennylane_lightning/_version.py
@@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.29.0-dev9"
__version__ = "0.29.0-dev10"
51 changes: 38 additions & 13 deletions pennylane_lightning/lightning_qubit.py
@@ -789,15 +789,10 @@ def expval(self, observable, shot_range=None, bin_size=None):
Returns:
Expectation value of the observable
"""
if (
(observable.arithmetic_depth > 0)
or isinstance(observable.name, List)
or observable.name
in [
"Identity",
"Projector",
]
):
if observable.name in [
"Identity",
"Projector",
]:
return super().expval(observable, shot_range=shot_range, bin_size=bin_size)

if self.shots is not None:
@@ -814,7 +809,10 @@
if observable.name == "SparseHamiltonian":
if Kokkos_info()["USE_KOKKOS"] == True:
# converting COO to CSR sparse representation.
CSR_SparseHamiltonian = observable.data[0].tocsr(copy=False)

CSR_SparseHamiltonian = observable.sparse_matrix(wire_order=self.wires).tocsr(
copy=False
)
return M.expval(
CSR_SparseHamiltonian.indptr,
CSR_SparseHamiltonian.indices,
@@ -824,7 +822,11 @@
"The expval of a SparseHamiltonian requires Kokkos and Kokkos Kernels."
)

if observable.name in ["Hamiltonian", "Hermitian"]:
if (
observable.name in ["Hamiltonian", "Hermitian"]
or (observable.arithmetic_depth > 0)
or isinstance(observable.name, List)
):
ob_serialized = _serialize_ob(observable, self.wire_map, use_csingle=self.use_csingle)
return M.expval(ob_serialized)
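A sketch of the code path these expval hunks enable. It assumes a Kokkos-enabled build of `lightning.qubit`; `observable.sparse_matrix(wire_order=...)` replaces the removed `qml.utils.sparse_hamiltonian` helper:

```python
import pennylane as qml

dev = qml.device("lightning.qubit", wires=2)
H = qml.Hamiltonian([1.0], [qml.PauliX(0) @ qml.Identity(1)])

@qml.qnode(dev)
def circuit():
    qml.RY(0.4, wires=0)
    # Converted to CSR inside expval via observable.sparse_matrix(wire_order=self.wires).
    return qml.expval(qml.SparseHamiltonian(H.sparse_matrix(), wires=[0, 1]))

print(circuit())  # ≈ sin(0.4), the expectation of X ⊗ I after RY(0.4) on wire 0
```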

@@ -847,10 +849,9 @@ def var(self, observable, shot_range=None, bin_size=None):
Returns:
Variance of the observable
"""
if isinstance(observable.name, List) or observable.name in [
if observable.name in [
"Identity",
"Projector",
"Hermitian",
]:
return super().var(observable, shot_range=shot_range, bin_size=bin_size)

@@ -866,6 +867,30 @@
state_vector = StateVectorC64(ket) if self.use_csingle else StateVectorC128(ket)
M = MeasuresC64(state_vector) if self.use_csingle else MeasuresC128(state_vector)

if observable.name == "SparseHamiltonian":
if Kokkos_info()["USE_KOKKOS"] == True:
# converting COO to CSR sparse representation.

CSR_SparseHamiltonian = observable.sparse_matrix(wire_order=self.wires).tocsr(
copy=False
)
return M.var(
CSR_SparseHamiltonian.indptr,
CSR_SparseHamiltonian.indices,
CSR_SparseHamiltonian.data,
)
raise NotImplementedError(
"The expval of a SparseHamiltonian requires Kokkos and Kokkos Kernels."
)

if (
observable.name in ["Hamiltonian", "Hermitian"]
or (observable.arithmetic_depth > 0)
or isinstance(observable.name, List)
):
ob_serialized = _serialize_ob(observable, self.wire_map, use_csingle=self.use_csingle)
return M.var(ob_serialized)

# translate to wire labels used by device
observable_wires = self.map_wires(observable.wires)

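Analogous sketch for the variance fallback added above (analytic mode): multi-term Hamiltonians, Hermitians, and composite observables now dispatch to `M.var(ob_serialized)` instead of the base-device implementation.

```python
import pennylane as qml

dev = qml.device("lightning.qubit", wires=2, shots=None)
H = qml.Hamiltonian([0.3, 0.7], [qml.PauliZ(0), qml.PauliX(0) @ qml.PauliX(1)])

@qml.qnode(dev)
def circuit():
    qml.RY(0.8, wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.var(H)

print(circuit())  # Var(H) = <H^2> - <H>^2, evaluated through the new C++ binding
```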
34 changes: 30 additions & 4 deletions pennylane_lightning/src/bindings/Bindings.cpp
@@ -134,10 +134,36 @@ void lightning_class_bindings(py::module_ &m) {
strides /* strides for each axis */
));
})
.def("var", [](Measures<PrecisionT> &M, const std::string &operation,
const std::vector<size_t> &wires) {
return M.var(operation, wires);
});
.def("var",
[](Measures<PrecisionT> &M, const std::string &operation,
const std::vector<size_t> &wires) {
return M.var(operation, wires);
})
.def("var",
static_cast<PrecisionT (Measures<PrecisionT>::*)(
const std::string &, const std::vector<size_t> &)>(
&Measures<PrecisionT>::var),
"Variance of an operation by name.")
.def(
"var",
[](Measures<PrecisionT> &M,
const std::shared_ptr<Observable<PrecisionT>> &ob) {
return M.var(*ob);
},
"Variance of an operation object.")
.def(
"var",
[](Measures<PrecisionT> &M, const np_arr_sparse_ind row_map,
const np_arr_sparse_ind entries, const np_arr_c values) {
return M.var(
static_cast<sparse_index_type *>(row_map.request().ptr),
static_cast<sparse_index_type>(row_map.request().size),
static_cast<sparse_index_type *>(entries.request().ptr),
static_cast<std::complex<PrecisionT> *>(
values.request().ptr),
static_cast<sparse_index_type>(values.request().size));
},
"Expected value of a sparse Hamiltonian.");
}

template <class PrecisionT, class ParamT>
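The three `var` overloads resolve by pybind11 signature matching. A rough sketch of the Python-side call shapes; the module path `lightning_qubit_ops` and the CSR placeholders are assumptions, while `MeasuresC128`/`StateVectorC128` follow the existing usage in `lightning_qubit.py`:

```python
import numpy as np
from pennylane_lightning.lightning_qubit_ops import MeasuresC128, StateVectorC128

ket = np.zeros(4, dtype=np.complex128)  # |00> state on two wires
ket[0] = 1.0
M = MeasuresC128(StateVectorC128(ket))

print(M.var("PauliZ", [0]))         # named-operation overload
# M.var(serialized_observable)      # Observable overload (output of _serialize_ob)
# M.var(indptr, indices, data)      # sparse-Hamiltonian overload: CSR index/value arrays
```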
57 changes: 57 additions & 0 deletions pennylane_lightning/src/simulator/Measures.hpp
@@ -232,6 +232,26 @@ class Measures {
return std::real(inner_prod);
}

/**
* @brief Variance value for a general Observable
*
* @param ob Observable
*/
auto var(const Observable<fp_t> &ob) -> fp_t {
// Copying the original state vector, for the application of the
// observable operator.
StateVectorManagedCPU<fp_t> op_sv(original_statevector);
ob.applyInPlace(op_sv);

const fp_t mean_square = std::real(Util::innerProdC(
op_sv.getData(), op_sv.getData(), op_sv.getLength()));
const fp_t squared_mean = static_cast<fp_t>(std::pow(
std::real(Util::innerProdC(original_statevector.getData(),
op_sv.getData(), op_sv.getLength())),
2));
return (mean_square - squared_mean);
}

/**
* @brief Variance of an observable.
*
@@ -309,6 +329,43 @@
return expected_value_list;
};

/**
* @brief Variance of a Sparse Hamiltonian.
*
* @tparam index_type integer type used as indices of the sparse matrix.
* @param row_map_ptr row_map array pointer.
* The j element encodes the number of non-zeros above
* row j.
* @param row_map_size row_map array size.
* @param entries_ptr pointer to an array with column indices of the
* non-zero elements.
* @param values_ptr pointer to an array with the non-zero elements.
* @param numNNZ number of non-zero elements.
* @return fp_t Variance value.
*/
template <class index_type>
fp_t var(const index_type *row_map_ptr, const index_type row_map_size,
const index_type *entries_ptr, const CFP_t *values_ptr,
const index_type numNNZ) {
PL_ABORT_IF(
(original_statevector.getLength() != (size_t(row_map_size) - 1)),
"Statevector and Hamiltonian have incompatible sizes.");
auto operator_vector = Util::apply_Sparse_Matrix(
original_statevector.getData(),
static_cast<index_type>(original_statevector.getLength()),
row_map_ptr, row_map_size, entries_ptr, values_ptr, numNNZ);

const fp_t mean_square = std::real(
Util::innerProdC(operator_vector.data(), operator_vector.data(),
operator_vector.size()));
const fp_t squared_mean = static_cast<fp_t>(
std::pow(std::real(Util::innerProdC(operator_vector.data(),
original_statevector.getData(),
operator_vector.size())),
2));
return (mean_square - squared_mean);
};

/**
* @brief Generate samples using the alias method.
* Reference: https://en.wikipedia.org/wiki/Alias_method
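For reference, both new `var` overloads evaluate the same textbook identity, restated here under the assumption of a normalized state and a Hermitian observable (so the inner products below are real):

```latex
\mathrm{Var}(H)
  = \langle\psi|H^{2}|\psi\rangle - \langle\psi|H|\psi\rangle^{2}
  = \langle\varphi|\varphi\rangle - \bigl(\operatorname{Re}\langle\psi|\varphi\rangle\bigr)^{2},
\qquad |\varphi\rangle := H|\psi\rangle
```

This is exactly `mean_square - squared_mean` in the code above, with `op_sv` (dense case) and `operator_vector` (sparse case) playing the role of |φ⟩.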
8 changes: 3 additions & 5 deletions tests/test_measures_sparse.py
@@ -45,9 +45,7 @@ def circuit():
qml.RY(-0.2, wires=[1])
return qml.expval(
qml.SparseHamiltonian(
qml.utils.sparse_hamiltonian(
qml.Hamiltonian([1], [qml.PauliX(0) @ qml.Identity(1)])
),
qml.Hamiltonian([1], [qml.PauliX(0) @ qml.Identity(1)]).sparse_matrix(),
wires=[0, 1],
)
)
@@ -98,7 +96,7 @@ class TestSparseExpvalQChem:
symbols,
geometry,
)
H_sparse = qml.utils.sparse_hamiltonian(H)
H_sparse = H.sparse_matrix()

active_electrons = 1

@@ -114,7 +112,7 @@ def dev(self, request):
@pytest.mark.parametrize(
"qubits, wires, H_sparse, hf_state, excitations",
[
[qubits, np.arange(qubits), H_sparse, hf_state, excitations],
[qubits, range(qubits), H_sparse, hf_state, excitations],
[
qubits,
np.random.permutation(np.arange(qubits)),
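These test updates track the PL 0.29 move from `qml.utils.sparse_hamiltonian` to `Hamiltonian.sparse_matrix()`. A minimal before/after sketch (the old call is shown commented out):

```python
import pennylane as qml

H = qml.Hamiltonian([1.0], [qml.PauliX(0) @ qml.Identity(1)])

# H_sparse = qml.utils.sparse_hamiltonian(H)   # pre-0.29 helper, no longer used
H_sparse = H.sparse_matrix()                    # 0.29+: returns a scipy sparse matrix
obs = qml.SparseHamiltonian(H_sparse, wires=[0, 1])
```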
22 changes: 13 additions & 9 deletions tests/test_serialize.py
@@ -61,29 +61,33 @@ def test_hadamard(self):
o = qml.Hadamard(0)
assert _obs_has_kernel(o)

def test_projector(self):
"""Tests if return is true for a Projector observable"""
o = qml.Projector([0], wires=0)
assert _obs_has_kernel(o)

def test_hermitian(self):
"""Tests if return is false for a Hermitian observable"""
o = qml.Hermitian(np.eye(2), wires=0)
assert not _obs_has_kernel(o)

def test_tensor_product_of_valid_terms(self):
"""Tests if return is true for a tensor product of Pauli, Hadamard, and Projector terms"""
o = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.Projector([0], wires=2)
"""Tests if return is true for a tensor product of Pauli, Hadamard, and Hamiltonian terms"""
o = qml.PauliZ(0) @ qml.Hadamard(1) @ (0.1 * (qml.PauliZ(2) + qml.PauliX(3)))
assert _obs_has_kernel(o)

def test_tensor_product_of_invalid_terms(self):
"""Tests if return is false for a tensor product of Hermitian terms"""
o = qml.Hermitian(np.eye(2), wires=0) @ qml.Hermitian(np.eye(2), wires=1)
o = (
qml.Hermitian(np.eye(2), wires=0)
@ qml.Hermitian(np.eye(2), wires=1)
@ qml.Projector([0], wires=2)
)
assert not _obs_has_kernel(o)

def test_tensor_product_of_mixed_terms(self):
"""Tests if return is false for a tensor product of valid and invalid terms"""
o = qml.PauliZ(0) @ qml.Hermitian(np.eye(2), wires=1)
o = qml.PauliZ(0) @ qml.Hermitian(np.eye(2), wires=1) @ qml.Projector([0], wires=2)
assert not _obs_has_kernel(o)

def test_projector(self):
"""Tests if return is false for a Projector observable"""
o = qml.Projector([0], wires=0)
assert not _obs_has_kernel(o)

