Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix tests for msvc #264

Merged
merged 11 commits into from
Mar 21, 2022
2 changes: 1 addition & 1 deletion pennylane_lightning/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,4 @@
"""Version information.
Version number (major.minor.patch[-label])
"""
__version__ = "0.23.0-dev6"
__version__ = "0.23.0-dev7"
86 changes: 44 additions & 42 deletions pennylane_lightning/src/algorithms/AdjointDiff.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -340,8 +340,8 @@ template <class T = double> class AdjointJacobian {
applyOperations(lambda, ops);
}

const auto tp_begin = tp.begin();
auto tp_it = tp.end();
auto tp_it = tp.rbegin();
const auto tp_rend = tp.rend();

// Create observable-applied state-vectors
std::vector<StateVectorManaged<T>> H_lambda(
Expand All @@ -355,49 +355,51 @@ template <class T = double> class AdjointJacobian {
PL_ABORT_IF(ops.getOpsParams()[op_idx].size() > 1,
"The operation is not supported using the adjoint "
"differentiation method");
if ((ops_name[op_idx] != "QubitStateVector") &&
(ops_name[op_idx] != "BasisState")) {
mu.updateData(lambda.getDataVector());
applyOperationAdj(lambda, ops, op_idx);

if (ops.hasParams(op_idx)) {
if ((current_param_idx == *(std::prev(tp_it))) ||
std::find(tp_begin, tp_it, current_param_idx) !=
tp_it) {
const T scalingFactor =
applyGenerator(mu, ops_name[op_idx],
ops.getOpsWires()[op_idx],
!ops.getOpsInverses()[op_idx]) *
(ops.getOpsInverses()[op_idx] ? -1 : 1);

const size_t mat_row_idx =
trainableParamNumber * num_observables;

// clang-format off

#if defined(_OPENMP)
#pragma omp parallel for default(none) \
shared(H_lambda, jac, mu, scalingFactor, \
mat_row_idx, \
num_observables)
#endif

// clang-format on
for (size_t obs_idx = 0; obs_idx < num_observables;
obs_idx++) {
jac[mat_row_idx + obs_idx] =
-2 * scalingFactor *
std::imag(innerProdC(
H_lambda[obs_idx].getDataVector(),
mu.getDataVector()));
}
trainableParamNumber--;
std::advance(tp_it, -1);
if ((ops_name[op_idx] == "QubitStateVector") ||
(ops_name[op_idx] == "BasisState")) {
continue;
}
if (tp_it == tp_rend) {
break; // All done
}
mu.updateData(lambda.getDataVector());
applyOperationAdj(lambda, ops, op_idx);

if (ops.hasParams(op_idx)) {
if (current_param_idx == *tp_it) {
const T scalingFactor =
applyGenerator(mu, ops_name[op_idx],
ops.getOpsWires()[op_idx],
!ops.getOpsInverses()[op_idx]) *
(ops.getOpsInverses()[op_idx] ? -1 : 1);

const size_t mat_row_idx =
trainableParamNumber * num_observables;

// clang-format off

#if defined(_OPENMP)
#pragma omp parallel for default(none) \
shared(H_lambda, jac, mu, scalingFactor, \
mat_row_idx, \
num_observables)
#endif

// clang-format on
for (size_t obs_idx = 0; obs_idx < num_observables;
obs_idx++) {
jac[mat_row_idx + obs_idx] =
-2 * scalingFactor *
std::imag(
innerProdC(H_lambda[obs_idx].getDataVector(),
mu.getDataVector()));
}
current_param_idx--;
trainableParamNumber--;
++tp_it;
}
applyOperationsAdj(H_lambda, ops, static_cast<size_t>(op_idx));
current_param_idx--;
}
applyOperationsAdj(H_lambda, ops, static_cast<size_t>(op_idx));
}
jac = Transpose(jac, jd.getNumParams(), num_observables);
}
Expand Down
2 changes: 1 addition & 1 deletion pennylane_lightning/src/algorithms/JacobianTape.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -275,7 +275,7 @@ template <class T> class JacobianData {
* @param obs Observables for which to calculate Jacobian.
* @param ops Operations used to create given state.
* @param trainP List of parameters participating in Jacobian
* calculation.
* calculation. This must be sorted.
*/
JacobianData(size_t num_params, size_t num_elem, std::complex<T> *ps,
std::vector<ObsDatum<T>> obs, OpsData<T> ops,
Expand Down
9 changes: 5 additions & 4 deletions pennylane_lightning/src/gates/GateImplementationsLM.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,7 @@ class GateImplementationsLM : public PauliGenerator<GateImplementationsLM> {
applyTwoQubitOp(arr, num_qubits, matrix, wires, inverse);
break;
default: {
size_t dim = 1U << wires.size();
size_t dim = size_t{1U} << wires.size();
std::vector<size_t> indices;
indices.resize(dim);

Expand Down Expand Up @@ -1088,8 +1088,8 @@ class GateImplementationsLM : public PauliGenerator<GateImplementationsLM> {
const size_t i10 = i00 | rev_wire1_shift;
const size_t i11 = i00 | rev_wire0_shift | rev_wire1_shift;

arr[i00] = ComplexPrecisionT{};
arr[i01] = ComplexPrecisionT{};
arr[i00] = ComplexPrecisionT{0.0, 0.0};
arr[i01] = ComplexPrecisionT{0.0, 0.0};

std::swap(arr[i10], arr[i11]);
}
Expand Down Expand Up @@ -1188,7 +1188,8 @@ class GateImplementationsLM : public PauliGenerator<GateImplementationsLM> {
}

for (size_t k = 0; k < Util::exp2(num_qubits); k++) {
arr[k] *= (2 * int(Util::popcount(k & wires_parity) % 2) - 1);
arr[k] *= static_cast<PrecisionT>(
2 * int(Util::popcount(k & wires_parity) % 2) - 1);
}
// NOLINTNEXTLINE(readability-magic-numbers)
return static_cast<PrecisionT>(0.5);
Expand Down
2 changes: 1 addition & 1 deletion pennylane_lightning/src/simulator/Measures.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -324,7 +324,7 @@ class Measures {
// Pick samples
for (size_t i = 0; i < num_samples; i++) {
fp_t pct = distribution(generator) * N;
size_t idx = pct;
auto idx = static_cast<size_t>(pct);
if (pct - idx > bucket[idx]) {
idx = bucket_partner[idx];
}
Expand Down
21 changes: 12 additions & 9 deletions pennylane_lightning/src/tests/TestHelpers.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,8 @@ void scaleVector(std::vector<std::complex<Data_t>> &data, Data_t scalar) {
template <typename PrecisionT>
auto createZeroState(size_t num_qubits)
-> std::vector<std::complex<PrecisionT>> {
std::vector<std::complex<PrecisionT>> res(1U << num_qubits, {0.0, 0.0});
std::vector<std::complex<PrecisionT>> res(size_t{1U} << num_qubits,
{0.0, 0.0});
res[0] = std::complex<PrecisionT>{1.0, 0.0};
return res;
}
Expand All @@ -180,7 +181,8 @@ auto createZeroState(size_t num_qubits)
template <typename PrecisionT>
auto createPlusState(size_t num_qubits)
-> std::vector<std::complex<PrecisionT>> {
std::vector<std::complex<PrecisionT>> res(1U << num_qubits, {1.0, 0.0});
std::vector<std::complex<PrecisionT>> res(size_t{1U} << num_qubits,
{1.0, 0.0});
for (auto &elt : res) {
elt /= std::sqrt(1U << num_qubits);
}
Expand All @@ -205,9 +207,10 @@ auto squaredNorm(const std::complex<PrecisionT> *data, size_t data_size)
template <typename PrecisionT, class RandomEngine>
auto createRandomState(RandomEngine &re, size_t num_qubits)
-> std::vector<std::complex<PrecisionT>> {
std::vector<std::complex<PrecisionT>> res(1U << num_qubits, {0.0, 0.0});
std::vector<std::complex<PrecisionT>> res(size_t{1U} << num_qubits,
{0.0, 0.0});
std::uniform_real_distribution<PrecisionT> dist;
for (size_t idx = 0; idx < (1U << num_qubits); idx++) {
for (size_t idx = 0; idx < (size_t{1U} << num_qubits); idx++) {
res[idx] = {dist(re), dist(re)};
}

Expand All @@ -224,7 +227,7 @@ auto createRandomState(RandomEngine &re, size_t num_qubits)
template <typename PrecisionT> auto createProductState(std::string_view str) {
using Pennylane::Util::INVSQRT2;
std::vector<std::complex<PrecisionT>> st;
st.resize(1U << str.length());
st.resize(size_t{1U} << str.length());

std::vector<PrecisionT> zero{1.0, 0.0};
std::vector<PrecisionT> one{0.0, 1.0};
Expand All @@ -234,7 +237,7 @@ template <typename PrecisionT> auto createProductState(std::string_view str) {
std::vector<PrecisionT> minus{INVSQRT2<PrecisionT>(),
-INVSQRT2<PrecisionT>()};

for (size_t k = 0; k < (1U << str.length()); k++) {
for (size_t k = 0; k < (size_t{1U} << str.length()); k++) {
PrecisionT elt = 1.0;
for (size_t n = 0; n < str.length(); n++) {
char c = str[n];
Expand Down Expand Up @@ -286,9 +289,9 @@ auto createParams(Gates::GateOperation op) -> std::vector<PrecisionT> {
case 0:
return {};
case 1:
return {0.312};
return {PrecisionT{0.312}};
case 3:
return {0.128, -0.563, 1.414};
return {PrecisionT{0.128}, PrecisionT{-0.563}, PrecisionT{1.414}};
default:
PL_ABORT("The number of parameters for a given gate is unknown.");
}
Expand All @@ -303,7 +306,7 @@ template <typename PrecisionT, class RandomEngine>
auto randomUnitary(RandomEngine &re, size_t num_qubits)
-> std::vector<std::complex<PrecisionT>> {
using ComplexPrecisionT = std::complex<PrecisionT>;
const size_t dim = (1U << num_qubits);
const size_t dim = (size_t{1U} << num_qubits);
std::vector<ComplexPrecisionT> res(dim * dim, ComplexPrecisionT{});

std::normal_distribution<PrecisionT> dist;
Expand Down
8 changes: 5 additions & 3 deletions pennylane_lightning/src/tests/Test_AdjDiff.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
#define _USE_MATH_DEFINES

#include <algorithm>
#include <cmath>
#include <complex>
Expand All @@ -18,6 +16,10 @@

#include "TestHelpers.hpp"

#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif

using namespace Pennylane;
using namespace Pennylane::Algorithms;

Expand Down Expand Up @@ -127,7 +129,7 @@ TEST_CASE("AdjointJacobian::adjointJacobian Op=RX, Obs=[Z,Z]",

CAPTURE(jacobian);
CHECK(-sin(param[0]) == Approx(jacobian[0]).margin(1e-7));
CHECK(0.0 == Approx(jacobian[1 * num_params + 1]).margin(1e-7));
CHECK(0.0 == Approx(jacobian[1 * num_obs - 1]).margin(1e-7));
}
}
TEST_CASE("AdjointJacobian::adjointJacobian Op=[RX,RX,RX], Obs=[Z,Z,Z]",
Expand Down
43 changes: 21 additions & 22 deletions pennylane_lightning/src/tests/Test_DynamicDispatcher.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,20 @@ using Pennylane::Gates::callGateOps;
* We just check DynamicDispatcher calls the correct function by comparing
* the result from it with that of the direct call.
*/
template <typename PrecisionT, typename ParamT, class GateImplementation>
template <typename PrecisionT, typename ParamT, class GateImplementation,
GateOperation gate_op, class RandomEngine, typename Enable = void>
struct testDispatchForKernel {
template <
GateOperation gate_op, class RandomEngine,
std::enable_if_t<
Util::array_has_elt(GateImplementation::implemented_gates, gate_op),
bool> = true>
static void test(RandomEngine &re, size_t num_qubits) {
static_cast<void>(re);
static_cast<void>(num_qubits);
}
};
template <typename PrecisionT, typename ParamT, class GateImplementation,
GateOperation gate_op, class RandomEngine>
struct testDispatchForKernel<
PrecisionT, ParamT, GateImplementation, gate_op, RandomEngine,
std::enable_if_t<Util::array_has_elt(GateImplementation::implemented_gates,
gate_op)>> {
static void test(RandomEngine &re, size_t num_qubits) {
using CFP_t = std::complex<PrecisionT>;
const std::vector<CFP_t> ini_st =
Expand All @@ -61,20 +68,13 @@ struct testDispatchForKernel {
gate_name, wires, false, params);
REQUIRE(test_st == expected);
}

template <
GateOperation gate_op, class RandomEngine,
std::enable_if_t<!Util::array_has_elt(
GateImplementation::implemented_gates, gate_op),
bool> = true>
static void test(RandomEngine &re, size_t num_qubits) {
// Keep source, but allow clang-tidy to pass for unused
static_cast<void>(re);
static_cast<void>(num_qubits);
} // Do nothing if not implemented;
// This could probably be replaced with an enable_if or SFINAE-like
// pattern.
};
template <typename PrecisionT, typename ParamT, class GateImplementation,
GateOperation gate_op, class RandomEngine>
void testDynamicDispatch(RandomEngine &re, size_t num_qubits) {
testDispatchForKernel<PrecisionT, ParamT, GateImplementation, gate_op,
RandomEngine>::test(re, num_qubits);
}

template <typename PrecisionT, typename ParamT, class GateImplementation,
size_t idx, class RandomEngine>
Expand All @@ -86,11 +86,10 @@ constexpr void testAllGatesForKernelIter(RandomEngine &re,
if constexpr (gate_op != GateOperation::Matrix) { // ignore Matrix
for (size_t num_qubits = 3; num_qubits <= max_num_qubits;
num_qubits++) {
testDispatchForKernel<PrecisionT, ParamT, GateImplementation>::
template test<gate_op>(re, num_qubits);
testDynamicDispatch<PrecisionT, ParamT, GateImplementation,
gate_op>(re, num_qubits);
}
}

testAllGatesForKernelIter<PrecisionT, ParamT, GateImplementation,
idx + 1>(re, max_num_qubits);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ void testGeneratorForGate(RandomEngine &re, size_t num_qubits) {
using ComplexPrecisionT = std::complex<PrecisionT>;
constexpr auto I = Util::IMAG<PrecisionT>();

constexpr ParamT eps = 1e-4; // For finite difference
constexpr auto eps = PrecisionT{1e-4}; // For finite difference

constexpr auto gate_op = static_lookup<gntr_op>(generator_gate_pairs);
constexpr auto gate_name = static_lookup<gate_op>(Constant::gate_names);
Expand Down Expand Up @@ -107,7 +107,7 @@ void testGeneratorForGate(RandomEngine &re, size_t num_qubits) {
gate_func(diff_st_1.data(), num_qubits, wires, false, eps);
gate_func(diff_st_2.data(), num_qubits, wires, false, -eps);

std::vector<ComplexPrecisionT> gate_der_st(1U << num_qubits);
std::vector<ComplexPrecisionT> gate_der_st(size_t{1U} << num_qubits);

std::transform(
diff_st_1.cbegin(), diff_st_1.cend(), diff_st_2.cbegin(),
Expand All @@ -116,7 +116,7 @@ void testGeneratorForGate(RandomEngine &re, size_t num_qubits) {

scaleVector(gate_der_st, static_cast<PrecisionT>(0.5) / eps);

REQUIRE(gntr_st == PLApprox(gate_der_st).margin(1e-3));
REQUIRE(gntr_st == PLApprox(gate_der_st).margin(PrecisionT{1e-3}));
}
}
template <typename PrecisionT, typename ParamT, class GateImplementation,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ void testInverseKernelGate(RandomEngine &re, size_t num_qubits) {
callGateOps(func_ptr, st.data(), num_qubits, wires, false, params);
callGateOps(func_ptr, st.data(), num_qubits, wires, true, params);

REQUIRE(st == PLApprox(ini_st).margin(1e-7));
REQUIRE(st == PLApprox(ini_st).margin(PrecisionT{1e-7}));
}
} else {
static_cast<void>(re);
Expand Down
Loading