Add gradient methods to FinDiffGradients #9104

Merged
merged 9 commits on Nov 17, 2022
65 changes: 55 additions & 10 deletions qiskit/algorithms/gradients/finite_diff_estimator_gradient.py
@@ -14,6 +14,7 @@

from __future__ import annotations

import sys
from typing import Sequence

import numpy as np
@@ -28,29 +29,55 @@
from .base_estimator_gradient import BaseEstimatorGradient
from .estimator_gradient_result import EstimatorGradientResult

if sys.version_info >= (3, 8):
# pylint: disable=no-name-in-module, ungrouped-imports
from typing import Literal
else:
from typing_extensions import Literal


class FiniteDiffEstimatorGradient(BaseEstimatorGradient):
"""
Compute the gradients of the expectation values by the finite difference method.
"""

def __init__(self, estimator: BaseEstimator, epsilon: float, options: Options | None = None):
"""
def __init__(
self,
estimator: BaseEstimator,
epsilon: float,
options: Options | None = None,
*,
method: Literal["central", "forward", "backward"] = "central",
):
r"""
Args:
estimator: The estimator used to compute the gradients.
epsilon: The offset size for the finite difference gradients.
options: Primitive backend runtime options used for circuit execution.
The order of priority is: options in ``run`` method > gradient's
default options > primitive's default setting.
Higher priority settings override lower priority settings.
method: The computation method of the gradients.

- ``central`` computes :math:`\frac{f(x+e)-f(x-e)}{2e}`,
- ``forward`` computes :math:`\frac{f(x+e) - f(x)}{e}`,
- ``backward`` computes :math:`\frac{f(x)-f(x-e)}{e}`

where :math:`e` is epsilon.

Raises:
ValueError: If ``epsilon`` is not positive.
TypeError: If ``method`` is invalid.
"""
if epsilon <= 0:
raise ValueError(f"epsilon ({epsilon}) should be positive.")
self._epsilon = epsilon
self._base_parameter_values_dict = {}
if method not in ("central", "forward", "backward"):
raise TypeError(
f"The argument method should be central, forward, or backward: {method} is given."
)
self._method = method
super().__init__(estimator, options)

def _run(
@@ -74,12 +101,25 @@ def _run(
metadata_.append({"parameters": [circuit.parameters[idx] for idx in indices]})

offset = np.identity(circuit.num_parameters)[indices, :]
plus = parameter_values_ + self._epsilon * offset
minus = parameter_values_ - self._epsilon * offset
n = 2 * len(indices)
job = self._estimator.run(
[circuit] * n, [observable] * n, plus.tolist() + minus.tolist(), **options
)
if self._method == "central":
plus = parameter_values_ + self._epsilon * offset
minus = parameter_values_ - self._epsilon * offset
n = 2 * len(indices)
job = self._estimator.run(
[circuit] * n, [observable] * n, plus.tolist() + minus.tolist(), **options
)
elif self._method == "forward":
plus = parameter_values_ + self._epsilon * offset
n = len(indices) + 1
job = self._estimator.run(
[circuit] * n, [observable] * n, [parameter_values_] + plus.tolist(), **options
)
elif self._method == "backward":
minus = parameter_values_ - self._epsilon * offset
n = len(indices) + 1
job = self._estimator.run(
[circuit] * n, [observable] * n, [parameter_values_] + minus.tolist(), **options
)
jobs.append(job)

# combine the results
@@ -90,8 +130,13 @@ def _run(

gradients = []
for result in results:
n = len(result.values) // 2 # is always a multiple of 2
gradient_ = (result.values[:n] - result.values[n:]) / (2 * self._epsilon)
if self._method == "central":
n = len(result.values) // 2 # is always a multiple of 2
gradient_ = (result.values[:n] - result.values[n:]) / (2 * self._epsilon)
elif self._method == "forward":
gradient_ = (result.values[1:] - result.values[0]) / self._epsilon
elif self._method == "backward":
gradient_ = (result.values[0] - result.values[1:]) / self._epsilon
gradients.append(gradient_)
opt = self._get_local_options(options)
return EstimatorGradientResult(gradients=gradients, metadata=metadata_, options=opt)
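
For context, a minimal usage sketch of the new keyword argument on the estimator gradient (the one-qubit circuit, observable, and parameter value are illustrative, and the reference ``Estimator`` from ``qiskit.primitives`` is assumed):

```python
from qiskit.circuit import Parameter, QuantumCircuit
from qiskit.primitives import Estimator
from qiskit.quantum_info import SparsePauliOp
from qiskit.algorithms.gradients import FiniteDiffEstimatorGradient

# One-qubit RX circuit: <Z> = cos(theta), so d<Z>/dtheta = -sin(theta).
theta = Parameter("theta")
circuit = QuantumCircuit(1)
circuit.rx(theta, 0)
observable = SparsePauliOp.from_list([("Z", 1)])

# "method" is keyword-only; "central" remains the default.
gradient = FiniteDiffEstimatorGradient(Estimator(), epsilon=1e-6, method="forward")
result = gradient.run([circuit], [observable], [[0.5]]).result()
print(result.gradients)  # approximately [-sin(0.5)] = [-0.479...]
```

Compared to the default ``central`` method, ``forward`` and ``backward`` trade accuracy (error :math:`O(\epsilon)` instead of :math:`O(\epsilon^2)`) for fewer evaluations: ``len(indices) + 1`` circuits per gradient instead of ``2 * len(indices)``, as the branches above show.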
80 changes: 67 additions & 13 deletions qiskit/algorithms/gradients/finite_diff_sampler_gradient.py
@@ -14,6 +14,7 @@

from __future__ import annotations

import sys
from typing import Sequence

import numpy as np
@@ -26,6 +27,12 @@
from .base_sampler_gradient import BaseSamplerGradient
from .sampler_gradient_result import SamplerGradientResult

if sys.version_info >= (3, 8):
# pylint: disable=no-name-in-module, ungrouped-imports
from typing import Literal
else:
from typing_extensions import Literal


class FiniteDiffSamplerGradient(BaseSamplerGradient):
"""Compute the gradients of the sampling probability by finite difference method."""
@@ -35,22 +42,37 @@ def __init__(
sampler: BaseSampler,
epsilon: float,
options: Options | None = None,
*,
method: Literal["central", "forward", "backward"] = "central",
):
"""
r"""
Args:
sampler: The sampler used to compute the gradients.
epsilon: The offset size for the finite difference gradients.
options: Primitive backend runtime options used for circuit execution.
The order of priority is: options in ``run`` method > gradient's
default options > primitive's default setting.
Higher priority settings override lower priority settings.
method: The computation method of the gradients.

- ``central`` computes :math:`\frac{f(x+e)-f(x-e)}{2e}`,
- ``forward`` computes :math:`\frac{f(x+e) - f(x)}{e}`,
- ``backward`` computes :math:`\frac{f(x)-f(x-e)}{e}`

where :math:`e` is epsilon.

Raises:
ValueError: If ``epsilon`` is not positive.
TypeError: If ``method`` is invalid.
"""
if epsilon <= 0:
raise ValueError(f"epsilon ({epsilon}) should be positive.")
self._epsilon = epsilon
if method not in ("central", "forward", "backward"):
raise TypeError(
f"The argument method should be central, forward, or backward: {method} is given."
)
self._method = method
super().__init__(sampler, options)

def _run(
@@ -70,10 +92,23 @@ def _run(
indices = [circuit.parameters.data.index(p) for p in parameters_]
metadata_.append({"parameters": [circuit.parameters[idx] for idx in indices]})
offset = np.identity(circuit.num_parameters)[indices, :]
plus = parameter_values_ + self._epsilon * offset
minus = parameter_values_ - self._epsilon * offset
n = 2 * len(indices)
job = self._sampler.run([circuit] * n, plus.tolist() + minus.tolist(), **options)
if self._method == "central":
plus = parameter_values_ + self._epsilon * offset
minus = parameter_values_ - self._epsilon * offset
n = 2 * len(indices)
job = self._sampler.run([circuit] * n, plus.tolist() + minus.tolist(), **options)
elif self._method == "forward":
plus = parameter_values_ + self._epsilon * offset
n = len(indices) + 1
job = self._sampler.run(
[circuit] * n, [parameter_values_] + plus.tolist(), **options
)
elif self._method == "backward":
minus = parameter_values_ - self._epsilon * offset
n = len(indices) + 1
job = self._sampler.run(
[circuit] * n, [parameter_values_] + minus.tolist(), **options
)
jobs.append(job)

# combine the results
@@ -84,14 +119,33 @@ def _run(

gradients = []
for i, result in enumerate(results):
n = len(result.quasi_dists) // 2
gradient_ = []
for dist_plus, dist_minus in zip(result.quasi_dists[:n], result.quasi_dists[n:]):
grad_dist = np.zeros(2 ** circuits[i].num_qubits)
grad_dist[list(dist_plus.keys())] += list(dist_plus.values())
grad_dist[list(dist_minus.keys())] -= list(dist_minus.values())
grad_dist /= 2 * self._epsilon
gradient_.append(dict(enumerate(grad_dist)))
if self._method == "central":
n = len(result.quasi_dists) // 2
gradient_ = []
for dist_plus, dist_minus in zip(result.quasi_dists[:n], result.quasi_dists[n:]):
grad_dist = np.zeros(2 ** circuits[i].num_qubits)
grad_dist[list(dist_plus.keys())] += list(dist_plus.values())
grad_dist[list(dist_minus.keys())] -= list(dist_minus.values())
grad_dist /= 2 * self._epsilon
gradient_.append(dict(enumerate(grad_dist)))
elif self._method == "forward":
gradient_ = []
dist_zero = result.quasi_dists[0]
for dist_plus in result.quasi_dists[1:]:
grad_dist = np.zeros(2 ** circuits[i].num_qubits)
grad_dist[list(dist_plus.keys())] += list(dist_plus.values())
grad_dist[list(dist_zero.keys())] -= list(dist_zero.values())
grad_dist /= self._epsilon
gradient_.append(dict(enumerate(grad_dist)))
elif self._method == "backward":
gradient_ = []
dist_zero = result.quasi_dists[0]
for dist_minus in result.quasi_dists[1:]:
grad_dist = np.zeros(2 ** circuits[i].num_qubits)
grad_dist[list(dist_zero.keys())] += list(dist_zero.values())
grad_dist[list(dist_minus.keys())] -= list(dist_minus.values())
grad_dist /= self._epsilon
gradient_.append(dict(enumerate(grad_dist)))
gradients.append(gradient_)

opt = self._get_local_options(options)
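An analogous sketch for the sampler variant (again illustrative, assuming the reference ``Sampler`` primitive; note the sampler requires measured circuits):

```python
from qiskit.circuit import Parameter, QuantumCircuit
from qiskit.primitives import Sampler
from qiskit.algorithms.gradients import FiniteDiffSamplerGradient

theta = Parameter("theta")
circuit = QuantumCircuit(1)
circuit.rx(theta, 0)
circuit.measure_all()  # the sampler needs measurements to produce quasi-dists

gradient = FiniteDiffSamplerGradient(Sampler(), epsilon=1e-6, method="backward")
result = gradient.run([circuit], [[0.5]]).result()
# One dict per parameter, mapping each outcome k to d p(k) / d theta.
print(result.gradients)
```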
10 changes: 10 additions & 0 deletions releasenotes/notes/gradient-methods-b2ec34916b83c17b.yaml
@@ -0,0 +1,10 @@
---
features:
- |
:class:`.FiniteDiffEstimatorGradient` and :class:`.FiniteDiffSamplerGradient`
have a new keyword argument ``method``, which selects one of three gradient
computation methods: ``"central"``, ``"forward"``, or ``"backward"``.
``"central"`` calculates :math:`\frac{f(x+e)-f(x-e)}{2e}`, ``"forward"``
:math:`\frac{f(x+e) - f(x)}{e}`, and ``"backward"`` :math:`\frac{f(x)-f(x-e)}{e}`,
where :math:`e` is the offset epsilon.
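
As a quick illustration of the three formulas (plain NumPy on a classical stand-in function; this is not part of the release note itself):

```python
import numpy as np

f = np.sin          # stand-in for the expectation value f(x)
x, e = 1.0, 1e-6

central = (f(x + e) - f(x - e)) / (2 * e)  # error O(e**2), two new evaluations
forward = (f(x + e) - f(x)) / e            # error O(e), reuses f(x)
backward = (f(x) - f(x - e)) / e           # error O(e), reuses f(x)

print(central, forward, backward, np.cos(x))  # all approximate cos(1.0)
```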