From 4bbe2bde8082d832afb872ad694895ce279a52b9 Mon Sep 17 00:00:00 2001 From: Tomasz Rybotycki Date: Fri, 29 Jul 2022 18:46:07 +0200 Subject: [PATCH] 55 general losses bobs (#56) * refactor(theboss): Work in progress on refactoring theboss. * refactor(theboss): Refactor of the boson sampling utilities. * refactor(theboss): Remove quantum computation utilities tests as they don't test anything now. * refactor(theboss): Add boson sampling utilities tests file. * refactor(theboss): Add tests for boson sampling utilities. Fix problems in related files. * refactor(theboss): Add initial version of fixed non-uniform losses approximation strategy. Black whole project. * fix(interface): Fix the interface issues by making some of the interfaces more general. * fix(permanents): Fix classical permanent calculator after the quantum utilities script refactor. * fix(permanents): Fix the tests after the quantum utilities script refactor. * refactor(typehints): Fix typehints in most of the files. * feat(tests): Add test for the non-uniform losses strategies. * refactor(tests): Add comments and fix typehints in the tests. 
* update(version): Update theboss version to 3.0.0 Co-authored-by: Tomasz Rybotycki --- setup.py | 2 +- tests/gcc_based_strategies_tests_base.py | 41 +- tests/simulation_strategies_tests_common.py | 24 +- tests/test_bobs_strategy.py | 4 +- tests/test_boson_sampling_utilities.py | 476 ++++++++++++++ tests/test_distribution_calculators.py | 40 +- tests/test_exact_distribution_calculator.py | 7 +- ...ull_approximation_simulation_strategies.py | 16 +- tests/test_network_simulation_strategies.py | 81 +-- ...nonuniform_losses_simulation_strategies.py | 317 ++++++++++ tests/test_quantum_computations_utilities.py | 57 -- theboss/boson_sampling_simulator.py | 10 +- .../boson_sampling_utilities.py | 598 +++++++++++------- ..._ryser_submatrices_permanent_calculator.py | 10 +- .../bs_permanent_calculator_base.py | 66 +- .../bs_permanent_calculator_interface.py | 27 +- ...s_submatrices_permanent_calculator_base.py | 44 +- .../classic_permanent_calculator.py | 4 +- .../ryser_permanent_calculator.py | 12 +- ...bution_from_separable_states_calculator.py | 12 +- .../bs_distribution_calculator_interface.py | 25 +- ...stribution_calculator_with_fixed_losses.py | 8 +- ..._exact_distribution_with_uniform_losses.py | 14 +- ...bs_sample_based_distribution_calculator.py | 15 +- .../lossless_network_simulation_strategy.py | 21 +- .../lossy_network_simulation_strategy.py | 34 +- theboss/quantum_computations_utilities.py | 35 +- .../cliffords_r_simulation_strategy.py | 104 ++- .../fixed_loss_simulation_strategy.py | 95 ++- ...ralized_cliffords_b_simulation_strategy.py | 35 +- ...ds_b_uniform_losses_simulation_strategy.py | 7 +- ...neralized_cliffords_simulation_strategy.py | 159 +++-- ...ords_uniform_losses_simulation_strategy.py | 10 +- ...neralized_cliffords_simulation_strategy.py | 76 ++- ..._state_approximated_simulation_strategy.py | 220 +++++-- ...eralized_cliffords_simulation_strategy.py} | 65 +- ...onuniform_losses_approximation_strategy.py | 314 +++++++-- 
.../simulation_strategy_factory.py | 1 - .../simulation_strategy_interface.py | 8 +- .../uniform_loss_simulation_strategy.py | 51 +- 40 files changed, 2329 insertions(+), 816 deletions(-) create mode 100644 tests/test_boson_sampling_utilities.py create mode 100644 tests/test_nonuniform_losses_simulation_strategies.py delete mode 100644 tests/test_quantum_computations_utilities.py rename theboss/simulation_strategies/{generalized_cliffords_simulation_strategy_v2.py => mode_assignment_generalized_cliffords_simulation_strategy.py} (56%) diff --git a/setup.py b/setup.py index 3abcd02..57b641f 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name="theboss", - version="2.1.1", + version="3.0.0", author="Tomasz Rybotycki", author_email="rybotycki.tomasz+theboss@gmail.com", long_description=long_description, diff --git a/tests/gcc_based_strategies_tests_base.py b/tests/gcc_based_strategies_tests_base.py index 75ee13e..079e014 100644 --- a/tests/gcc_based_strategies_tests_base.py +++ b/tests/gcc_based_strategies_tests_base.py @@ -7,8 +7,7 @@ """ from theboss.boson_sampling_utilities.boson_sampling_utilities import ( - calculate_number_of_possible_n_particle_m_mode_output_states, - calculate_number_of_possible_lossy_n_particle_m_mode_output_states, + bosonic_space_dimension, ) from theboss.distribution_calculators.bs_exact_distribution_with_uniform_losses import ( BSDistributionCalculatorWithFixedLosses, @@ -22,22 +21,34 @@ class GCCBasedStrategiesTestsBase(TestBSClassicalSimulationStrategies): + """ + A base class for GCC based tests. It takes care of some boilerplate code. + """ + def _perform_lossless_test(self, strategy: StrategyType = StrategyType.GCC) -> None: + """ + Boilerplate code taking care of performing common part of the uniform losses + tests. + + :param strategy: + Strategy to test. 
+ """ self._strategies_factory.experiment_configuration = ( self._sampling_tvd_experiment_config ) self._strategies_factory.strategy_type = strategy distance_experiment_configuration = SamplingAccuracyExperimentConfiguration( - # This exact calculator, when there are no losses, will do the work just fine. + # This exact calculator, when there are no losses, will do the work just + # fine. exact_calculator=BSDistributionCalculatorWithFixedLosses( self._sampling_tvd_experiment_config, self._bs_permanent_calculator ), estimation_calculator=self._generate_frequencies_calculator( self._strategies_factory.generate_strategy() ), - outcomes_number=calculate_number_of_possible_n_particle_m_mode_output_states( - n=self._sampling_tvd_experiment_config.number_of_particles_left, - m=self._sampling_tvd_experiment_config.number_of_modes, + outcomes_number=bosonic_space_dimension( + particles_number=self._sampling_tvd_experiment_config.number_of_particles_left, + modes_number=self._sampling_tvd_experiment_config.number_of_modes, ), approximation_tvd_bound=0, # This strategy returns exact solution. ) @@ -48,7 +59,15 @@ def _perform_test_for_uniform_losses( strategy: StrategyType = StrategyType.UNIFORM_LOSSES_GCC, approximation_bound: int = 0, ) -> None: + """ + Boilerplate code taking care of performing common part of the uniform losses + tests. + :param strategy: + Strategy to test. + :param approximation_bound: + Approximation tvd upperbound. + """ self._strategies_factory.experiment_configuration = ( self._sampling_tvd_experiment_config ) @@ -73,15 +92,17 @@ def _perform_test_for_uniform_losses( ) distance_experiment_configuration = SamplingAccuracyExperimentConfiguration( - # This exact calculator, when there are no losses, will do the work just fine. + # This exact calculator, when there are no losses, will do the work just + # fine. 
exact_calculator=exact_calculator, estimation_calculator=self._generate_frequencies_calculator( self._strategies_factory.generate_strategy(), outcomes=exact_calculator.get_outcomes_in_proper_order(), ), - outcomes_number=calculate_number_of_possible_lossy_n_particle_m_mode_output_states( - n=self._sampling_tvd_experiment_config.initial_number_of_particles, - m=self._sampling_tvd_experiment_config.number_of_modes, + outcomes_number=bosonic_space_dimension( + particles_number=self._sampling_tvd_experiment_config.initial_number_of_particles, + modes_number=self._sampling_tvd_experiment_config.number_of_modes, + losses=True, ), approximation_tvd_bound=approximation_bound, ) diff --git a/tests/simulation_strategies_tests_common.py b/tests/simulation_strategies_tests_common.py index 067461c..919f263 100644 --- a/tests/simulation_strategies_tests_common.py +++ b/tests/simulation_strategies_tests_common.py @@ -9,8 +9,8 @@ import unittest from copy import deepcopy from dataclasses import dataclass -from typing import List -from numpy import array, ndarray, average +from typing import List, Tuple +from numpy import array, average from numpy.random import randint from scipy.stats import unitary_group from theboss.boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_factory import ( @@ -35,7 +35,7 @@ SimulationStrategyInterface, ) from theboss.boson_sampling_utilities.boson_sampling_utilities import ( - calculate_number_of_possible_n_particle_m_mode_output_states, + bosonic_space_dimension, ) from tqdm import tqdm @@ -78,7 +78,6 @@ def __init__(self, *args, **kwargs): self._calculator_initial_state = None def setUp(self) -> None: - print(f"\nIn method {self._testMethodName}. 
Test start!\n") self._permutation_matrix = array( [ [0, 0, 1, 0, 0], @@ -108,7 +107,7 @@ def setUp(self) -> None: self._sampling_tvd_experiment_config = BosonSamplingExperimentConfiguration( interferometer_matrix=self._permutation_matrix, - initial_state=array(self._distance_calculation_initial_state, dtype=int), + initial_state=self._distance_calculation_initial_state, initial_number_of_particles=distance_calculation_initial_number_of_particles, number_of_modes=len(self._distance_calculation_initial_state), number_of_particles_lost=self._distance_calculation_number_of_particles_lost, @@ -136,7 +135,7 @@ def setUp(self) -> None: haar_random_number_of_particles_lost = 2 self._haar_random_experiment_configuration = BosonSamplingExperimentConfiguration( interferometer_matrix=array([], dtype=complex), - initial_state=array(self._haar_random_experiment_input_state, dtype=int), + initial_state=self._haar_random_experiment_input_state, initial_number_of_particles=haar_random_number_of_particles_lost, number_of_modes=len(self._haar_random_experiment_input_state), number_of_particles_lost=haar_random_number_of_particles_lost, @@ -147,9 +146,6 @@ def setUp(self) -> None: self._calculator_initial_state = self._distance_calculation_initial_state - def tearDown(self) -> None: - print("\nTest finished!\n") - def _prepare_lossless_distance_experiments_settings_with_binned_inputs( self, ) -> None: @@ -241,7 +237,9 @@ def _check_if_approximation_is_within_bounds( ) def _generate_frequencies_calculator( - self, strategy: SimulationStrategyInterface, outcomes: List[ndarray] = None + self, + strategy: SimulationStrategyInterface, + outcomes: List[Tuple[int, ...]] = None, ) -> BSSampleBasedDistributionCalculator: estimated_distribution_calculator = BSSampleBasedDistributionCalculator( experiment_configuration=self._sampling_tvd_experiment_config, @@ -321,9 +319,9 @@ def _prepare_lossless_distance_experiment_settings(self) -> None: def 
_test_state_average_probability_for_haar_random_matrices( self, strategy_factory: SimulationStrategyFactory ) -> None: - number_of_outcomes = calculate_number_of_possible_n_particle_m_mode_output_states( - n=self._haar_random_experiment_configuration.number_of_particles_left, - m=self._haar_random_experiment_configuration.number_of_modes, + number_of_outcomes = bosonic_space_dimension( + particles_number=self._haar_random_experiment_configuration.number_of_particles_left, + modes_number=self._haar_random_experiment_configuration.number_of_modes, ) error_bound = count_tv_distance_error_bound_of_experiment_results( diff --git a/tests/test_bobs_strategy.py b/tests/test_bobs_strategy.py index d193c86..dcabaac 100644 --- a/tests/test_bobs_strategy.py +++ b/tests/test_bobs_strategy.py @@ -1,7 +1,9 @@ __author__ = "Tomasz Rybotycki" """ - The aim of this script is to test the BOBS strategy accuracy. + The aim of this script is to test the BOBS strategy accuracy. This script tests + only simulations with uniform or no losses. Nonuniform losses have been placed + in another script. """ from tests.gcc_based_strategies_tests_base import GCCBasedStrategiesTestsBase diff --git a/tests/test_boson_sampling_utilities.py b/tests/test_boson_sampling_utilities.py new file mode 100644 index 0000000..1de3e93 --- /dev/null +++ b/tests/test_boson_sampling_utilities.py @@ -0,0 +1,476 @@ +__author__ = "Tomasz Rybotycki" + +""" + This script contains tests for the boson sampling utilities. 
+""" + +import unittest +from theboss.boson_sampling_utilities.boson_sampling_utilities import ( + mode_assignment_to_mode_occupation, + mode_occupation_to_mode_assignment, + bosonic_space_dimension, + generate_possible_states, + generate_lossy_n_particle_input_states, + get_modes_transmissivity_values_from_matrix, + prepare_interferometer_matrix_in_expanded_space, + generate_state_types, + compute_number_of_state_types, + compute_number_of_states_of_given_type, + compute_number_of_k_element_integer_partitions_of_n, + generate_qft_matrix_for_first_m_modes, + generate_random_phases_matrix_for_first_m_modes, +) + +from theboss.quantum_computations_utilities import compute_qft_matrix + +from typing import List, Tuple, Set, Iterable, Dict +from scipy.stats import unitary_group +from numpy import diag, sqrt, isclose, eye, nonzero, abs + + +class TestQuantumComputationsUtilities(unittest.TestCase): + def setUp(self) -> None: + """ + Basic method of the unittest.TestCase. Sets up the variables used in the tests. + """ + + # Corresponding states in mode occupation and mode assigment representations. + self._mode_occupation_state: Tuple[int, ...] = ( + 1, + 2, + 0, + 1, + 0, + ) + self._mode_assigment_state: Tuple[int, ...] = ( + 0, + 1, + 1, + 3, + ) + self._trimmed_mode_occupation_state: Tuple[int, ...] = ( + 1, + 2, + 0, + 1, + ) + + self._m: int = 10 # Modes number + self._n: int = 5 # Particles number + + self._empty_state = tuple([0 for _ in range(self._m)]) + + self._lossless_input: Tuple[int, ...] 
= (3, 2, 0, 1, 0) + self._lossy_input_states: List[Tuple[int, ...]] = [ + (3, 2, 0, 1, 0), + (3, 2, 0, 0, 0), + (3, 1, 0, 1, 0), + (3, 1, 0, 0, 0), + (3, 0, 0, 1, 0), + (3, 0, 0, 0, 0), + (2, 2, 0, 1, 0), + (2, 2, 0, 0, 0), + (2, 1, 0, 1, 0), + (2, 1, 0, 0, 0), + (2, 0, 0, 1, 0), + (2, 0, 0, 0, 0), + (1, 2, 0, 1, 0), + (1, 2, 0, 0, 0), + (1, 1, 0, 1, 0), + (1, 1, 0, 0, 0), + (1, 0, 0, 1, 0), + (1, 0, 0, 0, 0), + (0, 2, 0, 1, 0), + (0, 2, 0, 0, 0), + (0, 1, 0, 1, 0), + (0, 1, 0, 0, 0), + (0, 0, 0, 1, 0), + (0, 0, 0, 0, 0), + ] + + # For the lossy matrix tests. + self._transmissivities: List[float] = [0.1, 0.2, 0.3, 0.4, 0.5] + self._matrix = unitary_group.rvs(len(self._transmissivities)) + self._matrix = self._matrix @ diag([sqrt(t) for t in self._transmissivities]) + + # Matrix space expansion test. + self._matrix_to_expand = eye(3) @ diag([sqrt(i / 10) for i in range(1, 4)]) + self._expanded_matrix = eye(6) + + for i in range(3): + self._expanded_matrix[i][i] = sqrt(i / 10) + self._expanded_matrix[i + 3][i + 3] = sqrt(i / 10) + self._expanded_matrix[i + 3][i] = sqrt(1 - i / 10) + self._expanded_matrix[i][i + 3] = sqrt(1 - i / 10) + + # QFT and random phases matrices generation tests. + self._first_m_modes: int = 3 + self._all_modes: int = 3 + + def test_mode_occupation_to_mode_assigment(self) -> None: + """ + Check if a state in the mode occupation representation is properly + transformed into a state in the mode assigment representation. + """ + + transformed_state: Tuple[int, ...] = mode_occupation_to_mode_assignment( + self._mode_occupation_state + ) + + self.assertTrue( + transformed_state == self._mode_assigment_state, + f"{transformed_state} != {self._mode_assigment_state}", + ) + + def test_mode_assigment_to_mode_occupation(self) -> None: + """ + Check if a state in the mode assigment representation is properly + transformed into a state in the mode occupation representation. + """ + + transformed_state: Tuple[int, ...] 
= mode_assignment_to_mode_occupation( + self._mode_assigment_state, len(self._mode_occupation_state) + ) + + self.assertTrue( + transformed_state == self._mode_occupation_state, + f"{transformed_state} != {self._mode_occupation_state}", + ) + + def test_mode_assigment_to_mode_occupation_wo_modes_number_specification( + self, + ) -> None: + """ + Tests if the default behavior of the assigment to occupation representation, + when the number of modes is not specified. + """ + + transformed_state: Tuple[int, ...] = mode_assignment_to_mode_occupation( + self._mode_assigment_state + ) + + self.assertTrue( + transformed_state == self._trimmed_mode_occupation_state, + f"{transformed_state} != {self._trimmed_mode_occupation_state}", + ) + + def test_no_mode_states_generation(self) -> None: + """ + Test if generate_possible_states method returns an empty list if the number of + modes equals 0. + """ + all_states: List[Tuple[int, ...]] = generate_possible_states(self._n, 0) + self.assertTrue(len(all_states) == 0, f"There seems to be states with 0 modes!") + + def test_negative_particles_number_states_generation(self) -> None: + """ + Test if generate_possible_states method returns an empty list if the number of + particles is negative. + """ + all_states: List[Tuple[int, ...]] = generate_possible_states(-1, self._m, True) + self.assertTrue( + len(all_states) == 0, + f"There seems to be states with negative number of particles!", + ) + + def test_no_particles_states_generation(self) -> None: + """ + Test if generate_possible_states method returns a particle-less state of proper + size when the number of particles is equal to 0. + """ + all_states = generate_possible_states(0, self._m, True) + self.assertTrue( + len(all_states) == 1, f"There's more than 1 particle-less state!" 
+ ) + self.assertTrue( + all_states[0] == self._empty_state, + f"{all_states[0]} != {self._empty_state}", + ) + + def test_generated_states_uniqueness(self) -> None: + """ + Tests if every state generated by the generate_possible_states is unique. + """ + all_states: List[Tuple[int, ...]] = generate_possible_states( + self._n, self._m, True + ) + + self.assertTrue( + len(set(all_states)) == len(all_states), f"Some states are not unique!" + ) + + def test_generated_states_modes_number(self) -> None: + """ + Tests if every state has the proper modes number. + """ + for state in generate_possible_states(self._n, self._m, True): + self.assertTrue( + len(state) == self._m, + f"The number of modes in {state} is not {self._m}!", + ) + + def test_generated_states_particles_number(self) -> None: + """ + Tests if every state has the proper particles number in a case without losses. + """ + for state in generate_possible_states(self._n, self._m, False): + self.assertTrue( + sum(state) == self._n, + f"The number of particles in {state} is not {self._n}!", + ) + + def test_generated_lossy_states_particles_number(self) -> None: + """ + Tests if every state has equal or fewer particles than specified. + """ + for state in generate_possible_states(self._n, self._m, True): + self.assertTrue( + sum(state) <= self._n, + f"The number of particles in {state} is not <= {self._n}!", + ) + + def test_states_number(self) -> None: + """ + Test if the number of generated states is theoretically correct. + """ + + all_states: List[Tuple[int, ...]] = generate_possible_states(self._n, self._m) + + theoretical_dimension: int = bosonic_space_dimension(self._n, self._m) + + self.assertTrue( + len(all_states) == theoretical_dimension, + f"{len(all_states)} != {theoretical_dimension}", + ) + + def test_lossy_states_number(self) -> None: + """ + Test if the number of generated lossy states is theoretically correct. 
+ """ + + all_lossy_states = generate_possible_states(self._n, self._m, True) + + theoretical_dimension: int = bosonic_space_dimension(self._n, self._m, True) + + self.assertTrue( + len(all_lossy_states) == theoretical_dimension, + f"{len(all_lossy_states)} != {theoretical_dimension}", + ) + + def test_lossy_input_states_generation(self) -> None: + """ + Test if generate_lossy_input_states method actually generates all the lossy + input states it should. + """ + for particles_left_number in range(sum(self._lossless_input)): + lossy_input_states: Set[Tuple[int, ...]] = set( + generate_lossy_n_particle_input_states( + self._lossless_input, particles_left_number + ) + ) + for state in self._lossy_input_states: + if sum(state) == particles_left_number: + self.assertTrue( + state in lossy_input_states, + f"{state} is not in the lossy input states {lossy_input_states} of {self._lossless_input} for n = {particles_left_number}!", + ) + + def test_modes_transmissivity_value_computation(self) -> None: + """ + Test if proper transmissivities are obtained from the lossy matrix. + + Notice that due to approximation this might not be accurate, hence the isclose + method is used for comparison. + """ + transmissivities: Set[float] = set( + get_modes_transmissivity_values_from_matrix(self._matrix) + ) + for transmissivity in self._transmissivities: + self.assertTrue( + self._is_close_to_any(transmissivity, transmissivities), + f"{transmissivity} not in {transmissivities}!", + ) + + @staticmethod + def _is_close_to_any(value: float, values: Iterable[float]) -> bool: + """ + This method checks if value is close to any of given values. + + :param value: + Value to compare with the other values. + + :param values: + Other values used for the comparison. + + :return: + True is value is close to any of the values, else False. 
+ """ + for v in values: + if isclose(value, v): + return True + return False + + def test_expanded_space_lossy_interferometer_preparation(self) -> None: + """ + Tests if the lossy interferometer is properly expanded into the higher dimension + for nonuniform losses' simulation. + """ + expanded_interferometer = prepare_interferometer_matrix_in_expanded_space( + self._matrix_to_expand + ) + self.assertTrue((expanded_interferometer == self._expanded_matrix).all) + + def test_expanded_space_lossless_interferometer_preparation(self) -> None: + """ + Tests if the lossless interferometer is properly expanded into the higher + dimension (as an edge-case). + """ + m: int = 3 + expanded_matrix = prepare_interferometer_matrix_in_expanded_space(eye(m)) + self.assertTrue((expanded_matrix == eye(2 * m)).all) + + def test_lossless_state_types_generation(self) -> None: + """ + Tests if all lossless state types are generated. + """ + self._test_state_types_generation(losses=False) + + def test_lossy_state_types_generation(self) -> None: + """ + Tests if all lossy state types are generated. + """ + self._test_state_types_generation(losses=True) + + def _test_state_types_generation(self, losses: bool = False) -> None: + """ + This method takes care of boilerplate code concerning state types generation. + + :param losses: + A flag informing if losses should be considered. + """ + all_states: List[Tuple[int, ...]] = generate_possible_states( + self._n, self._m, losses + ) + state_types: Set[Tuple[int, ...]] = set() + generated_state_types = generate_state_types(self._m, self._n, losses) + + for state in all_states: + state_types.add(tuple(sorted(state, reverse=True))) + + self.assertTrue(len(generated_state_types) == len(state_types)) + + def test_lossless_state_types_number_computation(self) -> None: + """ + Test if the number of lossless state types is computed properly. 
+ """ + self._test_state_types_number_computation() + + def test_lossy_state_types_number_computation(self) -> None: + """ + Test if the number of lossy state types is computed properly. + """ + self._test_state_types_number_computation(True) + + def _test_state_types_number_computation(self, losses: bool = False) -> None: + """ + This method takes care of boilerplate code concerning state types number + computation. + + :param losses: + A flag informing if losses should be considered. + """ + state_types_number = compute_number_of_state_types(self._m, self._n, losses) + state_types = generate_state_types(self._m, self._n, losses) + + self.assertTrue( + len(state_types) == state_types_number, + f"State types number is different than {state_types_number}!", + ) + + def test_number_of_states_of_given_type_computation(self) -> None: + """ + Test if the number of states of given type are properly computed. + """ + state_types: List[Tuple[int, ...]] = generate_state_types(self._m, self._n) + all_states: List[Tuple[int, ...]] = generate_possible_states(self._n, self._m) + + counts: Dict[Tuple[int, ...], int] = {} + + for state_type in state_types: + counts[state_type] = 0 + + for state in all_states: + counts[tuple(sorted(state, reverse=True))] += 1 + + for state_type in state_types: + states_of_given_type_number = compute_number_of_states_of_given_type( + state_type + ) + self.assertTrue( + states_of_given_type_number == counts[state_type], + f"{states_of_given_type_number} != {counts[state_type]}", + ) + + def test_number_of_k_element_partitions_of_n_computation(self) -> None: + """ + Test if the number of :math:`k`-element partitions of integer :math:`n` is + computed properly. Notice that the number of partitions is closely related to + the number of state types, thus the latter can be used in the tests. 
+ """ + state_types: List[Tuple[int, ...]] = generate_state_types(self._n, self._n) + counts: Dict[int, int] = {} + + for i in range(self._n): + counts[i + 1] = 0 + + for state_type in state_types: + counts[len(nonzero(state_type)[0])] += 1 + + for k in range(1, self._n + 1): + self.assertTrue( + counts[k] + == compute_number_of_k_element_integer_partitions_of_n(k, self._n), + f"{counts[k]} != {compute_number_of_k_element_integer_partitions_of_n(k, self._n)}", + ) + + def test_qft_matrix_generation_on_first_m_modes(self) -> None: + """ + Test if the :math:`m \\times m` QFT matrix is properly embedded into the first + :math:`m` modes of a bigger (interferometer) matrix. + """ + + # This should be tested in the other file. + small_qft_matrix = compute_qft_matrix(self._first_m_modes) + test_matrix = eye(self._all_modes, dtype=complex) + + for i in range(self._first_m_modes): + for j in range(self._first_m_modes): + test_matrix[i][j] = small_qft_matrix[i][j] + + generated_qft_matrix = generate_qft_matrix_for_first_m_modes( + self._first_m_modes, self._all_modes + ) + + self.assertTrue((test_matrix == generated_qft_matrix).all) + + def test_random_phases_matrix_on_first_m_modes(self) -> None: + """ + Test if the :math:`m \\times m` random phases diagonal matrix is properly + embedded into the first :math:`m` modes of a bigger (interferometer) matrix. + """ + generated_matrix = generate_random_phases_matrix_for_first_m_modes( + self._first_m_modes, self._all_modes + ) + + test_matrix = eye(self._all_modes, dtype=complex) + + # Add random phases to the test matrix. + for i in range(self._first_m_modes): + test_matrix[i][i] = generated_matrix[i][i] + # Also test if random number are actually phases. 
+ self.assertTrue( + isclose(abs(test_matrix[i][i]), 1), + f"|{test_matrix[i][i]}| = {abs(test_matrix[i][i])} != 1", + ) + + self.assertTrue((generated_matrix == test_matrix).all) diff --git a/tests/test_distribution_calculators.py b/tests/test_distribution_calculators.py index eda39a4..f46d0b6 100644 --- a/tests/test_distribution_calculators.py +++ b/tests/test_distribution_calculators.py @@ -14,8 +14,8 @@ BosonSamplingExperimentConfiguration, ) -from theboss.boson_sampling_utilities.permanent_calculators.chin_huh_permanent_calculator import ( - ChinHuhPermanentCalculator, +from theboss.boson_sampling_utilities.permanent_calculators.ryser_permanent_calculator import ( + RyserPermanentCalculator, ) @@ -31,17 +31,17 @@ def setUp(self) -> None: self._matrix = unitary_group.rvs(self._m) - self._permanent_calculator = ChinHuhPermanentCalculator(self._matrix) + self._permanent_calculator = RyserPermanentCalculator(self._matrix) - self._config: BosonSamplingExperimentConfiguration = None - - def _prepare_binned_input_test_setup(self) -> None: - self._prepare_test_setup(self._binned_input) - - def _prepare_std_input_test_setup(self) -> None: - self._prepare_test_setup(self._std_input) + self._config: BosonSamplingExperimentConfiguration def _prepare_test_setup(self, input_state) -> None: + """ + Boilerplate code for preparing the experiment configuration. + + :param input_state: + Input state (in 2nd quantization representation). + """ self._config = BosonSamplingExperimentConfiguration( interferometer_matrix=self._matrix, initial_state=input_state, @@ -54,7 +54,10 @@ def _prepare_test_setup(self, input_state) -> None: self._permanent_calculator.input_state = input_state def test_uniform_losses_calc_distribution_sum_for_standard_input(self) -> None: - self._prepare_std_input_test_setup() + """ + Test uniform losses calculator for standard input. 
+ """ + self._prepare_test_setup(self._std_input) calc = BSDistributionCalculatorWithUniformLosses( self._config, self._permanent_calculator ) @@ -63,7 +66,10 @@ def test_uniform_losses_calc_distribution_sum_for_standard_input(self) -> None: self.assertTrue(isclose(sum(distribution), 1)) def test_fixed_losses_calc_distribution_sum_for_standard_input(self) -> None: - self._prepare_std_input_test_setup() + """ + Test fixed losses calculator for standard input. + """ + self._prepare_test_setup(self._std_input) calc = BSDistributionCalculatorWithFixedLosses( self._config, self._permanent_calculator ) @@ -71,7 +77,10 @@ def test_fixed_losses_calc_distribution_sum_for_standard_input(self) -> None: self.assertTrue(isclose(sum(distribution), 1)) def test_uniform_losses_calc_distribution_sum_for_binned_input(self) -> None: - self._prepare_binned_input_test_setup() + """ + Test uniform losses calculator for binned input. + """ + self._prepare_test_setup(self._binned_input) calc = BSDistributionCalculatorWithUniformLosses( self._config, self._permanent_calculator ) @@ -79,7 +88,10 @@ def test_uniform_losses_calc_distribution_sum_for_binned_input(self) -> None: self.assertTrue(isclose(sum(distribution), 1)) def test_fixed_losses_calc_distribution_sum_for_binned_input(self) -> None: - self._prepare_binned_input_test_setup() + """ + Test fixed losses calculator for standard input. 
+ """ + self._prepare_test_setup(self._binned_input) calc = BSDistributionCalculatorWithFixedLosses( self._config, self._permanent_calculator ) diff --git a/tests/test_exact_distribution_calculator.py b/tests/test_exact_distribution_calculator.py index a922a8a..573c7c6 100644 --- a/tests/test_exact_distribution_calculator.py +++ b/tests/test_exact_distribution_calculator.py @@ -2,7 +2,7 @@ import unittest -from numpy import array, complex128, int64, allclose +from numpy import array, complex128, allclose from theboss.boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_factory import ( BSPermanentCalculatorFactory, @@ -17,7 +17,8 @@ class TestExactLossyBosonSamplingDistributionCalculator(unittest.TestCase): def setUp(self) -> None: - # Define some additional variables for more clear experiment configuration assignment. + # Define some additional variables for more clear experiment configuration + # assignment. self.number_of_particles_lost = 2 # Generate permutation matrix and define initial state. @@ -32,7 +33,7 @@ def setUp(self) -> None: dtype=complex128, ) - self.initial_state = array([1, 1, 1, 0, 0], dtype=int64) + self.initial_state = [1, 1, 1, 0, 0] # Create configuration object. 
self.experiment_configuration = BosonSamplingExperimentConfiguration( diff --git a/tests/test_full_approximation_simulation_strategies.py b/tests/test_full_approximation_simulation_strategies.py index 8ea85ba..56b961f 100644 --- a/tests/test_full_approximation_simulation_strategies.py +++ b/tests/test_full_approximation_simulation_strategies.py @@ -6,8 +6,7 @@ """ from theboss.boson_sampling_utilities.boson_sampling_utilities import ( - calculate_number_of_possible_lossy_n_particle_m_mode_output_states, - calculate_number_of_possible_n_particle_m_mode_output_states, + bosonic_space_dimension, ) from theboss.distribution_calculators.bs_exact_distribution_with_uniform_losses import ( BSDistributionCalculatorWithUniformLosses, @@ -132,9 +131,9 @@ def test_sampling_accuracy_for_fixed_losses_strategy(self) -> None: self._sampling_tvd_experiment_config.number_of_particles_left, ) - outcomes_number = calculate_number_of_possible_n_particle_m_mode_output_states( - n=self._sampling_tvd_experiment_config.number_of_particles_left, - m=self._sampling_tvd_experiment_config.number_of_modes, + outcomes_number = bosonic_space_dimension( + particles_number=self._sampling_tvd_experiment_config.number_of_particles_left, + modes_number=self._sampling_tvd_experiment_config.number_of_modes, ) self._perform_full_approximation_strategies_test( @@ -158,9 +157,10 @@ def test_distribution_accuracy_for_uniform_losses_strategy(self) -> None: tvd_bound = self._compute_uniform_loss_approximation_tvd_bound() - outcomes_number = calculate_number_of_possible_lossy_n_particle_m_mode_output_states( - n=self._sampling_tvd_experiment_config.number_of_particles_left, - m=self._sampling_tvd_experiment_config.number_of_modes, + outcomes_number = bosonic_space_dimension( + particles_number=self._sampling_tvd_experiment_config.number_of_particles_left, + modes_number=self._sampling_tvd_experiment_config.number_of_modes, + losses=True, ) self._perform_full_approximation_strategies_test( diff --git 
a/tests/test_network_simulation_strategies.py b/tests/test_network_simulation_strategies.py index 10a2f60..7a373dd 100644 --- a/tests/test_network_simulation_strategies.py +++ b/tests/test_network_simulation_strategies.py @@ -3,9 +3,9 @@ import unittest from copy import deepcopy from random import uniform -from typing import List, Union +from typing import List, Union, Tuple -from numpy import asarray, eye, ndarray +from numpy import eye, ndarray from scipy.stats import unitary_group from theboss.boson_sampling_simulator import BosonSamplingSimulator @@ -34,11 +34,11 @@ class TestBosonSamplingClassicalSimulationStrategies(unittest.TestCase): def setUp(self) -> None: - uniform_transmissivity = 0.8 + uniform_transmissivity: float = 0.5 - self._initial_state = [1, 1, 1, 1, 0, 0] - self._number_of_samples_for_experiments = 1000 - self._probability_of_error_in_distribution_calculation = 0.001 + self._initial_state: List[int] = [1, 1, 1, 1, 0, 0] + self._number_of_samples_for_experiments: int = 1000 + self._probability_of_error_in_distribution_calculation: float = 0.001 random_unitary = unitary_group.rvs(len(self._initial_state)) @@ -47,7 +47,7 @@ def setUp(self) -> None: self._experiment_configuration = BosonSamplingExperimentConfiguration( interferometer_matrix=self._lossy_interferometer_matrix, - initial_state=asarray(self._initial_state, dtype=int), + initial_state=self._initial_state, initial_number_of_particles=sum(self._initial_state), number_of_modes=len(self._initial_state), number_of_particles_lost=0, # Losses should only come from network. @@ -70,10 +70,12 @@ def setUp(self) -> None: calculator = BSDistributionCalculatorWithUniformLosses( self._experiment_configuration, self._permanent_calculator ) - self._possible_outcomes = calculator.get_outcomes_in_proper_order() - self._possible_outcomes_number = len(self._possible_outcomes) + self._possible_outcomes: List[ + Tuple[int, ...] 
+ ] = calculator.get_outcomes_in_proper_order() + self._possible_outcomes_number: int = len(self._possible_outcomes) - self._tvd_bound_between_estimated_distributions = self.__compute_statistical_bound_on_two_approximate_distributions_tvd( + self._tvd_bound_between_estimated_distributions = self._compute_statistical_bound_on_two_approximate_distributions_tvd( outcomes_number=self._possible_outcomes_number ) @@ -84,15 +86,13 @@ def test_lossy_network_simulation_number_of_particles(self) -> None: will be lower in large losses' regime, but in case of lower losses this test may not hold. """ - self._strategy_factory.strategy_type = StrategyType.FIXED_LOSS strategy = self._strategy_factory.generate_strategy() simulator = BosonSamplingSimulator(strategy) lossy_average_number_of_particles = 0 samples = simulator.get_classical_simulation_results( - asarray(self._initial_state, dtype=int), - self._number_of_samples_for_experiments, + self._initial_state, self._number_of_samples_for_experiments, ) for sample in samples: lossy_average_number_of_particles += sum(sample) @@ -118,11 +118,11 @@ def test_lossy_network_with_uniform_losses_distribution_accuracy(self) -> None: estimated_distribution_calculator.calculate_approximate_distribution() ) - self.__check_if_given_distribution_is_close_to_lossy_network_distribution( + self._check_if_given_distribution_is_close_to_lossy_network_distribution( uniform_losses_distribution ) - def __check_if_given_distribution_is_close_to_lossy_network_distribution( + def _check_if_given_distribution_is_close_to_lossy_network_distribution( self, distribution: Union[ndarray, List[float]] ) -> None: """ @@ -154,7 +154,7 @@ def __check_if_given_distribution_is_close_to_lossy_network_distribution( self._tvd_bound_between_estimated_distributions, ) - def __compute_statistical_bound_on_two_approximate_distributions_tvd( + def _compute_statistical_bound_on_two_approximate_distributions_tvd( self, outcomes_number: int ) -> float: """ @@ -165,6 +165,8 @@ 
def __compute_statistical_bound_on_two_approximate_distributions_tvd( direction". :param outcomes_number: + + :return: """ return 2 * count_tv_distance_error_bound_of_experiment_results( @@ -182,20 +184,22 @@ def test_lossy_network_with_uniform_losses_distribution_accuracy_against_gcc( distribution with lossy inputs. """ generalized_cliffords_distribution = ( - self.__calculate_gcc_distribution_with_lossy_inputs() + self._calculate_gcc_distribution_with_lossy_inputs() ) self._strategy_factory.strategy_type = StrategyType.LOSSY_NET_GCC - self.__check_if_given_distribution_is_close_to_lossy_network_distribution( + self._check_if_given_distribution_is_close_to_lossy_network_distribution( generalized_cliffords_distribution ) - def __calculate_gcc_distribution_with_lossy_inputs(self) -> List[float]: + def _calculate_gcc_distribution_with_lossy_inputs(self) -> List[float]: """ This method calculates approximate distribution for lossy states using GC&C method. - :return: Approximate distribution. + + :return: + Approximate distribution. 
""" self._strategy_factory.strategy_type = StrategyType.GCC self._permanent_calculator.matrix = self._interferometer_matrix @@ -207,39 +211,34 @@ def __calculate_gcc_distribution_with_lossy_inputs(self) -> List[float]: for _ in range(self._number_of_samples_for_experiments): samples.extend( simulator.get_classical_simulation_results( - self.__get_uniformly_lossy_input_state(), 1 + self._get_uniformly_lossy_input_state(), 1 ) ) - return self.__calculate_distribution(samples, self._possible_outcomes) + return self._compute_distribution(samples, self._possible_outcomes) @staticmethod - def __calculate_distribution( - samples: List[ndarray], possible_outcomes: List[ndarray] + def _compute_distribution( + samples: List[Tuple[int, ...]], possible_outcomes: List[Tuple[int, ...]] ) -> List[float]: probabilities = [0] * len(possible_outcomes) for sample in samples: - for i in range(len(possible_outcomes)): - # Check if obtained result is one of possible outcomes. - # Expect all elements of resultant list to be True. - if all(sample == possible_outcomes[i]): - probabilities[i] += 1 - break + probabilities[possible_outcomes.index(sample)] += 1 for i in range(len(probabilities)): probabilities[i] /= len(samples) return probabilities - def __get_uniformly_lossy_input_state(self) -> ndarray: + def _get_uniformly_lossy_input_state(self) -> List[int]: """ This method assumes that losses are uniform and specified in the configuration of the experiment (in test case setup). :return: Input state after losses. 
""" - lossy_input = asarray(self._initial_state, dtype=int) + lossy_input = deepcopy(self._initial_state) for i in range(len(self._initial_state)): for _ in range(self._initial_state[i]): if ( @@ -283,18 +282,21 @@ def test_uniformly_lossy_network_on_occupied_modes_and_higher_losses_on_empty_mo ) self._strategy_factory.experiment_configuration = self._experiment_configuration - self.__check_if_given_distribution_is_close_to_lossy_network_distribution( + self._check_if_given_distribution_is_close_to_lossy_network_distribution( distribution_with_huge_losses_on_empty_modes ) def test_distance_of_gcc_with_lossy_network_and_lossy_input(self) -> None: - + """ + Test if the simulation using lossy net and the simulation using lossy input + yields the same results. + """ distribution_with_lossy_net = ( - self.__calculate_gcc_distribution_with_lossy_network() + self._calculate_gcc_distribution_with_lossy_network() ) distribution_with_lossy_input = ( - self.__calculate_gcc_distribution_with_lossy_inputs() + self._calculate_gcc_distribution_with_lossy_inputs() ) distance_between_distributions = count_total_variation_distance( @@ -306,7 +308,7 @@ def test_distance_of_gcc_with_lossy_network_and_lossy_input(self) -> None: self._tvd_bound_between_estimated_distributions, ) - def __calculate_gcc_distribution_with_lossy_network(self) -> List[float]: + def _calculate_gcc_distribution_with_lossy_network(self) -> List[float]: """ This method calculates approximate distribution for lossy states using generalized Clifford & Clifford method. 
@@ -320,8 +322,7 @@ def __calculate_gcc_distribution_with_lossy_network(self) -> List[float]: strategy = self._strategy_factory.generate_strategy() simulator = BosonSamplingSimulator(strategy) samples = simulator.get_classical_simulation_results( - asarray(self._initial_state, dtype=int), - self._number_of_samples_for_experiments, + self._initial_state, self._number_of_samples_for_experiments, ) - return self.__calculate_distribution(samples, self._possible_outcomes) + return self._compute_distribution(samples, self._possible_outcomes) diff --git a/tests/test_nonuniform_losses_simulation_strategies.py b/tests/test_nonuniform_losses_simulation_strategies.py new file mode 100644 index 0000000..7239c67 --- /dev/null +++ b/tests/test_nonuniform_losses_simulation_strategies.py @@ -0,0 +1,317 @@ +__author__ = "Tomasz Rybotycki" + +""" + The aim of this script is to test non-uniform losses simulation strategies are + accurate enough in the presence of such losses. Here, we focus only on the + simulations with NON-UNIFORM losses. Uniform losses and lossless simulations + accuracy tests for these strategies have been placed in the other files. 
+""" + +import unittest +from scipy.stats import unitary_group +from theboss.boson_sampling_utilities.boson_sampling_utilities import ( + bosonic_space_dimension, + generate_possible_states, +) +from theboss.quantum_computations_utilities import ( + compute_minimal_number_of_samples_for_desired_accuracy, + count_total_variation_distance_dicts, +) +from typing import List, DefaultDict, Tuple, Dict +from numpy import ndarray, zeros_like, block, sqrt +from numpy.linalg import svd +from collections import defaultdict + +from theboss.boson_sampling_utilities.permanent_calculators.ryser_permanent_calculator import ( + RyserPermanentCalculator, +) + +from theboss.distribution_calculators.bs_exact_distribution_with_uniform_losses import ( + BSDistributionCalculatorWithUniformLosses, + BosonSamplingExperimentConfiguration, +) + +from theboss.simulation_strategies.simulation_strategy_factory import ( + StrategyType, + SimulationStrategyFactory, + SimulationStrategyInterface, +) + + +class TestNonuniformLossesStrategies(unittest.TestCase): + """ + A class with unit test for nonuniform sampling simulations. + """ + + def setUp(self) -> None: + + # Statistical error settings. + self._desired_statistical_accuracy: float = 0.1 # So the tests go quick(er). + self._probability_of_error: float = 0.01 + + self._m: int = 2 # Block matrix modes number. + + # Interferometer matrix preparation. 
+ self._blocks_transmissivities: List[float] = [0.5, 0.2] + block_matrices: List[ndarray] = [unitary_group.rvs(self._m) for _ in range(2)] + self._interferometer_matrix: ndarray = self._get_interferometer_matrix( + block_matrices + ) + + # Compute distributions + self._distribution: Dict[ + Tuple[int, ...], float + ] = self._get_theoretical_distribution(block_matrices) + + self._initial_state: List[int] = [1 for _ in range(self._m * 2)] + + self._config: BosonSamplingExperimentConfiguration + self._config = BosonSamplingExperimentConfiguration( + self._interferometer_matrix, + self._initial_state, + self._m * 2, + self._m * 2, + 0, + self._m * 2, + ) + self._permanent_calculator: RyserPermanentCalculator = RyserPermanentCalculator( + self._interferometer_matrix, self._initial_state, None + ) + self._strategy_factory = SimulationStrategyFactory( + experiment_configuration=self._config, + bs_permanent_calculator=self._permanent_calculator, + ) + + self._strategy: SimulationStrategyInterface + + def _get_samples_number(self, modes_number: int) -> int: + """ + Computes the minimal number of samples that is required to obtained desired + (hard coded) accuracy with high (hard coded) probability. This takes into + account only the statistical error. + + Notice that we don't have to take into account the whole :math:`2m \\times 2m` + bosonic space, as the matrix we're analysing has a block form. + + :param modes_number: + The number of modes (and also particles, since we're in the :math:`m = n` + regime) that we consider in the experiment. + + :return: + The minimal number of particles required to obtain specified accuracy with + high probability. 
+ """ + + return compute_minimal_number_of_samples_for_desired_accuracy( + outcomes_number=bosonic_space_dimension(modes_number, modes_number, True) + ** 2, + error_probability=self._probability_of_error, + expected_distance=self._desired_statistical_accuracy, + ) + + def _get_interferometer_matrix(self, block_matrices: List[ndarray]) -> ndarray: + """ + Prepares matrices for the sampling. We do this by sampling two Haar-random + matrices from the unitary group and applying (DIFFERENT) uniform losses to them. + Then the interferometer matrix that we're interested in is in block form + :math:`M = [[A, 0]^T, [0, B]^T]`. + + :param block_matrices: + Two :math:`m \\times m` matrices sampled Haar-randomly from the unitary + group. + + :return: + A list of matrices that will be used for the simulations. + """ + zeros_matrix: ndarray = zeros_like(block_matrices[0]) + return block( + [ + [ + block_matrices[0] * sqrt(self._blocks_transmissivities[0]), + zeros_matrix, + ], + [ + zeros_matrix, + block_matrices[1] * sqrt(self._blocks_transmissivities[1]), + ], + ] + ) + + def _get_theoretical_distribution( + self, block_matrices: List[ndarray] + ) -> Dict[Tuple[int, ...], float]: + """ + Computes distribution of large interferometer made from 2 smaller block + interferometers with uniform losses. + + :param block_matrices: + Already sampled block matrices. + + :return: + Theoretical distribution for BS with specified matrix. + """ + + possible_states: List[Tuple[int, ...]] = generate_possible_states( + self._m, self._m, losses=True + ) + partial_distributions: List[List[float]] = [] + + # Get partial distributions. 
+ for i in range(len(block_matrices)): + + transmissivity: float = self._blocks_transmissivities[i] + + block_input_state: List[int] = [1 for _ in range(self._m)] + + # Extract losses from the block matrix + u: ndarray + s: List[float] + v: ndarray + u, s, v = svd(block_matrices[i]) + lossless_block_matrix: ndarray = u @ v + + config: BosonSamplingExperimentConfiguration + config = BosonSamplingExperimentConfiguration( + lossless_block_matrix, + block_input_state, + self._m, + self._m, + 0, + self._m, + transmissivity, + ) + + permanent_calculator = RyserPermanentCalculator( + lossless_block_matrix, block_input_state, None + ) + + distribution_calculator = BSDistributionCalculatorWithUniformLosses( + config, permanent_calculator + ) + + partial_distributions.append( + distribution_calculator.calculate_probabilities_of_outcomes( + possible_states + ) + ) + + distribution: DefaultDict[Tuple[int, ...], float] = defaultdict(lambda: 0) + + # Join partial distributions. + for i in range(len(possible_states)): + for j in range(len(possible_states)): + distribution[possible_states[i] + possible_states[j]] = ( + partial_distributions[0][i] * partial_distributions[1][j] + ) + + return distribution + + @staticmethod + def _compute_frequencies( + counts: Dict[Tuple[int, ...], int] + ) -> DefaultDict[Tuple[int, ...], float]: + """ + Computes empirical frequencies from the counts. + + :param counts: + State counts gathered during the experiments. + + :return: + Empirical frequencies of the states. + """ + frequencies: DefaultDict[Tuple[int, ...], float] = defaultdict(lambda: 0) + samples_number: int = 0 + + for state in counts: + samples_number += counts[state] + + for state in counts: + frequencies[state] = counts[state] / samples_number + + return frequencies + + def _compute_bobs_approximation_tvd_bound(self): + """ + Compute bounds for BOBS strategy. For details check [2], formula (22). + + :return: TVD bound for BOBS algorithm. 
+ """ + eta_eff = max(self._blocks_transmissivities) + n = 2 * self._m + + bound = pow(eta_eff, 2) / 2 + bound *= n - self._strategy_factory._experiment_configuration.hierarchy_level + bound += eta_eff * (1 - eta_eff) / 2 + + return bound + + def _perform_accuracy_test(self, approximation_distance_bound: float = 0) -> None: + """ + Boilerplate code for the non-uniform lossy strategies tests. It takes care of + producing strategies, samples, frequencies and finally + :param approximation_distance_bound: + + :return: + """ + # Prepare strategy. + self._strategy = self._strategy_factory.generate_strategy() + + # Get samples. + samples_number: int = self._get_samples_number(self._m) + samples = self._strategy.simulate(self._initial_state, samples_number) + + # Get counts. + counts: DefaultDict[Tuple[int, ...], int] = defaultdict(lambda: 0) + + for sample in samples: + counts[sample] += 1 + + # Get frequencies. + frequencies: Dict[Tuple[int, ...], float] = self._compute_frequencies(counts) + + tvd: float = count_total_variation_distance_dicts( + frequencies, self._distribution + ) + + self.assertTrue( + tvd < self._desired_statistical_accuracy + approximation_distance_bound, + f"TVD ({tvd}) is greater than expected (" + f"{self._desired_statistical_accuracy + approximation_distance_bound})!", + ) + + def test_lossy_net_gcc_accuracy(self) -> None: + """ + Test accuracy of the non-uniform lossy net GCC Strategy in the presence of the + non-uniform losses. + """ + self._strategy_factory.strategy_type = StrategyType.LOSSY_NET_GCC + self._perform_accuracy_test() + + def test_exact_bobs_accuracy(self) -> None: + """ + Test accuracy of the general BOBS Strategy in the presence of the non-uniform + losses without approximations. 
+ """ + self._strategy_factory.strategy_type = StrategyType.BOBS + self._strategy_factory.experiment_configuration.hierarchy_level = self._m + self._perform_accuracy_test() + + def test_small_approximation_bobs_accuracy(self) -> None: + """ + Test accuracy of the general BOBS strategy in the presence of the non-uniform + losses with small approximation. + """ + self._strategy_factory.strategy_type = StrategyType.BOBS + self._strategy_factory.experiment_configuration.hierarchy_level = 2 + approximation_bound: float = self._compute_bobs_approximation_tvd_bound() + self._perform_accuracy_test(approximation_bound) + + def test_high_approximation_bobs_accuracy(self) -> None: + """ + Test accuracy of the general BOBS strategy in the presence of the non-uniform + losses with high approximation. + """ + self._strategy_factory.strategy_type = StrategyType.BOBS + self._strategy_factory.experiment_configuration.hierarchy_level = 1 + approximation_bound: float = self._compute_bobs_approximation_tvd_bound() + self._perform_accuracy_test(approximation_bound) diff --git a/tests/test_quantum_computations_utilities.py b/tests/test_quantum_computations_utilities.py deleted file mode 100644 index 34d8c5a..0000000 --- a/tests/test_quantum_computations_utilities.py +++ /dev/null @@ -1,57 +0,0 @@ -__author__ = "Tomasz Rybotycki" - -import unittest - -from numpy import conjugate, identity, ndarray, transpose, allclose -from scipy.stats import unitary_group - - -class TestQuantumComputationsUtilities(unittest.TestCase): - def setUp(self) -> None: - self.matrix_size = 5 - self.number_of_matrices_for_distinct_elements_check = 10 # Should be >= 2. 
- pass - - def test_unitarity_of_matrices_generated_by_haar_random_unitary_method( - self, - ) -> None: - random_matrix = unitary_group.rvs(self.matrix_size) - random_matrix_hermitian_adjoint = transpose(conjugate(random_matrix)) - product_of_matrix_and_hermitian_adjoint = random_matrix_hermitian_adjoint.dot( - random_matrix - ) - - identity_matrix = identity(self.matrix_size, dtype=complex) - - self.assertTrue( - self.__are_matrices_elementwise_close( - identity_matrix, product_of_matrix_and_hermitian_adjoint - ) - ) - - @staticmethod - def __are_matrices_elementwise_close(matrix1: ndarray, matrix2: ndarray) -> bool: - # I assume that there are only rectangular matrices - if len(matrix2) != len(matrix1): - return False - if len(matrix2[0]) != len(matrix1[0]): - return False - - return allclose(matrix1, matrix2) - - def test_haar_random_unitary_matrices_generation_differences(self) -> None: - generated_unitaries_matrices = [ - unitary_group.rvs(self.matrix_size) - for _ in range(self.number_of_matrices_for_distinct_elements_check) - ] - - are_all_matrices_different = [] - - for i in range(self.number_of_matrices_for_distinct_elements_check): - for j in range(i + 1, self.number_of_matrices_for_distinct_elements_check): - are_all_matrices_different.append( - self.__are_matrices_elementwise_close( - generated_unitaries_matrices[i], generated_unitaries_matrices[j] - ) - ) - self.assertTrue(not any(are_all_matrices_different)) diff --git a/theboss/boson_sampling_simulator.py b/theboss/boson_sampling_simulator.py index 733a7ff..3e1b806 100644 --- a/theboss/boson_sampling_simulator.py +++ b/theboss/boson_sampling_simulator.py @@ -4,9 +4,11 @@ This file holds an implementation of generic boson sampling experiment simulator. """ -from typing import List +# TODO TR: Expand the simulator to justify it's existence. +# - Add frequencies counting +# - ??? 
-from numpy import ndarray +from typing import List, Sequence, Tuple from .simulation_strategies.simulation_strategy_interface import ( SimulationStrategyInterface, @@ -18,6 +20,6 @@ def __init__(self, simulation_strategy: SimulationStrategyInterface) -> None: self._simulation_strategy = simulation_strategy def get_classical_simulation_results( - self, input_state: ndarray, samples_number: int = 1 - ) -> List[ndarray]: + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int]]: return self._simulation_strategy.simulate(input_state, samples_number) diff --git a/theboss/boson_sampling_utilities/boson_sampling_utilities.py b/theboss/boson_sampling_utilities/boson_sampling_utilities.py index 5f6ac2f..338cf85 100644 --- a/theboss/boson_sampling_utilities/boson_sampling_utilities.py +++ b/theboss/boson_sampling_utilities/boson_sampling_utilities.py @@ -3,27 +3,20 @@ """ This script contains various auxiliary methods useful for boson sampling experiments. - - TODO TR: Consider making this file a package along with exact distribution - calculator. 
""" import itertools -from typing import List, Optional, Sequence, Tuple, Set +from typing import List, Optional, Sequence, Tuple from numpy import ( + ndarray, array, - asarray, block, complex128, diag, eye, - int64, - ndarray, - power, sqrt, transpose, - zeros, zeros_like, square, flip, @@ -35,105 +28,149 @@ from scipy.special import binom, factorial from numpy.random import rand -from ..quantum_computations_utilities import compute_qft_matrix +from theboss.quantum_computations_utilities import compute_qft_matrix -# TODO TR: Change the names of these two methods to reflect that it will denote the -# state description in the 1st and 2nd quantization (with the remark that the -# first quantization one will not be symmetrized, but just "a representative") -def particle_state_to_modes_state( - particle_state: ndarray, observed_modes_number: int -) -> ndarray: - modes_state = zeros(observed_modes_number, dtype=int) +def mode_assignment_to_mode_occupation( + modes_assignment: Sequence[int], observed_modes_number: int = 0 +) -> Tuple[int]: + """ + Given a bosonic "state" in a mode assignment representation, return the state in the + mode occupation description. - # Adding the particle to it's mode. - for particles_mode in asarray(particle_state, dtype=int64): - modes_state[particles_mode] += 1 + :param modes_assignment: + A "state" in a mode assignment representation. + :param observed_modes_number: + Number of observed modes. Necessary if it's greater than suggested by given + state. - return modes_state + :return: + The state in a mode occupation representation (as a tuple). + """ + if observed_modes_number == 0: + observed_modes_number = max(modes_assignment) + 1 + modes_occupation = [0 for _ in range(observed_modes_number)] -def modes_state_to_particle_state(mode_state: ndarray) -> ndarray: - """ - Return given mode-basis state in particle basis. 
+ for particles_mode in modes_assignment: + modes_occupation[particles_mode] += 1 - :param mode_state: Input state in mode-basis. - :return: Given mode-basis state in particle basis. + return tuple(modes_occupation) + + +def mode_occupation_to_mode_assignment(mode_occupation: Sequence[int]) -> Tuple[int]: """ + Return the state (given in mode occupation representation) in the mode + assignment representation. - particles_number = int(sum(mode_state)) - number_of_observed_modes = len(mode_state) - modes = mode_state.copy() - particles_state = zeros(particles_number, dtype=int) + :param mode_occupation: + Input state in mode-basis. - i = k = 0 - while i < number_of_observed_modes: + :return: + Given mode-basis state in particle basis (as a tuple). + """ + mode_assignment = tuple() - if modes[i] > 0: - modes[i] -= 1 - particles_state[k] = i - k += 1 - else: - i += 1 + for i in range(len(mode_occupation)): + for j in range(mode_occupation[i]): + mode_assignment += (i,) - return particles_state + return mode_assignment -def generate_possible_outputs( - number_of_particles: int, number_of_modes: int, consider_loses: bool = False -) -> List[ndarray]: - if number_of_particles < 0 or number_of_modes < 1: - return [] - if number_of_particles == 0: - return [zeros(number_of_modes)] +def generate_possible_states( + particles_number: int, modes_number: int, losses: bool = False +) -> List[Tuple[int]]: + """ + This method generates all possible :math:`m`-mode states. By default, it's + restricted to only :math:`n`-particle states, but it can also return lossy states. + + :param particles_number: + The maximal number :math:`n` of particles. + :param modes_number: + The number :math:`m` of considered modes. + :param losses: + A flag for lossy states generation. + + :return: + A list of possible (lossy) states (as tuples). 
+ """ + if particles_number < 0 or modes_number < 1: + return list() + if particles_number == 0: + return [tuple([0 for _ in range(modes_number)])] - outputs = [] - starting_number_of_particles = number_of_particles + states = [] + starting_number_of_particles = particles_number - if consider_loses: + if losses: starting_number_of_particles = 0 - for n in range(starting_number_of_particles, number_of_particles + 1): - outputs.extend(_generate_possible_n_particle_outputs(n, number_of_modes)) + for particles_number in range(starting_number_of_particles, particles_number + 1): + states.extend( + _generate_possible_n_particle_states(particles_number, modes_number) + ) + + return states - return outputs +def _generate_possible_n_particle_states(n: int, modes_number: int) -> List[Tuple[int]]: + """ + Generates all possible :math:`n` particle states. -def _generate_possible_n_particle_outputs( - number_of_particles: int, number_of_modes: int -) -> List[ndarray]: - outputs = [] + :param n: + The number of particles in resultant states. + :param modes_number: + The number :math:`m` of modes in resultant states. - output = zeros(number_of_modes, dtype=int) - output[0] = number_of_particles - outputs.append(output) + :return: + A list of possible :math:`m`-mode `n`-particle states in 2nd quantization + representation (as tuples of ints). 
+ """ + states = [] - while outputs[-1][number_of_modes - 1] < number_of_particles: + state: List[int] = [0 for _ in range(modes_number)] - k = number_of_modes - 1 - while outputs[-1][k - 1] == 0: + state[0] = n + states.append(state) + + while states[-1][modes_number - 1] < n: + + k = modes_number - 1 + while states[-1][k - 1] == 0: k -= 1 - output = outputs[-1].copy() - output[k - 1] -= 1 - output[k:] = 0 - output[k] = number_of_particles - sum(output) + state = states[-1].copy() + state[k - 1] -= 1 + + for i in range(k, len(state)): + state[i] = 0 - outputs.append(output) + state[k] = n - sum(state) - sorted_outputs = sorted([tuple(output) for output in outputs], reverse=True) + states.append(state) - return [array(output) for output in sorted_outputs] + sorted_states = sorted([tuple(output) for output in states], reverse=True) + return sorted_states -def generate_lossy_inputs( - initial_state: ndarray, number_of_particles_left: int -) -> List[ndarray]: + +def generate_lossy_n_particle_input_states( + initial_state: Sequence[int], number_of_particles_left: int +) -> List[Tuple[int]]: """ - From initial state generate all possible input states after losses application. - :param initial_state: The state we start with. - :param number_of_particles_left: Number of particles after losses application. - :return: A list of lists representing initial states after losses. + From initial state generate all possible input states, with required number of + particles after losses application. + + Notice that it can also be done using the Guan Codes! + + :param initial_state: + The state we start with. + :param number_of_particles_left: + Number of particles after losses application. + + :return: + A list of tuples of ints representing initial states after losses. 
""" x0 = [] number_of_modes = len(initial_state) @@ -145,17 +182,15 @@ def generate_lossy_inputs( lossy_inputs_hashes = [] if sum(initial_state) == 0: - return [initial_state] + return [tuple(initial_state)] # Symmetrization. for combination in itertools.combinations( list(range(initial_number_of_particles)), number_of_particles_left ): - lossy_input_in_particle_basis = array( - [x0[el] for el in combination], dtype=int64 - ) + lossy_input_in_particle_basis = array([x0[el] for el in combination], dtype=int) - lossy_input = particle_state_to_modes_state( + lossy_input = mode_assignment_to_mode_occupation( lossy_input_in_particle_basis, number_of_modes ) @@ -168,104 +203,193 @@ def generate_lossy_inputs( return lossy_inputs_list -def calculate_number_of_possible_n_particle_m_mode_output_states(n: int, m: int) -> int: +def bosonic_space_dimension( + particles_number: int, modes_number: int, losses: bool = False +) -> int: """ - Calculates the number of possible output states with n particles placed around m modes. - - This is basically the same answer as to in how many possible combinations can we put n objects in m bins. It's - also a dimension of n-particle m-mode bosonic space. Stars-and-bars argument applies here. - - :param n: Number of particles. - :param m: Number of modes. - :return: Dimension of n-particle m-mode bosonic space. + Calculates the number of possible states with specified number of modes and + maximal number of particles. + + This is basically the same answer as to in how many possible combinations can we + put :math:`n` objects in :math:`m` bins. It's also a dimension of :math:`m`-mode + bosonic space with at most :math:`n` particles, or exactly :math:`n` if we don't + consider losses. Stars-and-bars argument applies here. + + :param particles_number: + Number :math:`n` of particles. If lossy states are considered, this is the + maximal number of particles. + :param modes_number: + Number :math:`m` of considered modes. 
+ + :return: + Dimension of (possibly lossy) :math:`m`-mode bosonic space with at most + :math:`n` particles. """ - return round(binom(n + m - 1, n)) + dimension: int = round(binom(particles_number + modes_number - 1, particles_number)) -def calculate_number_of_possible_lossy_n_particle_m_mode_output_states( - n: int, m: int -) -> int: - """ - Calculates the number of possible output states with N <= n particles placed around m modes. + if not losses: + return dimension - :param n: Number of particles. - :param m: Number of modes. - :return: Dimension of n-particle m-mode bosonic space. - """ - states_number = 0 - for N in range(n + 1): - states_number += round(binom(N + m - 1, N)) - return states_number + for n in range(particles_number): + dimension += round(binom(n + modes_number - 1, n)) + + return dimension def get_modes_transmissivity_values_from_matrix( - lossy_interferometer_matrix: ndarray, + lossy_interferometer_matrix: Sequence[Sequence[complex128]], ) -> List[float]: + """ + Given a lossy interferometer matrix this method extracts from it the information + about the transmissivity of the modes. Given that SVD decomposition is not unique + the returned order of the transmissivities doesn't correspond to the order on modes + in general. + + It so happens that the transmissivities in the lossy interferometer matrix are + described by the roots of the singular values (in our case the eigenvalues of + the matrix). + + :param lossy_interferometer_matrix: + A lossy interferometer matrix from which the information about the + transmissivities will be extracted. + + :return: + Unordered list of modes transmissivities extracted from given matrix. 
+ """ v_matrix, singular_values, u_matrix = svd(lossy_interferometer_matrix) return square(flip(singular_values)) -def _calculate_singular_values_matrix_expansion( - singular_values_vector: ndarray, -) -> ndarray: - vector_of_squared_expansions = 1.0 - power(singular_values_vector, 2) - for i in range(len(vector_of_squared_expansions)): - if vector_of_squared_expansions[i] < 0: - vector_of_squared_expansions[i] = 0 +def _compute_loss_transfer_matrix_expansion(transmissivities: ndarray,) -> ndarray: + """ + Returns extension part of the singular values' matrix resulting from the SVD + decomposition of the (presumably lossy) interferometer. + + :param transmissivities: + The values of transmissivities obtained from the squares of the singular values + of the (presumably lossy) interferometer matrix' SVD. + + :return: + One of the block matrices of singular values' matrix of the SVD of the given + (presumably lossy) interferometer in expanded space. + """ + losses_vector = 1.0 - transmissivities + for i in range(len(losses_vector)): + if losses_vector[i] < 0: + losses_vector[i] = 0 - expansion_values = sqrt(vector_of_squared_expansions) + expansion_values = sqrt(losses_vector) return diag(expansion_values) def prepare_interferometer_matrix_in_expanded_space( - interferometer_matrix: ndarray, + interferometer_matrix: Sequence[Sequence[complex]], ) -> ndarray: + """ + This operation is required for the simulation of BS experiment with mode dependent + (non-uniform) losses. + + One way to perform such simulation is to expand the experiment from the + :math:`m \\times m` to :math:`2m \\times 2m` one and treat the additional + modes as the space for the lost particles. Then the loss of a particle can be + implemented as transferring it to one of the additional modes. By the end of the + simulation the additional modes have to be trimmed. + + Notice that this is not necessary in the case of uniform losses, but can also be + used for it. 
+ + Although it's not necessary, the method returns a unitary matrix. + + :param interferometer_matrix: + An (possibly lossy) interferometer matrix to be expanded. + + :return: + Given interferometer in the expanded sampling space. + """ v_matrix, singular_values, u_matrix = svd(interferometer_matrix) - expansions_zeros = zeros_like(v_matrix) - expansions_ones = eye(len(v_matrix)) + extension_zeros_matrix = zeros_like(v_matrix) + extension_identity_matrix = eye(len(v_matrix)) + expanded_v = block( - [[v_matrix, expansions_zeros], [expansions_zeros, expansions_ones]] + [ + [v_matrix, extension_zeros_matrix], + [extension_zeros_matrix, extension_identity_matrix], + ] ) + expanded_u = block( - [[u_matrix, expansions_zeros], [expansions_zeros, expansions_ones]] + [ + [u_matrix, extension_zeros_matrix], + [extension_zeros_matrix, extension_identity_matrix], + ] ) - singular_values_matrix_expansion = _calculate_singular_values_matrix_expansion( - singular_values + + transmissivities = array([s ** 2 for s in singular_values]) + loss_transfer_extension_matrix = _compute_loss_transfer_matrix_expansion( + transmissivities ) - singular_values_expanded_matrix = block( + + # This is the most specific thing here. + expanded_singular_values_matrix = block( [ - [diag(singular_values), singular_values_matrix_expansion], - [singular_values_matrix_expansion, diag(singular_values)], + [diag(singular_values), loss_transfer_extension_matrix], + [loss_transfer_extension_matrix, diag(singular_values)], ] ) - return expanded_v @ singular_values_expanded_matrix @ expanded_u + return expanded_v @ expanded_singular_values_matrix @ expanded_u -def compute_state_types( +def generate_state_types( modes_number: int, particles_number: int, losses: bool = False -) -> List[List[int]]: - # Partitions generating code. 
- # Taken from https://stackoverflow.com/questions/10035752/elegant-python-code-for-integer-partitioning/10036764 - def partitions(n, I=1): +) -> List[Tuple[int, ...]]: + """ + Returns a list of (possibly lossy) state types understood in the same sense as + in [1]. We also assume that the modes occupations of the state types are ordered + in the non-increasing order, as in [1]. + + :param modes_number: + The number :math:`m` of considered modes. + :param particles_number: + The maximal number :math:`n` of considered + :param losses: + A flag informing if losses should be considered. + :return: + A list of (possibly lossy) state types given by the lists of ints. + """ + + def _partitions(n, I=1): + """ + A method for generating integer partitions. + Credits to + https://stackoverflow.com/questions/10035752/elegant-python-code-for-integer-partitioning/10036764 + + :param n: + The number for which integer partitions will be returned. + :param I: + A control parameter. + :return: + The integer partitions of :math:`n`. + """ yield (n,) for i in range(I, n // 2 + 1): - for p in partitions(n - i, i): + for p in _partitions(n - i, i): yield (i,) + p - all_partitions = list(partitions(particles_number)) + all_partitions = list(_partitions(particles_number)) if losses: for i in range(particles_number): - all_partitions += list(partitions(i)) + all_partitions += list(_partitions(i)) state_types = [] for partition in all_partitions: if len(partition) > modes_number: continue - # We describe state type by a vector in descending order. + # We describe state type by a vector in descending order [1]. 
state_type = sorted(partition, reverse=True) state_types.append(state_type) @@ -273,16 +397,35 @@ def partitions(n, I=1): while len(state_types[i]) < modes_number: state_types[i].append(0) - return state_types + return [tuple(state_type) for state_type in state_types] def compute_number_of_state_types( modes_number: int, particles_number: int, losses=False ) -> int: + """ + Computes the number of state types (as defined in [1]) with given number of modes + and particles. It also allows the case when the losses are considered. + + The number of state types, given modes number :math:`m` and particles number + :math:`n` is equal to the number of integer partitions of :math:`n` of length + at most :math:`m`. In case if losses are allowed one has to sum up the number of + partitions for numbers :math:`p` of particles such that :math:`0 \\leq p \\leq n`. + + :param modes_number: + The number :math:`m` of considered modes. + :param particles_number: + The maximal number :math:`n` of allowed particles. + :param losses: + A flag indicating whether the lossy states should also be considered. + :return: + The number of (possibly lossy) state types for given number of modes and + particles. 
+ """ state_types_number = 0 for k in range(1, modes_number + 1): - state_types_number += compute_number_of_k_element_integer_n_partitions( + state_types_number += compute_number_of_k_element_integer_partitions_of_n( k, particles_number ) @@ -291,48 +434,26 @@ def compute_number_of_state_types( for particles_num in range(particles_number): for k in range(1, modes_number + 1): - state_types_number += compute_number_of_k_element_integer_n_partitions( + state_types_number += compute_number_of_k_element_integer_partitions_of_n( k, particles_num ) return state_types_number -def compute_number_of_k_element_integer_n_partitions(k: int, n: int) -> int: - if k == 1: - return 1 - - if k > n or n == 0 or k < 1: - return 0 - - integer_partitions_number = compute_number_of_k_element_integer_n_partitions( - k, n - k - ) - integer_partitions_number += compute_number_of_k_element_integer_n_partitions( - k - 1, n - 1 - ) - - return integer_partitions_number - - -def compute_maximally_unbalanced_types( - modes_number: int, particles_number: int -) -> List[List[int]]: - maximally_unbalanced_types = [] - all_types = compute_state_types( - particles_number=particles_number, modes_number=modes_number - ) - - for state_type in all_types: - if state_type.count(1) == len(state_type) - 1 or state_type.count(1) == len( - state_type - ): - maximally_unbalanced_types.append(state_type) - - return maximally_unbalanced_types - - def compute_number_of_states_of_given_type(state_type: Sequence[int]) -> int: + """ + Returns the number of possible states of given type. The two states are of the same + type if they can be mapped into each other by a mode-permuting matrix. The number + of states of possible type is therefore the number of distinct permutations of given + state type. + + :param state_type: + A state type as defined in [1]. Multiple states can be of the same type so + this can also be understood as a representative of desired state type. + :return: + The number of states of given type. 
+ """ modes_number = len(state_type) counts = [] @@ -354,29 +475,46 @@ def compute_number_of_states_of_given_type(state_type: Sequence[int]) -> int: return number_of_states_of_given_type -def compute_state_of_given_type(state_type: Sequence[int]) -> List[Tuple[int]]: - - if len(state_type) == 0: - return [tuple()] - - states_of_type: Set[Tuple[int, ...]] = set() - - working_state: List[int] = list(state_type) - modes_number: int = len(state_type) - - for _ in range(modes_number): - val: int = working_state.pop(0) - substates: List[Tuple[int]] = compute_state_of_given_type(working_state) +def compute_number_of_k_element_integer_partitions_of_n(k: int, n: int) -> int: + """ + Return the number of :math:`k`-element partitions of integer :math:`n`. + + :param k: + The size of the partitions. + :param n: + The number for which the number of :math:`k`-element partitions is computed. + :return: + The number of :math:`k`-element partitions of :math:`n`. + """ + if k == 1: + return 1 - for substate in substates: - states_of_type.add((val,) + substate) + if k > n or n == 0 or k < 1: + return 0 - working_state.append(val) + integer_partitions_number = compute_number_of_k_element_integer_partitions_of_n( + k, n - k + ) + integer_partitions_number += compute_number_of_k_element_integer_partitions_of_n( + k - 1, n - 1 + ) - return list(states_of_type) + return integer_partitions_number def generate_qft_matrix_for_first_m_modes(m: int, all_modes_number: int) -> ndarray: + """ + Prepares a matrix which describes a gate that applies QFT on the first :math:`m` + modes and identity on the rest. + + :param m: + The number of initial modes on which the QFT will be applied. + :param all_modes_number: + The number of all the modes and, consequently, the size of the resultant matrix. + :return: + A square matrix of a size given by the number of all modes which applies QFT + on the first :math:`m` modes. 
+ """ small_qft_matrix = compute_qft_matrix(m) qft_matrix = eye(all_modes_number, dtype=complex128) qft_matrix[0:m, 0:m] = small_qft_matrix @@ -386,6 +524,19 @@ def generate_qft_matrix_for_first_m_modes(m: int, all_modes_number: int) -> ndar def generate_random_phases_matrix_for_first_m_modes( m: int, all_modes_number: int ) -> ndarray: + """ + Returns a matrix that applies random phases on the first :math:`m` modes and the + identity on all the others. + + :param m: + The number of initial modes on which the random phases should be applied. + :param all_modes_number: + The total number of considered modes and, consequently, the size of the + resultant matrix. + :return: + A matrix describing an operation of applying random phases on the first + :math:`m` modes and the identity on all the others. + """ random_phases = ones(all_modes_number, dtype=complex128) # [1, 1, 1, 1, 1, 1] random_phases[0:m] = exp(1j * 2 * pi * rand(m)) return diag(random_phases) @@ -393,68 +544,83 @@ def generate_random_phases_matrix_for_first_m_modes( class EffectiveScatteringMatrixCalculator: """ - In many methods of Boson Sampling simulations an effective scattering matrix has to be calculated. Therefore - I decided to implement an calculator that'd be used if every single one of these methods. + In many methods of Boson Sampling simulations an effective scattering matrix has + to be calculated. Therefore, I decided to implement a calculator that'd be used + in every single one of these methods. + + For the method to work properly the input and the output states should both + be provided in the 2nd quantization representation (mode occupation). 
""" def __init__( self, - matrix: ndarray, - input_state: Optional[ndarray] = None, - output_state: Optional[ndarray] = None, + matrix: Sequence[Sequence[complex128]], + input_state: Optional[Sequence[int]] = None, + output_state: Optional[Sequence[int]] = None, ) -> None: if output_state is None: - output_state = array([], dtype=int64) + output_state = list() if input_state is None: - input_state = array([], dtype=int64) - self.__matrix = matrix - self.__input_state = input_state - self.__output_state = output_state + input_state = list() + self._matrix: Sequence[Sequence[complex128]] = matrix + self._input_state: Sequence[int] = input_state + self._output_state: Sequence[int] = output_state @property - def matrix(self) -> ndarray: - return self.__matrix + def matrix(self) -> Sequence[Sequence[complex128]]: + return self._matrix @matrix.setter - def matrix(self, matrix: ndarray) -> None: - self.__matrix = matrix + def matrix(self, matrix: Sequence[Sequence[complex128]]) -> None: + self._matrix = matrix @property - def input_state(self) -> ndarray: - return self.__input_state + def input_state(self) -> Sequence[int]: + return self._input_state @input_state.setter - def input_state(self, input_state: ndarray) -> None: - self.__input_state = asarray(input_state, dtype=int64) + def input_state(self, input_state: Sequence[int]) -> None: + # Note that we expect 2nd quantization description here. + self._input_state = input_state @property - def output_state(self) -> ndarray: - return self.__output_state + def output_state(self) -> Sequence[int]: + # Note that we expect 2nd quantization description here. 
+ return self._output_state @output_state.setter - def output_state(self, output_state: ndarray) -> None: - self.__output_state = asarray(output_state, dtype=int64) + def output_state(self, output_state: Sequence[int]) -> None: + self._output_state = output_state + + def calculate(self) -> List[List[complex128]]: + """ + Calculates and returns the effective scattering matrix in the BS instance + for previously given input state, output state and the interferometer matrix. - def calculate(self) -> ndarray: + Note that for the proper results we expect input state and the output state + to be in the 2nd quantization representation. + :return: + The effective scattering matrix in the specified BS instance. + """ if sum(self.input_state) == 0 or sum(self.output_state) == 0: return [] - transposed_input_matrix = transpose(self.__matrix) + transposed_input_matrix = transpose(self._matrix) helper_mtx = [] - for index_of_column_to_insert in range(len(self.__input_state)): + for index_of_column_to_insert in range(len(self._input_state)): helper_mtx += [transposed_input_matrix[index_of_column_to_insert]] * int( - self.__input_state[index_of_column_to_insert] + self._input_state[index_of_column_to_insert] ) helper_mtx = transpose(array(helper_mtx, dtype=complex128)) effective_scattering_matrix = [] - for index_of_row_to_insert in range(len(self.__output_state)): + for index_of_row_to_insert in range(len(self._output_state)): effective_scattering_matrix += [helper_mtx[index_of_row_to_insert]] * int( - self.__output_state[index_of_row_to_insert] + self._output_state[index_of_row_to_insert] ) - return array(effective_scattering_matrix, dtype=complex128) + return effective_scattering_matrix diff --git a/theboss/boson_sampling_utilities/permanent_calculators/bs_cc_ryser_submatrices_permanent_calculator.py b/theboss/boson_sampling_utilities/permanent_calculators/bs_cc_ryser_submatrices_permanent_calculator.py index f5cdd16..f37c607 100644 --- 
a/theboss/boson_sampling_utilities/permanent_calculators/bs_cc_ryser_submatrices_permanent_calculator.py +++ b/theboss/boson_sampling_utilities/permanent_calculators/bs_cc_ryser_submatrices_permanent_calculator.py @@ -9,8 +9,8 @@ BSGuanBasedSubmatricesPermanentCalculatorBase, ) -from numpy import ndarray, complex128, nonzero -from typing import Optional, List +from numpy import complex128, nonzero +from typing import Optional, List, Sequence class BSCCRyserSubmatricesPermanentCalculator( @@ -54,9 +54,9 @@ class BSCCRyserSubmatricesPermanentCalculator( def __init__( self, - matrix: ndarray, - input_state: Optional[ndarray] = None, - output_state: Optional[ndarray] = None, + matrix: Sequence[Sequence[complex]], + input_state: Optional[Sequence[int]] = None, + output_state: Optional[Sequence[int]] = None, ) -> None: super().__init__(matrix, input_state, output_state) diff --git a/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_base.py b/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_base.py index 8cdf0a5..349aca4 100644 --- a/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_base.py +++ b/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_base.py @@ -6,12 +6,12 @@ It takes care of a lot of boilerplate code. 
""" -from typing import Optional, List +from typing import Optional, List, Sequence import operator from functools import reduce -from numpy import ndarray, int64, array, asarray, zeros, ones, complex128, nonzero +from numpy import complex128, nonzero from ..permanent_calculators.bs_permanent_calculator_interface import ( BSPermanentCalculatorInterface, @@ -22,41 +22,41 @@ class BSPermanentCalculatorBase(BSPermanentCalculatorInterface, abc.ABC): def __init__( self, - matrix: ndarray, - input_state: Optional[ndarray] = None, - output_state: Optional[ndarray] = None, + matrix: Sequence[Sequence[complex128]], + input_state: Optional[Sequence[int]] = None, + output_state: Optional[Sequence[int]] = None, ) -> None: if output_state is None: - output_state = array([], dtype=int64) + output_state = list() if input_state is None: - input_state = array([], dtype=int64) - self._matrix = matrix - self._input_state = input_state - self._output_state = output_state + input_state = list() + self._matrix: Sequence[Sequence[complex128]] = matrix + self._input_state: Sequence[int] = input_state + self._output_state: Sequence[int] = output_state @property - def matrix(self) -> ndarray: + def matrix(self) -> Sequence[Sequence[complex128]]: return self._matrix @matrix.setter - def matrix(self, matrix: ndarray) -> None: + def matrix(self, matrix: Sequence[Sequence[complex128]]) -> None: self._matrix = matrix @property - def input_state(self) -> ndarray: + def input_state(self) -> Sequence[int]: return self._input_state @input_state.setter - def input_state(self, input_state: ndarray) -> None: - self._input_state = asarray(input_state, dtype=int64) + def input_state(self, input_state: Sequence[int]) -> None: + self._input_state = input_state @property - def output_state(self) -> ndarray: + def output_state(self) -> Sequence[int]: return self._output_state @output_state.setter - def output_state(self, output_state: ndarray) -> None: - self._output_state = asarray(output_state, 
dtype=int64) + def output_state(self, output_state: Sequence[int]) -> None: + self._output_state = output_state def _can_calculation_be_performed(self) -> bool: """ @@ -66,9 +66,9 @@ def _can_calculation_be_performed(self) -> bool: :return: Information if the calculation can be performed. """ return ( - self._matrix.shape[0] == self._matrix.shape[1] + len(self._matrix) == len(self._matrix[0]) and len(self._output_state) == len(self._input_state) - and len(self._output_state) <= self._matrix.shape[0] + and len(self._output_state) <= len(self._matrix[0]) ) @@ -80,22 +80,24 @@ class BSGuanCodeBasedPermanentCalculatorBase(BSPermanentCalculatorBase, abc.ABC) def __init__( self, - matrix: ndarray, - input_state: Optional[ndarray] = None, - output_state: Optional[ndarray] = None, + matrix: Sequence[Sequence[complex128]], + input_state: Optional[Sequence[int]] = None, + output_state: Optional[Sequence[int]] = None, ) -> None: super().__init__(matrix, input_state, output_state) # Guan codes-related variables - self._r_vector: ndarray = zeros(len(self._input_state), dtype=int) # g - self._code_update_information: ndarray = ones( - len(self._input_state), dtype=int - ) # u + self._r_vector: List[int] = [0 for _ in range(len(self._input_state))] # g + self._code_update_information: List[int] = [ + 1 for _ in range(len(self._input_state)) + ] # u + self._position_limits: List[int] = list(self._input_state) # n self._index_to_update: int = 0 self._last_value_at_index: int = 0 self._binomials_product: int = 1 + self._multiplier: int = 1 self.permanent: complex128 def _initialize_permanent_computation(self) -> None: @@ -112,10 +114,10 @@ def _initialize_permanent_computation(self) -> None: def _initialize_guan_codes_variables(self) -> None: """ - Initializes Guan codes-related variables before the permanents computation. + Initializes Guan codes-related variables before the permanent computation. 
""" - self._r_vector = zeros(len(self._input_state), dtype=int) # g - self._code_update_information = ones(len(self._input_state), dtype=int) # u + self._r_vector = [0 for _ in range(len(self._input_state))] # g + self._code_update_information = [1 for _ in range(len(self._input_state))] # u self._position_limits = list(self._input_state) # n def _update_guan_code(self) -> None: @@ -148,8 +150,8 @@ def _update_guan_code(self) -> None: def _update_binomials_product(self) -> None: """ - Update the binomials product to reflect the new Guan code instead of recomputing - it. + Update the binomial coefficients product to reflect the new Guan code instead of + recomputing it. """ if self._r_vector[self._index_to_update] > self._last_value_at_index: self._binomials_product *= ( diff --git a/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_interface.py b/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_interface.py index 180b49c..a307f86 100644 --- a/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_interface.py +++ b/theboss/boson_sampling_utilities/permanent_calculators/bs_permanent_calculator_interface.py @@ -1,49 +1,52 @@ __author__ = "Tomasz Rybotycki" """ - This file holds an interface for Boson Sampling Permanent Calculators. Boson Sampling (BS) Permanent calculation is - different in that way, that it requires computing (or rather BS is described by) effective scattering matrix thus - the permanent returned is not necessarily the permanent of a specified matrix. In order to compute the permanent - of a given matrix one should specify input and output states as [1, ... , 1]. + This file holds an interface for Boson Sampling (BS) Permanent Calculators. + BS permanent calculation is different from the standard permanent calculation in a + way, that it requires computing a reduced matrix (called in BS the effective + scattering matrix) that depends on the input and the output state. 
+ + In order to compute the permanent of a given matrix one should set input and output + states to [1, ... , 1]. """ import abc -from numpy import complex128, ndarray +from typing import Sequence class BSPermanentCalculatorInterface(abc.ABC): @abc.abstractmethod - def compute_permanent(self) -> complex128: + def compute_permanent(self) -> complex: """Computes permanent of a matrix given before.""" raise NotImplementedError @property @abc.abstractmethod - def matrix(self) -> ndarray: + def matrix(self) -> Sequence[Sequence[complex]]: raise NotImplementedError @matrix.setter @abc.abstractmethod - def matrix(self, matrix: ndarray) -> None: + def matrix(self, matrix: Sequence[Sequence[complex]]) -> None: raise NotImplementedError @property @abc.abstractmethod - def input_state(self) -> ndarray: + def input_state(self) -> Sequence[int]: raise NotImplementedError @input_state.setter @abc.abstractmethod - def input_state(self, input_state: ndarray) -> None: + def input_state(self, input_state: Sequence[int]) -> None: raise NotImplementedError @property @abc.abstractmethod - def output_state(self) -> ndarray: + def output_state(self) -> Sequence[int]: raise NotImplementedError @output_state.setter @abc.abstractmethod - def output_state(self, output_state: ndarray) -> None: + def output_state(self, output_state: Sequence[int]) -> None: raise NotImplementedError diff --git a/theboss/boson_sampling_utilities/permanent_calculators/bs_submatrices_permanent_calculator_base.py b/theboss/boson_sampling_utilities/permanent_calculators/bs_submatrices_permanent_calculator_base.py index 556e116..1b6c155 100644 --- a/theboss/boson_sampling_utilities/permanent_calculators/bs_submatrices_permanent_calculator_base.py +++ b/theboss/boson_sampling_utilities/permanent_calculators/bs_submatrices_permanent_calculator_base.py @@ -9,7 +9,7 @@ from theboss.boson_sampling_utilities.permanent_calculators.bs_submatrices_permanent_calculator_interface import ( 
BSSubmatricesPermanentCalculatorInterface, ) -from typing import Optional, List +from typing import Optional, List, Sequence from numpy import ndarray, int64, array, zeros, ones, complex128, nonzero import abc @@ -28,41 +28,41 @@ class BSSubmatricesPermanentCalculatorBase( def __init__( self, - matrix: ndarray, - input_state: Optional[ndarray] = None, - output_state: Optional[ndarray] = None, + matrix: Sequence[Sequence[complex]], + input_state: Optional[Sequence[int]] = None, + output_state: Optional[Sequence[int]] = None, ) -> None: if output_state is None: - output_state = array([], dtype=int64) + output_state = [] if input_state is None: - input_state = array([], dtype=int64) - self._matrix = matrix - self._input_state = input_state - self._output_state = output_state + input_state = [] + self._matrix: Sequence[Sequence[complex]] = matrix + self._input_state: Sequence[int] = input_state + self._output_state: Sequence[int] = output_state @property - def matrix(self) -> ndarray: + def matrix(self) -> Sequence[Sequence[complex]]: return self._matrix @matrix.setter - def matrix(self, matrix: ndarray) -> None: + def matrix(self, matrix: Sequence[Sequence[complex]]) -> None: self._matrix = matrix @property - def input_state(self) -> ndarray: + def input_state(self) -> Sequence[int]: return self._input_state @input_state.setter - def input_state(self, input_state: ndarray) -> None: - self._input_state = array(input_state, dtype=int64) + def input_state(self, input_state: Sequence[int]) -> None: + self._input_state = input_state @property - def output_state(self) -> ndarray: + def output_state(self) -> Sequence[int]: return self._output_state @output_state.setter - def output_state(self, output_state: ndarray) -> None: - self._output_state = array(output_state, dtype=int64) + def output_state(self, output_state: Sequence[int]) -> None: + self._output_state = output_state class BSGuanBasedSubmatricesPermanentCalculatorBase( @@ -76,9 +76,9 @@ class 
BSGuanBasedSubmatricesPermanentCalculatorBase( def __init__( self, - matrix: ndarray, - input_state: Optional[ndarray] = None, - output_state: Optional[ndarray] = None, + matrix: Sequence[Sequence[complex]], + input_state: Optional[Sequence[int]] = None, + output_state: Optional[Sequence[int]] = None, ) -> None: super().__init__(matrix, input_state, output_state) @@ -98,7 +98,7 @@ def __init__( def _initialize_guan_codes_variables(self) -> None: """ - Initializes Guan codes-related variables before the permanents computation. + Initializes Guan codes-related variables before the permanent computation. """ self._r_vector = zeros(len(self._input_state), dtype=int) # g self._code_update_information = ones(len(self._input_state), dtype=int) # u @@ -155,7 +155,7 @@ def compute_permanents(self) -> List[complex128]: # Take care of the edge-case, where only 1 sub-matrix is valid (and empty). if sum(self.input_state) == 1: - return list(self.input_state) + return [complex128(v) for v in self.input_state] self._initialize_permanents_computation() diff --git a/theboss/boson_sampling_utilities/permanent_calculators/classic_permanent_calculator.py b/theboss/boson_sampling_utilities/permanent_calculators/classic_permanent_calculator.py index 4329dfe..25b9369 100644 --- a/theboss/boson_sampling_utilities/permanent_calculators/classic_permanent_calculator.py +++ b/theboss/boson_sampling_utilities/permanent_calculators/classic_permanent_calculator.py @@ -7,7 +7,7 @@ from typing import List, Optional -from numpy import complex128, ndarray +from numpy import complex128, ndarray, asarray from ..boson_sampling_utilities import EffectiveScatteringMatrixCalculator from ...boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_base import ( @@ -36,7 +36,7 @@ def compute_permanent(self) -> complex128: self._matrix, self._input_state, self._output_state ) scattering_matrix = scattering_matrix_calculator.calculate() - return 
self._compute_permanent_recursively(scattering_matrix) + return self._compute_permanent_recursively(asarray(scattering_matrix)) def _compute_permanent_recursively(self, matrix: ndarray) -> complex128: """ diff --git a/theboss/boson_sampling_utilities/permanent_calculators/ryser_permanent_calculator.py b/theboss/boson_sampling_utilities/permanent_calculators/ryser_permanent_calculator.py index 0e56a4f..b44976c 100644 --- a/theboss/boson_sampling_utilities/permanent_calculators/ryser_permanent_calculator.py +++ b/theboss/boson_sampling_utilities/permanent_calculators/ryser_permanent_calculator.py @@ -7,9 +7,9 @@ not-continuous (like [1, 1, 0, 1, 0]) inputs. """ -from typing import Optional, Dict +from typing import Optional, Dict, Sequence -from numpy import complex128, ndarray +from numpy import complex128 from ..permanent_calculators.bs_permanent_calculator_base import ( BSGuanCodeBasedPermanentCalculatorBase, @@ -32,13 +32,13 @@ class RyserPermanentCalculator(BSGuanCodeBasedPermanentCalculatorBase): def __init__( self, - matrix: ndarray, - input_state: Optional[ndarray] = None, - output_state: Optional[ndarray] = None, + matrix: Sequence[Sequence[complex128]], + input_state: Optional[Sequence[int]] = None, + output_state: Optional[Sequence[int]] = None, ) -> None: super().__init__(matrix, input_state, output_state) self._multiplier: int - self._considered_columns_indices: ndarray + self._considered_columns_indices: Sequence[int] self.permanent: complex128 self._sums: Dict[int, complex128] diff --git a/theboss/distribution_calculators/bs_appriximated_distribution_from_separable_states_calculator.py b/theboss/distribution_calculators/bs_appriximated_distribution_from_separable_states_calculator.py index 9b89178..08a27d1 100644 --- a/theboss/distribution_calculators/bs_appriximated_distribution_from_separable_states_calculator.py +++ b/theboss/distribution_calculators/bs_appriximated_distribution_from_separable_states_calculator.py @@ -15,7 +15,7 @@ 
BosonSamplingExperimentConfiguration, ) from ..boson_sampling_utilities.boson_sampling_utilities import ( - generate_possible_outputs, + generate_possible_states, generate_qft_matrix_for_first_m_modes, generate_random_phases_matrix_for_first_m_modes, prepare_interferometer_matrix_in_expanded_space, @@ -94,7 +94,7 @@ def calculate_distribution(self) -> List[float]: """ Computes whole distribution basing on configuration. """ - possible_outcomes = generate_possible_outputs( + possible_outcomes = generate_possible_states( self.configuration.number_of_particles_left, self.configuration.number_of_modes, ) @@ -170,10 +170,10 @@ def _get_2m_outcomes_corresponding_to_the_outcome( all m-mode (n-k)-particles states and just hstack them with output. """ considered_outcomes = [] - possible_m_mode_outputs_with_less_particles = generate_possible_outputs( - number_of_particles=self._configuration.initial_number_of_particles + possible_m_mode_outputs_with_less_particles = generate_possible_states( + particles_number=self._configuration.initial_number_of_particles - sum(outcome), - number_of_modes=self._configuration.number_of_modes, + modes_number=self._configuration.number_of_modes, ) for possible_output in possible_m_mode_outputs_with_less_particles: @@ -186,7 +186,7 @@ def get_outcomes_in_proper_order(self) -> List[ndarray]: Returns states in the same order that distribution probabilities were calculated in. 
""" - return generate_possible_outputs( + return generate_possible_states( self._configuration.number_of_particles_left, self._configuration.number_of_modes, ) diff --git a/theboss/distribution_calculators/bs_distribution_calculator_interface.py b/theboss/distribution_calculators/bs_distribution_calculator_interface.py index 6b83e03..5f20304 100644 --- a/theboss/distribution_calculators/bs_distribution_calculator_interface.py +++ b/theboss/distribution_calculators/bs_distribution_calculator_interface.py @@ -2,9 +2,7 @@ import abc from dataclasses import dataclass -from typing import List, Iterable - -from numpy import ndarray +from typing import List, Sequence from ..network_simulation_strategy import network_simulation_strategy @@ -12,13 +10,15 @@ # TODO TR: MO doesn't approve of this class. It should be changed somehow. @dataclass class BosonSamplingExperimentConfiguration: - interferometer_matrix: ndarray # A matrix describing interferometer. - initial_state: ndarray + interferometer_matrix: Sequence[ + Sequence[complex] + ] # A matrix describing interferometer. + initial_state: Sequence[int] initial_number_of_particles: int number_of_modes: int number_of_particles_lost: int number_of_particles_left: int - uniform_transmissivity: float = 0 + uniform_transmissivity: float = 1 network_simulation_strategy: network_simulation_strategy = None # TODO TR: Previously we've used the number of approximated modes instead of the # the hierarchy level. There may be some errors after the changes, that @@ -36,12 +36,17 @@ def calculate_distribution(self) -> List[float]: @abc.abstractmethod def calculate_probabilities_of_outcomes( - self, outcomes: Iterable[Iterable[int]] + self, outcomes: List[Sequence[int]] ) -> List[float]: - """ This method allows one to compute probabilities of only selected outcomes. """ + """ + This method allows one to compute probabilities of only selected outcomes. 
+ """ raise NotImplementedError @abc.abstractmethod - def get_outcomes_in_proper_order(self) -> List[ndarray]: - """ One also has to know the order of objects that returned probabilities correspond to """ + def get_outcomes_in_proper_order(self) -> List[Sequence[int]]: + """ + One also has to know the order of objects that returned probabilities correspond + to. + """ raise NotImplementedError diff --git a/theboss/distribution_calculators/bs_distribution_calculator_with_fixed_losses.py b/theboss/distribution_calculators/bs_distribution_calculator_with_fixed_losses.py index e684c54..045cee3 100644 --- a/theboss/distribution_calculators/bs_distribution_calculator_with_fixed_losses.py +++ b/theboss/distribution_calculators/bs_distribution_calculator_with_fixed_losses.py @@ -8,8 +8,8 @@ from scipy.special import binom from theboss.boson_sampling_utilities.boson_sampling_utilities import ( - generate_lossy_inputs, - generate_possible_outputs, + generate_lossy_n_particle_input_states, + generate_possible_states, ) from theboss.boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_interface import ( BSPermanentCalculatorInterface, @@ -44,7 +44,7 @@ def get_outcomes_in_proper_order(self) -> List[ndarray]: :return: All the possible outcomes of BS experiment specified by the configuration. """ - return generate_possible_outputs( + return generate_possible_states( self.configuration.number_of_particles_left, self.configuration.number_of_modes, ) @@ -119,7 +119,7 @@ def __compute_probability_of_outcome_state_for_indistinguishable_photons( probability_of_outcome = 0 # Symmetrize the input. 
- lossy_inputs_list = generate_lossy_inputs( + lossy_inputs_list = generate_lossy_n_particle_input_states( self.configuration.initial_state, self.configuration.number_of_particles_left, ) diff --git a/theboss/distribution_calculators/bs_exact_distribution_with_uniform_losses.py b/theboss/distribution_calculators/bs_exact_distribution_with_uniform_losses.py index aff5189..916c619 100644 --- a/theboss/distribution_calculators/bs_exact_distribution_with_uniform_losses.py +++ b/theboss/distribution_calculators/bs_exact_distribution_with_uniform_losses.py @@ -1,15 +1,15 @@ __author__ = "Tomasz Rybotycki" from copy import deepcopy -from typing import List, Iterable +from typing import List, Iterable, Tuple from numpy import ndarray from scipy import special -from ..boson_sampling_utilities.boson_sampling_utilities import ( - generate_possible_outputs, +from theboss.boson_sampling_utilities.boson_sampling_utilities import ( + generate_possible_states, ) -from ..distribution_calculators.bs_distribution_calculator_interface import ( +from theboss.distribution_calculators.bs_distribution_calculator_interface import ( BosonSamplingExperimentConfiguration, ) from ..distribution_calculators.bs_distribution_calculator_with_fixed_losses import ( @@ -96,9 +96,9 @@ def _calculate_probability_of_outcome(self, outcome: ndarray) -> float: return probability_of_outcome * self.weights[l] - def get_outcomes_in_proper_order(self) -> List[ndarray]: - return generate_possible_outputs( + def get_outcomes_in_proper_order(self) -> List[Tuple[int, ...]]: + return generate_possible_states( self.configuration.initial_number_of_particles, self.configuration.number_of_modes, - consider_loses=True, + losses=True, ) diff --git a/theboss/distribution_calculators/bs_sample_based_distribution_calculator.py b/theboss/distribution_calculators/bs_sample_based_distribution_calculator.py index e1f7a0d..065dbe2 100644 --- a/theboss/distribution_calculators/bs_sample_based_distribution_calculator.py +++ 
b/theboss/distribution_calculators/bs_sample_based_distribution_calculator.py @@ -2,12 +2,10 @@ from typing import List, Iterable -from numpy import float64, ndarray, zeros +from numpy import float64, ndarray, zeros, asarray from ..boson_sampling_simulator import BosonSamplingSimulator -from ..boson_sampling_utilities.boson_sampling_utilities import ( - generate_possible_outputs, -) +from ..boson_sampling_utilities.boson_sampling_utilities import generate_possible_states from ..distribution_calculators.bs_distribution_calculator_interface import ( BosonSamplingExperimentConfiguration, BSDistributionCalculatorInterface, @@ -77,15 +75,16 @@ def calculate_approximate_distribution( self, samples_number: int = 5000 ) -> List[float]: """ - Prepares the approximate distribution using boson sampling simulation method described by - Oszmaniec and Brod. Obviously higher number of samples will generate better approximation. + Prepares the approximate distribution using boson sampling simulation method + described by Oszmaniec and Brod. Obviously higher number of samples will + generate better approximation. :return: Approximate distribution as a list. """ if self._outcomes is not None: possible_outcomes = self._outcomes else: - possible_outcomes = generate_possible_outputs( + possible_outcomes = generate_possible_states( self.configuration.number_of_particles_left, self.configuration.number_of_modes, ) @@ -101,7 +100,7 @@ def calculate_approximate_distribution( for j in range(len(possible_outcomes)): # Check if obtained result is one of possible outcomes. if all( - sample == possible_outcomes[j] + asarray(sample) == possible_outcomes[j] ): # Expect all elements of resultant list to be True. 
outcomes_probabilities[j] += 1 break diff --git a/theboss/network_simulation_strategy/lossless_network_simulation_strategy.py b/theboss/network_simulation_strategy/lossless_network_simulation_strategy.py index 19033f6..6e1fede 100644 --- a/theboss/network_simulation_strategy/lossless_network_simulation_strategy.py +++ b/theboss/network_simulation_strategy/lossless_network_simulation_strategy.py @@ -1,13 +1,28 @@ __author__ = "Tomasz Rybotycki" from numpy import dot, ndarray +from typing import Sequence from .network_simulation_strategy import NetworkSimulationStrategy class LosslessNetworkSimulationStrategy(NetworkSimulationStrategy): - def __init__(self, matrix: ndarray) -> None: - self._matrix = matrix + """ + A class implementing the evolution of a state of distinguishable particles + through an interferometer. + """ - def simulate(self, input_state: ndarray) -> ndarray: + def __init__(self, matrix: Sequence[Sequence[complex]]) -> None: + self._matrix: Sequence[Sequence[complex]] = matrix + + def simulate(self, input_state: Sequence[int]) -> ndarray: + """ + Computes the dot product of matrix and the input state. + + :param input_state: + The input state to be evolved. + + :return: + The input state evolved through the interferometer matrix. 
+ """ return dot(self._matrix, input_state) diff --git a/theboss/network_simulation_strategy/lossy_network_simulation_strategy.py b/theboss/network_simulation_strategy/lossy_network_simulation_strategy.py index 879e0b8..43bc10b 100644 --- a/theboss/network_simulation_strategy/lossy_network_simulation_strategy.py +++ b/theboss/network_simulation_strategy/lossy_network_simulation_strategy.py @@ -1,33 +1,41 @@ __author__ = "Tomasz Rybotycki" -from numpy import complex128, delete, ndarray, vstack, zeros_like +from numpy import complex128, delete, ndarray, vstack, zeros_like, asarray from .network_simulation_strategy import NetworkSimulationStrategy -from ..boson_sampling_utilities.boson_sampling_utilities import ( +from theboss.boson_sampling_utilities.boson_sampling_utilities import ( prepare_interferometer_matrix_in_expanded_space, ) +from typing import Sequence class LossyNetworkSimulationStrategy(NetworkSimulationStrategy): - def __init__(self, matrix: ndarray) -> None: + def __init__(self, matrix: Sequence[Sequence[complex]]) -> None: self._matrix = prepare_interferometer_matrix_in_expanded_space(matrix) - def simulate(self, input_state: ndarray) -> ndarray: + def simulate(self, input_state: Sequence[int]) -> ndarray: """ This method is for simulating lossy network. - Instead of using NxN matrix, what we need to do is create a 2N x 2N system, and then, depending on singular - values of passed matrix, we have to transfer some photons into inaccessible, additional modes and then trim - these additional modes. + Instead of using NxN matrix, what we need to do is create a 2N x 2N system, and + then, depending on singular values of passed matrix, we have to transfer some + photons into inaccessible, additional modes and then trim these additional + modes. - :param input_state: State before parsing through the interferometer. Assume mode occupation basis. - :return: Lossy output state. + :param input_state: + State before parsing through the interferometer. 
Assume mode occupation + representation. + + :return: + Lossy output state. """ # Divide by two, because we have 2N x 2N matrix - input_state = input_state.reshape(self._matrix.shape[0] // 2, 1) - expansion_zeros = zeros_like(input_state, dtype=complex128) - expanded_state = vstack([input_state, expansion_zeros]) - evolved_state = self._matrix @ expanded_state + input_state: ndarray = asarray(input_state).reshape( + self._matrix.shape[0] // 2, 1 + ) + expansion_zeros: ndarray = zeros_like(input_state, dtype=complex128) + expanded_state: ndarray = vstack([input_state, expansion_zeros]) + evolved_state: ndarray = self._matrix @ expanded_state # Trim the resultant state while evolved_state.shape[0] > input_state.shape[0]: evolved_state = delete(evolved_state, evolved_state.shape[0] - 1) diff --git a/theboss/quantum_computations_utilities.py b/theboss/quantum_computations_utilities.py index 93d4c9b..310e1c2 100644 --- a/theboss/quantum_computations_utilities.py +++ b/theboss/quantum_computations_utilities.py @@ -5,7 +5,7 @@ # TODO TR: Consider releasing this file as a separate package. -from typing import List, Union +from typing import List, Union, Dict, DefaultDict, Tuple, Set from numpy import ( abs, @@ -18,10 +18,37 @@ asarray, tile, power, - diag, - dot, ) -from numpy.random import randn +from collections import defaultdict + + +def count_total_variation_distance_dicts( + distribution_1: Dict[Tuple[int, ...], float], + distribution_2: Dict[Tuple[int, ...], float], +) -> float: + """ + This method compute TVD between two distributions. We assume that both distributions + sums up to 1. + + :param distribution_1: + First distribution. + :param distribution_2: + Second distribution. + + :return: + Total variation distance between the given distributions. + """ + # Get common keys. + keys: Set[Tuple[int, ...]] = set() + keys.update(distribution_1.keys()) + keys.update(distribution_2.keys()) + + # Wrap distributions into defaultdicts to take care of missing keys. 
+ distribution1: DefaultDict[Tuple[int, ...], float] = defaultdict(lambda: 0, distribution_1)
+ distribution2: DefaultDict[Tuple[int, ...], float] = defaultdict(lambda: 0, distribution_2)
+
+ # Compute the tvd.
+ return 0.5 * sum([abs(distribution1[key] - distribution2[key]) for key in keys])
def count_total_variation_distance(
diff --git a/theboss/simulation_strategies/cliffords_r_simulation_strategy.py b/theboss/simulation_strategies/cliffords_r_simulation_strategy.py
index f9408e3..ca6ad11 100644
--- a/theboss/simulation_strategies/cliffords_r_simulation_strategy.py
+++ b/theboss/simulation_strategies/cliffords_r_simulation_strategy.py
@@ -10,30 +10,50 @@
from numpy import arange, array, array_split, int64, ndarray, isclose
from scipy.special import binom
from collections import defaultdict
-from typing import List, Dict, Tuple, DefaultDict
-
-from .simulation_strategy_interface import SimulationStrategyInterface
-
+from typing import List, Dict, Tuple, DefaultDict, Sequence
from rpy2 import robjects
from rpy2.robjects import packages
+from theboss.simulation_strategies.simulation_strategy_interface import (
+ SimulationStrategyInterface,
+)
+
from ..boson_sampling_utilities.boson_sampling_utilities import (
- particle_state_to_modes_state,
+ mode_assignment_to_mode_occupation,
)
class CliffordsRSimulationStrategy(SimulationStrategyInterface):
- def __init__(self, interferometer_matrix: ndarray) -> None:
+ """
+ A wrapper for C&C R implementation of their algorithm.
+ """
+
+ def __init__(self, interferometer_matrix: Sequence[Sequence[complex]]) -> None:
self.interferometer_matrix = interferometer_matrix
boson_sampling_package = packages.importr("BosonSampling")
self.cliffords_r_sampler = boson_sampling_package.bosonSampler
- def set_matrix(self, interferometer_matrix: ndarray) -> None:
+ def set_matrix(self, interferometer_matrix: Sequence[Sequence[int]]) -> None:
+ """
+ Sets new interferometer matrix.
+
+ :param interferometer_matrix:
+ New interferometer matrix.
+ """ self.interferometer_matrix = interferometer_matrix @staticmethod def _numpy_array_to_r_matrix(numpy_array: ndarray) -> robjects.r.matrix: + """ + Transforms numpy.ndarray into the robjects.r.matrix object. + + :param numpy_array: + The numpy.ndarray object to be transformed into robjects.r.matrix. + + :return: + The matrix given in the input as the robjects.r.matrix object. + """ rows_number, columns_number = numpy_array.shape # Transposition is required as R inserts columns, not rows. r_values = robjects.ComplexVector( @@ -42,24 +62,26 @@ def _numpy_array_to_r_matrix(numpy_array: ndarray) -> robjects.r.matrix: return robjects.r.matrix(r_values, nrow=rows_number, ncol=columns_number) def simulate( - self, initial_state: ndarray, samples_number: int = 1 - ) -> List[ndarray]: + self, initial_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int, ...]]: """ - Simulate BS experiment for given input. + Simulate BS experiment for given input. - Note: The results of Clifford & Clifford method are given in the first - quantization description (mode assignment)! + Note: The results of Clifford & Clifford method are given in the first + quantization description (mode assignment)! - :param initial_state: Input state in the modes occupation description. - :param samples_number: Number of samples to sample. + :param initial_state: + Input state in the modes occupation description. + :param samples_number: + Number of samples to sample. - :return: List of samples in the first quantization description (mode - assignment) + :return: + List of samples in the mode occupation representation. 
""" number_of_bosons = int(sum(initial_state)) boson_sampler_input_matrix = self._numpy_array_to_r_matrix( - self.interferometer_matrix[:, arange(number_of_bosons)] + array(self.interferometer_matrix)[:, arange(number_of_bosons)] ) result, permanent, probability_mass_function = self.cliffords_r_sampler( @@ -78,14 +100,27 @@ def simulate( for sample in samples_in_particle_states: samples_in_occupation_description.append( - particle_state_to_modes_state(sample, len(initial_state)) + mode_assignment_to_mode_occupation(sample, len(initial_state)) ) return samples_in_occupation_description def find_probabilities( - self, initial_state: ndarray, outcomes_of_interest: List[ndarray] + self, initial_state: Sequence[int], outcomes_of_interest: List[Tuple[int, ...]] ) -> Dict[Tuple[int, ...], float]: + """ + An additional "sanity-check" method that uses C&C strategy to compute the + probabilities of the outcomes of interest. + + :param initial_state: + Input state of the BS experiment. + + :param outcomes_of_interest: + The outcomes of which probabilities will be returned. + + :return: + Probabilities of the specified outcomes. 
+ """ number_of_bosons = int(sum(initial_state)) @@ -94,7 +129,7 @@ def find_probabilities( outcomes_probabilities: dict = {} boson_sampler_input_matrix = self._numpy_array_to_r_matrix( - self.interferometer_matrix[:, arange(number_of_bosons)] + array(self.interferometer_matrix)[:, arange(number_of_bosons)] ) number_of_samplings = 0 @@ -113,10 +148,8 @@ def find_probabilities( ) sample_in_particle_states = array_split(python_result, 1)[0] - sample = tuple( - particle_state_to_modes_state( - sample_in_particle_states, len(initial_state) - ) + sample = mode_assignment_to_mode_occupation( + sample_in_particle_states, len(initial_state) ) if sample in outcomes_of_interest: @@ -128,14 +161,31 @@ def find_probabilities( return outcomes_probabilities def find_probabilities_of_n_random_states( - self, initial_state: ndarray, number_of_random_states: int + self, initial_state: Tuple[int, ...], number_of_random_states: int ) -> DefaultDict[Tuple[int, ...], float]: + """ + An additional "sanity-check" method that uses C&C strategy to compute the + probabilities of the outcomes of interest. + + Note: This method may run infinitely if the number of specified states is + impossible to achieve in given experiment config. + + :param initial_state: + Input state of the BS experiment. + + :param number_of_random_states: + The number of first distinct output states sampled using C&C method of + which the probabilities will be returned. + + :return: + Probabilities of the specified outcomes. 
+ """ n = int(sum(initial_state)) m = len(initial_state) boson_sampler_input_matrix = self._numpy_array_to_r_matrix( - self.interferometer_matrix[:, arange(n)] + array(self.interferometer_matrix)[:, arange(n)] ) if int(binom(n + m - 1, m - 1)) < number_of_random_states: @@ -162,7 +212,7 @@ def find_probabilities_of_n_random_states( sample_in_particle_states = array_split(python_result, 1)[0] sample = tuple( - particle_state_to_modes_state( + mode_assignment_to_mode_occupation( sample_in_particle_states, len(initial_state) ) ) diff --git a/theboss/simulation_strategies/fixed_loss_simulation_strategy.py b/theboss/simulation_strategies/fixed_loss_simulation_strategy.py index cc46c04..37fbd95 100644 --- a/theboss/simulation_strategies/fixed_loss_simulation_strategy.py +++ b/theboss/simulation_strategies/fixed_loss_simulation_strategy.py @@ -1,24 +1,30 @@ __author__ = "Tomasz Rybotycki" from random import random -from typing import List, Optional +from typing import List, Optional, Tuple, Sequence from numpy import conjugate, exp, ndarray, ones, sqrt, zeros, pi from numpy.random import rand -from .simulation_strategy_interface import SimulationStrategyInterface -from ..network_simulation_strategy.lossy_network_simulation_strategy import ( +from theboss.simulation_strategies.simulation_strategy_interface import ( + SimulationStrategyInterface, +) +from theboss.network_simulation_strategy.lossy_network_simulation_strategy import ( LossyNetworkSimulationStrategy, ) -from ..network_simulation_strategy.network_simulation_strategy import ( +from theboss.network_simulation_strategy.network_simulation_strategy import ( NetworkSimulationStrategy, ) class FixedLossSimulationStrategy(SimulationStrategyInterface): + """ + A class implementing the approximated simulation strategy for fixed losses. 
+ """ + def __init__( self, - interferometer_matrix: ndarray, + interferometer_matrix: Sequence[Sequence[complex]], number_of_photons_left: int, number_of_observed_modes: int, network_simulation_strategy: Optional[NetworkSimulationStrategy] = None, @@ -27,18 +33,24 @@ def __init__( network_simulation_strategy = LossyNetworkSimulationStrategy( interferometer_matrix ) - self.number_of_photons_left = number_of_photons_left - self.interferometer_matrix = interferometer_matrix - self.number_of_observed_modes = number_of_observed_modes + self.number_of_photons_left: int = number_of_photons_left + self.interferometer_matrix: Sequence[Sequence[complex]] = interferometer_matrix + self.number_of_observed_modes: int = number_of_observed_modes self._network_simulation_strategy = network_simulation_strategy - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int, ...]]: """ - Returns an sample from the approximate distribution in fixed losses regime. + Returns a sample from the approximate distribution in fixed losses regime. - :param samples_number: Number of samples one wants to simulate. - :param input_state: Usually n-particle Fock state in m modes. - :return: A sample from the approximation. + :param samples_number: + Number of samples one wants to simulate. + :param input_state: + Usually n-particle Fock state in m modes. + + :return: + A sample from the approximation. 
""" samples = [] while len(samples) < samples_number: @@ -48,15 +60,23 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra ) probabilities = self._calculate_probabilities(evolved_state) samples.append( - self._calculate_approximation_of_boson_sampling_outcome(probabilities) + tuple( + self._calculate_approximation_of_boson_sampling_outcome( + probabilities + ) + ) ) return samples - def _prepare_initial_state(self, input_state: ndarray) -> ndarray: + def _prepare_initial_state(self, input_state: Sequence[int]) -> ndarray: """ - This method is used to prepare psi_0 state (formula 23 from ref. [1]). - :param input_state: Initial lossy bosonic state. - :return: Returns the initial state of the formula, which is an equal superposition + This method is used to prepare psi_0 state (formula 23 from ref. [1]). + + :param input_state: + Initial lossy bosonic state. + + :return: + Returns the initial state of the formula, which is an equal superposition of n photons 'smeared' on the first n modes. """ initial_number_of_photons = int(sum(input_state)) @@ -71,25 +91,43 @@ def _prepare_initial_state(self, input_state: ndarray) -> ndarray: @staticmethod def _randomize_modes_phases(state_in_modes_basis: ndarray) -> ndarray: """ - Randomize the phases of given mode state. Each mode should have different iid random phase. - :param state_in_modes_basis: A given state in modes basis. - :return: Given mode state with randomized phases. + Randomize the phases of given mode state. Each mode should have different + iid random phase. + + :param state_in_modes_basis: + A given state in modes basis. + + :return: + Given mode state with randomized phases. 
""" return exp(1j * 2 * pi * rand(len(state_in_modes_basis))) * state_in_modes_basis @staticmethod - def _calculate_probabilities(state: ndarray) -> ndarray: - return conjugate(state) * state + def _calculate_probabilities(evolved_state: ndarray) -> ndarray: + """ + Computes the probabilities of finding a particle in every mode. + + :param evolved_state: + The approximate state evolved through the interferometer. + + :return: + Probabilities of finding a particle in each of the modes. + """ + return conjugate(evolved_state) * evolved_state def _calculate_approximation_of_boson_sampling_outcome( self, probabilities: ndarray ) -> ndarray: """ - This method applies evolution to every photon. Note, that evolution of each particle is independent of - each other. - :param probabilities: - :return: A lossy boson state after traversing through interferometer. The state is described in first - quantization (mode assignment basis). + This method applies evolution to every photon. Note, that evolution of each + particle is independent of each other. + + :param probabilities: + Probabilities of finding a particle in each of the modes. + + :return: + A lossy boson state after traversing through interferometer. The state is + described in first quantization (mode assignment representation). """ output = zeros(self.number_of_observed_modes) for photon in range(self.number_of_photons_left): @@ -103,4 +141,5 @@ def _calculate_approximation_of_boson_sampling_outcome( prob += probabilities[i] if len(probabilities) != i: output[i] += 1 + return output diff --git a/theboss/simulation_strategies/generalized_cliffords_b_simulation_strategy.py b/theboss/simulation_strategies/generalized_cliffords_b_simulation_strategy.py index 2be1278..36a4eaf 100644 --- a/theboss/simulation_strategies/generalized_cliffords_b_simulation_strategy.py +++ b/theboss/simulation_strategies/generalized_cliffords_b_simulation_strategy.py @@ -10,9 +10,9 @@ from an easier distribution and obtain the same results. 
""" -from typing import List +from typing import List, Sequence, Tuple -from numpy import array, ndarray, int64, zeros_like +from numpy import zeros_like from numpy.random import choice, randint from theboss.simulation_strategies.generalized_cliffords_simulation_strategy import ( @@ -20,7 +20,7 @@ BSPermanentCalculatorInterface, ) from theboss.boson_sampling_utilities.boson_sampling_utilities import ( - modes_state_to_particle_state, + mode_occupation_to_mode_assignment, ) from theboss.boson_sampling_utilities.permanent_calculators.bs_cc_ryser_submatrices_permanent_calculator import ( @@ -29,23 +29,33 @@ class GeneralizedCliffordsBSimulationStrategy(GeneralizedCliffordsSimulationStrategy): + """ + An implementation of generalized Clifford & Clifford strategy in its B version. + """ + def __init__(self, bs_permanent_calculator: BSPermanentCalculatorInterface) -> None: super().__init__(bs_permanent_calculator) self._current_input = [] self._working_input_state = None - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int, ...]]: """ Returns sample from linear optics experiments given output state. - :param input_state: Input state in particle basis. - :param samples_number: Number of samples to simulate. - :return: A resultant state after traversing through interferometer. + :param input_state: + Input state in particle basis. + :param samples_number: + Number of samples to simulate. + + :return: + A list of samples from the interferometer. 
""" - self.input_state = input_state - self.number_of_input_photons = sum(input_state) + self.input_state: Sequence[int] = input_state + self.number_of_input_photons: int = sum(input_state) - particle_input_state = list(modes_state_to_particle_state(input_state)) + particle_input_state = list(mode_occupation_to_mode_assignment(input_state)) samples = [] @@ -53,7 +63,7 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra self._current_input = zeros_like(input_state) self._working_input_state = particle_input_state.copy() self._fill_r_sample() - samples.append(array(self.r_sample, dtype=int64)) + samples.append(tuple(self.r_sample)) return samples def _compute_pmf(self) -> None: @@ -89,11 +99,12 @@ def _fill_r_sample(self) -> None: self._compute_pmf() self._sample_from_pmf() - def _update_current_input(self): + def _update_current_input(self) -> None: self._current_input[ self._working_input_state.pop(randint(0, len(self._working_input_state))) ] += 1 def _sample_from_pmf(self) -> None: + # TODO TR: Don't use numpy.random.choice, because it's slow. 
m = choice(range(len(self.input_state)), p=self.pmf) self.r_sample[m] += 1 diff --git a/theboss/simulation_strategies/generalized_cliffords_b_uniform_losses_simulation_strategy.py b/theboss/simulation_strategies/generalized_cliffords_b_uniform_losses_simulation_strategy.py index c81f3a8..f2ffcee 100644 --- a/theboss/simulation_strategies/generalized_cliffords_b_uniform_losses_simulation_strategy.py +++ b/theboss/simulation_strategies/generalized_cliffords_b_uniform_losses_simulation_strategy.py @@ -13,7 +13,7 @@ from theboss.simulation_strategies.generalized_cliffords_b_simulation_strategy import ( GeneralizedCliffordsBSimulationStrategy, BSPermanentCalculatorInterface, - modes_state_to_particle_state, + mode_occupation_to_mode_assignment, ) from numpy import ndarray, array, int64, zeros_like from typing import List @@ -32,8 +32,9 @@ class GeneralizedCliffordsBUniformLossesSimulationStrategy( TODO TR: Write tests for this method. TODO TR: The permanent calculator should not be necessary for GCCB algorithms, as - they use something the submatrices calculator and . + they use the submatrices calculator instead. 
""" + def __init__( self, bs_permanent_calculator: BSPermanentCalculatorInterface, @@ -95,7 +96,7 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra self.number_of_input_photons = sum(input_state) self._compute_particle_numbers_probabilities() - particle_input_state = list(modes_state_to_particle_state(input_state)) + particle_input_state = list(mode_occupation_to_mode_assignment(input_state)) samples = [] diff --git a/theboss/simulation_strategies/generalized_cliffords_simulation_strategy.py b/theboss/simulation_strategies/generalized_cliffords_simulation_strategy.py index d57a675..20e8004 100644 --- a/theboss/simulation_strategies/generalized_cliffords_simulation_strategy.py +++ b/theboss/simulation_strategies/generalized_cliffords_simulation_strategy.py @@ -3,42 +3,62 @@ from collections import defaultdict from copy import copy from math import factorial -from typing import List +from typing import List, Sequence, Tuple, DefaultDict from scipy.special import binom from numpy import array, delete, float64, insert, int64, ndarray from numpy.random import random -from .simulation_strategy_interface import SimulationStrategyInterface -from ..boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_interface import ( +from theboss.simulation_strategies.simulation_strategy_interface import ( + SimulationStrategyInterface, +) +from theboss.boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_interface import ( BSPermanentCalculatorInterface, ) class GeneralizedCliffordsSimulationStrategy(SimulationStrategyInterface): + """ + Generalized C&C strategy as proposed in [2]. + """ + def __init__(self, bs_permanent_calculator: BSPermanentCalculatorInterface) -> None: - self.r_sample = [] - self.number_of_input_photons = 0 + self.r_sample: List[int] = list() + self.number_of_input_photons: int = 0 self.pmfs = ( dict() ) # Probability mass functions calculated along the way. Keys should be current r as tuples. 
self._bs_permanent_calculator = bs_permanent_calculator - self.input_state = array([], dtype=int64) - self._labeled_states = defaultdict(list) + self.input_state: Sequence[int] = list() + self._labeled_states: DefaultDict[int, List[Tuple[int, ...]]] = defaultdict( + list + ) self.possible_outputs = dict() - self.current_key = tuple(self.r_sample) - self.current_sample_probability = 1 + self.current_key: Tuple[int, ...] = tuple(self.r_sample) + self.current_sample_probability: float = 1 + + def set_new_matrix(self, new_matrix: Sequence[Sequence[complex]]) -> None: + """ + Sets new interferometer matrix. - def set_new_matrix(self, new_matrix: ndarray) -> None: + :param new_matrix: + New interferometer matrix. + """ self._bs_permanent_calculator.matrix = new_matrix - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int, ...]]: """ - Returns sample from linear optics experiments given output state. + Returns sample from linear optics experiments given output state. + + :param input_state: + Input state in particle basis. + :param samples_number: + Number of samples to simulate. - :param input_state: Input state in particle basis. - :param samples_number: Number of samples to simulate. - :return: A resultant state after traversing through interferometer. + :return: + A resultant state after traversing through interferometer. """ self.input_state = input_state self.number_of_input_photons = sum(input_state) @@ -49,35 +69,40 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra while len(samples) < samples_number: self._fill_r_sample() - samples.append(array(self.r_sample, dtype=int64)) + samples.append(tuple(self.r_sample)) return samples def _get_sorted_possible_states(self) -> None: """ - Calculate and sort all the substates of the input. They will later be used to calculate output - probabilities. 
+ Calculate and sort all the substates of the input. They will later be used + to calculate output probabilities. - :return: Dict of all the possible substates of input (including 0 and the state), labeled with number of - particles in this state. + :return: + Dict of all the possible substates of input (including 0 and the state), + labeled with number of particles in this state. """ # Calculating all possible substates of the input possible_input_states = self._calculate_all_input_substates( - self.input_state.copy() + copy(self.input_state) ) # Labeling them into dict where keys are being number of particles in the state. self._labeled_states = defaultdict(list) for state in possible_input_states: - states_particles_number = sum(state) - self._labeled_states[states_particles_number].append(state) + self._labeled_states[sum(state)].append(state) - def _calculate_all_input_substates(self, state_part_left: ndarray) -> List[ndarray]: + def _calculate_all_input_substates( + self, state_part_left: Sequence[int] + ) -> List[ndarray]: """ Calculates substates of the input in recursive manner. - :param state_part_left: State with reduced modes number. - :return: All the substates for starting number of modes. + :param state_part_left: + State with reduced modes number. + + :return: + All the substates for starting number of modes. """ if len(state_part_left) < 1: return [array([], dtype=int64)] @@ -96,6 +121,9 @@ def _calculate_all_input_substates(self, state_part_left: ndarray) -> List[ndarr return substates def _fill_r_sample(self) -> None: + """ + Creates a sample according to the generalized C&C algorithm. + """ self.r_sample = [0 for _ in self.input_state] self.current_key = tuple(self.r_sample) self.current_sample_probability = 1 @@ -106,17 +134,23 @@ def _fill_r_sample(self) -> None: self._sample_from_latest_pmf() def _calculate_new_layer_of_pmfs(self) -> None: + """ + Adds new layer, from which new particle will be sampled, to the pmfs dict. 
+ """ + number_of_particle_to_sample: int = sum(self.r_sample) + 1 - number_of_particle_to_sample = sum(self.r_sample) + 1 - - possible_input_states = self._labeled_states[number_of_particle_to_sample] + possible_input_states: List[Tuple[int, ...]] = self._labeled_states[ + number_of_particle_to_sample + ] - corresponding_k_vectors = [ + corresponding_k_vectors: List[List[int]] = [ [self.input_state[i] - state[i] for i in range(len(state))] for state in possible_input_states ] - weights = self._calculate_weights_from_k_vectors(corresponding_k_vectors) + weights: List[float] = self._compute_weights_from_k_vectors( + corresponding_k_vectors + ) weights /= sum(weights) self.possible_outputs[ @@ -136,38 +170,71 @@ def _calculate_new_layer_of_pmfs(self) -> None: self.pmfs[self.current_key] = pmf - def _calculate_weights_from_k_vectors( + def _compute_weights_from_k_vectors( self, corresponding_k_vectors: List[List[int]] - ) -> ndarray: - return array( - [self._calculate_weights(vector) for vector in corresponding_k_vectors], - dtype=float64, - ) + ) -> List[float]: + """ + Computes the weights, as in [2], basing on the k_vectors. + + :param corresponding_k_vectors: + A list of k_vectors from which the weights will be computed. + :return: + """ + return [self._calculate_weights(vector) for vector in corresponding_k_vectors] + + def _calculate_weights(self, k_vector: List[int]) -> float: + """ + Computes the weight of the input state basing on the corresponding k_vector + given as the argument. - def _calculate_weights(self, k_vector: List[int]): - l = sum(k_vector) - n = sum(self.input_state) + :param k_vector: + The k_vector for which the weight will be computed. - weight = factorial(l) * factorial(n - l) / factorial(n) + :return: + The weight computed using the k_vector. 
+ + """ + l: int = sum(k_vector) + n: int = sum(self.input_state) + + weight: float = factorial(l) * factorial(n - l) / factorial(n) for m in range(len(self.input_state)): weight *= binom(self.input_state[m], k_vector[m]) return weight - def _generate_possible_output_states(self) -> List[ndarray]: + def _generate_possible_output_states(self) -> List[List[int]]: + """ + Generates a list of possible output states in the current step of the algorithm + basing on the current r_sample. + + :return: + A list of the output state that one may get in the current algorithm's step. + """ possible_output_states = [] for i in range(len(self.r_sample)): new_possible_output = copy(self.r_sample) new_possible_output[i] += 1 - possible_output_states.append(array(new_possible_output, dtype=int64)) + possible_output_states.append(new_possible_output) return possible_output_states def _calculate_outputs_probability( - self, input_state: ndarray, output_state: ndarray + self, input_state: Sequence[int], output_state: Sequence[int] ) -> float: + """ + Computes the probability of the output. + + :param input_state: + Input state of the BS experiment instance. + :param output_state: + Output state of which the probability will be returned. + + :return: + The probability of the given output state. + """ self._bs_permanent_calculator.input_state = input_state self._bs_permanent_calculator.output_state = output_state probability = abs(self._bs_permanent_calculator.compute_permanent()) ** 2 @@ -180,7 +247,9 @@ def _calculate_outputs_probability( return probability def _sample_from_latest_pmf(self) -> None: - + """ + Adds new sample to the output state and handles all the stuff that follows. 
+ """ sample_index = 0 random_value = random() * sum( self.pmfs[self.current_key] diff --git a/theboss/simulation_strategies/generalized_cliffords_uniform_losses_simulation_strategy.py b/theboss/simulation_strategies/generalized_cliffords_uniform_losses_simulation_strategy.py index 1e3f30b..1bc05be 100644 --- a/theboss/simulation_strategies/generalized_cliffords_uniform_losses_simulation_strategy.py +++ b/theboss/simulation_strategies/generalized_cliffords_uniform_losses_simulation_strategy.py @@ -10,9 +10,7 @@ from .generalized_cliffords_simulation_strategy import ( GeneralizedCliffordsSimulationStrategy, ) -from ..boson_sampling_utilities.boson_sampling_utilities import ( - generate_possible_outputs, -) +from ..boson_sampling_utilities.boson_sampling_utilities import generate_possible_states from ..boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_interface import ( BSPermanentCalculatorInterface, ) @@ -66,8 +64,8 @@ def _initialize_simulation(self, input_state: ndarray) -> None: if self.missing_values_in_distribution: distribution_initializer = -1 # -1 to indicate missing spots - self._possible_outputs = generate_possible_outputs( - sum(input_state), len(input_state), consider_loses=True + self._possible_outputs = generate_possible_states( + sum(input_state), len(input_state), losses=True ) self.distribution = [distribution_initializer for _ in self._possible_outputs] self.unweighted_distribution = [ @@ -149,7 +147,7 @@ def _calculate_new_layer_of_pmfs(self) -> None: pmf = [] - weights = self._calculate_weights_from_k_vectors(corresponding_k_vectors) + weights = self._compute_weights_from_k_vectors(corresponding_k_vectors) weights /= sum(weights) self.possible_outputs[ self.current_key diff --git a/theboss/simulation_strategies/lossy_networks_generalized_cliffords_simulation_strategy.py b/theboss/simulation_strategies/lossy_networks_generalized_cliffords_simulation_strategy.py index 4be5bfa..fcfa489 100644 --- 
a/theboss/simulation_strategies/lossy_networks_generalized_cliffords_simulation_strategy.py +++ b/theboss/simulation_strategies/lossy_networks_generalized_cliffords_simulation_strategy.py @@ -1,44 +1,88 @@ __author__ = "Tomasz Rybotycki" -from numpy import delete, ndarray, vstack, zeros_like +""" +A class for exact BS simulation using generalized C&C algorithm (version B). +""" -from .generalized_cliffords_simulation_strategy import ( - GeneralizedCliffordsSimulationStrategy, +from numpy import delete, vstack, zeros_like, complex128 +from typing import Sequence, Tuple + +from theboss.simulation_strategies.generalized_cliffords_b_simulation_strategy import ( + GeneralizedCliffordsBSimulationStrategy, + BSPermanentCalculatorInterface, ) -from .simulation_strategy_interface import SimulationStrategyInterface -from ..boson_sampling_utilities.boson_sampling_utilities import ( - prepare_interferometer_matrix_in_expanded_space, + +from theboss.simulation_strategies.simulation_strategy_interface import ( + SimulationStrategyInterface, ) -from ..boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_interface import ( - BSPermanentCalculatorInterface, + +from theboss.boson_sampling_utilities.boson_sampling_utilities import ( + prepare_interferometer_matrix_in_expanded_space, ) class LossyNetworksGeneralizedCliffordsSimulationStrategy(SimulationStrategyInterface): + """ + This class implements the generalized C&C algorithm for the optical networks with + non-uniform (mode-dependent) losses. It utilizes the fact that we can interpret + the losses as transferring the particle to an inaccessible mode in the expanded + space. This, in turn, is done, by transforming the initial :math:`m \\cross m` + matrix into a :math:`2m \\times 2m` matrix. For more details see and analyse the + prepare_interferometer_matrix_in_expanded_space method. + + It expects the lossy interferometer matrix to be passed in bs_permanent_calculator. 
+ + Note: Let :math:`l_i` denote the losses on the :math:`i`-th mode and + :math:`t_i = 1 - l_i` denote transmissivity of the :math:`i`-th mode. Then, to + apply losses to the lossless interferometer matrix one has to multiply it by + a matrix with :math:`\\sqrt{t_i}` on diagonal. + """ + def __init__(self, bs_permanent_calculator: BSPermanentCalculatorInterface) -> None: bs_permanent_calculator.matrix = prepare_interferometer_matrix_in_expanded_space( bs_permanent_calculator.matrix ) - self._helper_strategy = GeneralizedCliffordsSimulationStrategy( + + # If for whatever reason one would like to run Clifford & Clifford A algorithm + # for non-uniformly lossy networks using the expanded dimension approach, one + # only has to change the helper strategy here. + self._helper_strategy: GeneralizedCliffordsBSimulationStrategy = GeneralizedCliffordsBSimulationStrategy( bs_permanent_calculator ) - def simulate(self, input_state: ndarray, samples_number: int = 1) -> [ndarray]: - expansion_zeros = zeros_like(input_state, dtype=int) - expanded_state = vstack([input_state, expansion_zeros]) + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> [Tuple[int, ...]]: + """ + The main method of the class. It returns desired number of samples from the + (potentially non-uniformly lossy) BS experiment with given input state and + interferometer matrix (specified previously). + + :param input_state: + Input state of the BS experiment. + :param samples_number: + The number of samples to be returned. + + :return: + Samples from the exact BS distribution. + """ + expanded_state = vstack([input_state, zeros_like(input_state, dtype=int)]) expanded_state = expanded_state.reshape(2 * len(input_state),) + expanded_samples = self._helper_strategy.simulate( expanded_state, samples_number ) - # Trim output state + # Trim the output states. 
samples = [] for output_state in expanded_samples: while len(output_state) > len(input_state): output_state = delete(output_state, len(output_state) - 1) - samples.append(output_state) + samples.append(tuple(output_state)) return samples - def set_new_matrix(self, matrix: ndarray) -> None: - self._helper_strategy.set_new_matrix(matrix) + def set_new_matrix(self, matrix: Sequence[Sequence[complex128]]) -> None: + self._helper_strategy.set_new_matrix( + prepare_interferometer_matrix_in_expanded_space(matrix) + ) diff --git a/theboss/simulation_strategies/lossy_state_approximated_simulation_strategy.py b/theboss/simulation_strategies/lossy_state_approximated_simulation_strategy.py index 7cff70c..fa2f51e 100644 --- a/theboss/simulation_strategies/lossy_state_approximated_simulation_strategy.py +++ b/theboss/simulation_strategies/lossy_state_approximated_simulation_strategy.py @@ -16,26 +16,29 @@ ndarray, hstack, zeros_like, - complex128, - eye, - pi, - ones, - exp, - diag, + array, arange, ) -from numpy.random import choice, rand, shuffle -from typing import List +from numpy.random import choice, shuffle +from typing import List, Tuple, Sequence from scipy.special import binom -from ..boson_sampling_utilities.boson_sampling_utilities import generate_lossy_inputs +from theboss.boson_sampling_utilities.boson_sampling_utilities import ( + generate_lossy_n_particle_input_states, + generate_qft_matrix_for_first_m_modes, + generate_random_phases_matrix_for_first_m_modes, +) from multiprocessing import cpu_count import multiprocessing from concurrent.futures import ProcessPoolExecutor as Pool from copy import deepcopy -from ..quantum_computations_utilities import compute_qft_matrix class LossyStateApproximationSimulationStrategy(SimulationStrategyInterface): + """ + An implementation of the BOBS strategy [2] designed for the uniformly lossy + experiments. It applies the losses to the state before the sampling begins. 
+ """ + def __init__( self, bs_permanent_calculator: BSPermanentCalculatorInterface, @@ -50,23 +53,54 @@ def __init__( self._not_approximated_lossy_mixed_state_parts_weights = None # Required for general simulation - self._hierarchy_level = hierarchy_level - self._uniform_transmissivity = uniform_transmissivity - self._threads_number = self._get_proper_threads_number(threads_number) - self._permanent_calculator = ( - bs_permanent_calculator # Should contain an UNITARY (no losses here!) + self._hierarchy_level: int = hierarchy_level + self._uniform_transmissivity: float = uniform_transmissivity + self._threads_number: int = self._get_proper_threads_number(threads_number) + self._permanent_calculator: BSPermanentCalculatorInterface = ( + bs_permanent_calculator # Should contain a UNITARY (no losses here!) + ) + self._qft_matrix = generate_qft_matrix_for_first_m_modes( + len(bs_permanent_calculator.input_state) - hierarchy_level, + len(bs_permanent_calculator.input_state), ) - self._qft_matrix = self._get_qft_matrix() @staticmethod def _get_proper_threads_number(threads_number: int) -> int: + """ + Computes the proper number of thread, if the one specified by the user + is a nonsense. + + Note: Maximal number of threads is given if the number specified by the user + is negative. + + TODO TR: Consider putting it into some general file. + + :param threads_number: + Threads number specified by the user. + + :return: + The number of threads that the sampler will use. + """ if threads_number < 1 or threads_number > cpu_count(): return cpu_count() else: return threads_number - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int, ...]]: + """ + Generates a list of samples from the BS experiment instance. + + :param input_state: + The input state of the BS experiment. + + :param samples_number: + The number of samples that will be returned. 
+ :return: + A list of sampled outputs. + """ if samples_number < 1: return [] @@ -82,8 +116,8 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra samples_number ) - # Context is required on Linux systems, as the default (fork) produces undesired results! Spawn is default - # on osX and Windows and works as expected. + # Context is required on Linux systems, as the default (fork) produces undesired + # results! Spawn is default on osX and Windows and works as expected. multiprocessing_context = multiprocessing.get_context("spawn") with Pool(mp_context=multiprocessing_context) as p: @@ -96,27 +130,41 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra return samples def _prepare_not_approximated_lossy_mixed_state( - self, not_approximated_input_state_part: ndarray + self, not_approximated_input_state_part: Sequence[int] ) -> None: + """ + + :param not_approximated_input_state_part: + :return: + """ self._prepare_not_approximated_lossy_mixed_state_parts( not_approximated_input_state_part ) self._prepare_not_approximated_lossy_mixed_state_parts_weights() def _prepare_not_approximated_lossy_mixed_state_parts( - self, not_approximated_input_state_part: ndarray + self, not_approximated_input_state_part: Sequence[int] ) -> None: + """ + + :param not_approximated_input_state_part: + :return: + """ self._not_approximated_lossy_mixed_state_parts = [] for number_of_particles_left in range( sum(not_approximated_input_state_part) + 1 ): self._not_approximated_lossy_mixed_state_parts.extend( - generate_lossy_inputs( + generate_lossy_n_particle_input_states( not_approximated_input_state_part, number_of_particles_left ) ) def _prepare_not_approximated_lossy_mixed_state_parts_weights(self) -> None: + """ + + :return: + """ # Do note that this method HAS TO be called after lossy mixed state parts are # computed. 
possible_weights = self._get_possible_lossy_inputs_weights( @@ -132,7 +180,14 @@ def _prepare_not_approximated_lossy_mixed_state_parts_weights(self) -> None: possible_weights[int(sum(state_part))] / binom(n, sum(state_part)) ) - def _get_possible_lossy_inputs_weights(self, input_state: ndarray) -> List[float]: + def _get_possible_lossy_inputs_weights( + self, input_state: Sequence[int] + ) -> List[float]: + """ + + :param input_state: + :return: + """ weights = [] # I'll use the same notation as in [1], for readability. @@ -145,11 +200,15 @@ def _get_possible_lossy_inputs_weights(self, input_state: ndarray) -> List[float return weights def _prepare_approximated_input_state( - self, approximated_input_state_part: ndarray + self, approximated_input_state_part: Sequence[int] ) -> None: + """ + :param approximated_input_state_part: + :return: + """ # Assume exact simulation if hierarchy level is not specified. - if not 0 <= self._hierarchy_level < self._permanent_calculator.matrix.shape[0]: + if not 0 <= self._hierarchy_level < len(self._permanent_calculator.matrix): self._approximated_input_state_part_possibilities = [[]] self._approximated_input_state_part_possibilities_weights = [1] return @@ -158,8 +217,13 @@ def _prepare_approximated_input_state( self._prepare_approximated_input_state_parts_weights() def _prepare_approximated_input_state_parts( - self, approximated_input_state_part: ndarray + self, approximated_input_state_part: Sequence[int] ) -> None: + """ + + :param approximated_input_state_part: + :return: + """ self._approximated_input_state_part_possibilities = [] for number_of_particles_left in range( int(sum(approximated_input_state_part)) + 1 @@ -170,21 +234,38 @@ def _prepare_approximated_input_state_parts( state_part_possibility ) - def _prepare_approximated_input_state_parts_weights(self): + def _prepare_approximated_input_state_parts_weights(self) -> None: + """ + Prepare the probabilities of obtaining a given number of particles in the + 
approximated part of the input. + """ self._approximated_input_state_part_possibilities_weights = self._get_possible_lossy_inputs_weights( - # Last part contains all possible particles. - self._approximated_input_state_part_possibilities[-1] + self._approximated_input_state_part_possibilities[ + -1 + ] # Last part contains all possible particles. ) @staticmethod - def _distribute_uniformly(val: int, bins: int) -> List[int]: - # TODO TR: Might be but in a more general file. + def _distribute_uniformly(values_number: int, bins: int) -> List[int]: + """ + Uniformly distributes the values between the specified number of bins. + + TODO TR: Might be put in a more general file. + + :param values_number: + The number of elements to be divided into bins. + :param bins: + The number of bins into which elements will be divided. + + :returns: + The number of elements in each bin. + """ distributed_values = [] for v in range(bins): - distributed_values.append(val // bins) + distributed_values.append(values_number // bins) - for i in range(val % bins): + for i in range(values_number % bins): distributed_values[i] += 1 return distributed_values @@ -192,11 +273,27 @@ def _distribute_uniformly(val: int, bins: int) -> List[int]: def _compute_number_of_samples_for_each_thread( self, samples_number: int ) -> List[int]: + """ + Computes the number of samples that each thread should return. + + :param samples_number: + The total number of samples. + + :return: + A list of samples that should be returned by each thread. + """ return self._distribute_uniformly(samples_number, self._threads_number) - def _simulate_in_parallel(self, samples_number: int = 1) -> List[ndarray]: - """ This method produces given number of samples from lossy approximated - (separable) state. It's meant to be run in parallel. + def _simulate_in_parallel(self, samples_number: int = 1) -> List[Tuple[int, ...]]: + """ + This method produces given number of samples from lossy approximated + (separable) state. 
It's meant to be run in parallel if so desired. + + :param samples_number: + A number of samples returned by the method. + + :returns: + A list of samples. """ samples = [] @@ -212,8 +309,13 @@ def _simulate_in_parallel(self, samples_number: int = 1) -> List[ndarray]: return samples - def _get_input_state_for_sampling(self): + def _get_input_state_for_sampling(self) -> ndarray: + """ + Applies losses to the input state and returns the result. + :return: + Lossy input state. + """ approximated_part = self._approximated_input_state_part_possibilities[ choice( range(len(self._approximated_input_state_part_possibilities)), @@ -229,40 +331,32 @@ def _get_input_state_for_sampling(self): ] return hstack([not_approximated_part, approximated_part]) - # Symmetrization fix def _permuted_interferometer_matrix(self) -> ndarray: + """ + Permute the columns of the matrix for better symmetrization and possibly + more accurate sampling. + + :return: + The interferometer matrix with permuted columns. + """ permutation = arange( - self._permanent_calculator.matrix.shape[0] + len(self._permanent_calculator.matrix) ) # We work with unitary matrices. shuffle(permutation) - return self._permanent_calculator.matrix[:, permutation] + return array(self._permanent_calculator.matrix)[:, permutation] def _get_matrix_for_approximate_sampling(self) -> ndarray: - # TODO TR: THIS WILL BE REWRITTEN AFTER MERGING WITH BRUTE-FORCE BRANCH - random_phases_matrix = self._get_random_phases_matrix() + """ + Prepares the matrix for the approximate sampling, as intended in [2]. + + :return: + A matrix for the approximate sampling. 
+ """ + random_phases_matrix = generate_random_phases_matrix_for_first_m_modes( + len(self._qft_matrix) - self._hierarchy_level, len(self._qft_matrix) + ) return ( self._permuted_interferometer_matrix() @ random_phases_matrix @ self._qft_matrix ) - - def _get_qft_matrix(self): - modes_number = self._permanent_calculator.matrix.shape[0] - small_qft_matrix = compute_qft_matrix(modes_number - self._hierarchy_level) - qft_matrix = eye(modes_number, dtype=complex128) - - qft_matrix[ - self._hierarchy_level : modes_number, self._hierarchy_level : modes_number - ] = small_qft_matrix - - return qft_matrix - - def _get_random_phases_matrix(self) -> ndarray: - modes_number = self._permanent_calculator.matrix.shape[0] - random_phases = ones(modes_number, dtype=complex128) - - random_phases[self._hierarchy_level : modes_number] = exp( - 1j * 2 * pi * rand(modes_number - self._hierarchy_level) - ) - - return diag(random_phases) diff --git a/theboss/simulation_strategies/generalized_cliffords_simulation_strategy_v2.py b/theboss/simulation_strategies/mode_assignment_generalized_cliffords_simulation_strategy.py similarity index 56% rename from theboss/simulation_strategies/generalized_cliffords_simulation_strategy_v2.py rename to theboss/simulation_strategies/mode_assignment_generalized_cliffords_simulation_strategy.py index 63159e3..70e35e2 100644 --- a/theboss/simulation_strategies/generalized_cliffords_simulation_strategy_v2.py +++ b/theboss/simulation_strategies/mode_assignment_generalized_cliffords_simulation_strategy.py @@ -1,12 +1,9 @@ __author__ = "Tomasz Rybotycki" -from copy import copy -from typing import List - -from numpy import append, array, int64, ndarray +from typing import List, Tuple, Sequence from ..boson_sampling_utilities.boson_sampling_utilities import ( - particle_state_to_modes_state, + mode_assignment_to_mode_occupation, ) from ..boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_interface import ( BSPermanentCalculatorInterface, @@ 
-16,19 +13,30 @@ ) -class GeneralizedCliffordsSimulationStrategyInterfaceV2( +class ModeAssignmentGeneralizedCliffordsSimulationStrategyInterface( GeneralizedCliffordsSimulationStrategy ): + """ + An implementation generalized C&C algorithm that works in the mode assignment + description of the states (as the original C&C and the [2] description). + """ + def __init__(self, bs_permanent_calculator: BSPermanentCalculatorInterface) -> None: super().__init__(bs_permanent_calculator) - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int, ...]]: """ - Returns sample from linear optics experiments given output state. + Returns sample from linear optics experiments given input state. + + :param input_state: + Input state in the mode occupation representation. + :param samples_number: + Number of samples to simulate. - :param input_state: Input state in particle basis. - :param samples_number: Number of samples to simulate. - :return: A resultant state after traversing through interferometer. + :return: + A list of sampled output states in the mode occupation description. """ self.input_state = input_state self.number_of_input_photons = sum(input_state) @@ -40,15 +48,16 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra while len(samples) < samples_number: self._fill_r_sample() samples.append( - particle_state_to_modes_state( - array(self.r_sample, dtype=int64), len(self.input_state) - ) + mode_assignment_to_mode_occupation(self.r_sample, len(self.input_state)) ) return samples def _fill_r_sample(self) -> None: - self.r_sample = [] - self.current_key = tuple(self.r_sample) + """ + Creates a sample according to the generalized C&C algorithm. 
+ """ + self.r_sample = tuple() + self.current_key = self.r_sample self.current_sample_probability = 1 while self.number_of_input_photons > len(self.r_sample): @@ -57,6 +66,9 @@ def _fill_r_sample(self) -> None: self._sample_from_latest_pmf() def _calculate_new_layer_of_pmfs(self) -> None: + """ + Adds new layer, from which new particle will be sampled, to the pmfs dict. + """ number_of_particle_to_sample = len(self.r_sample) + 1 possible_input_states = self._labeled_states[number_of_particle_to_sample] corresponding_k_vectors = [ @@ -66,16 +78,14 @@ def _calculate_new_layer_of_pmfs(self) -> None: pmf = [] - weights = self._calculate_weights_from_k_vectors( - array(corresponding_k_vectors, dtype=float) - ) + weights = self._compute_weights_from_k_vectors(corresponding_k_vectors) weights /= sum(weights) self.possible_outputs[ self.current_key ] = self._generate_possible_output_states() for output in self.possible_outputs[self.current_key]: - output = particle_state_to_modes_state(output, len(self.input_state)) + output = mode_assignment_to_mode_occupation(output, len(self.input_state)) pmf.append(0) for i in range(len(possible_input_states)): probability = self._calculate_outputs_probability( @@ -86,12 +96,17 @@ def _calculate_new_layer_of_pmfs(self) -> None: self.pmfs[self.current_key] = pmf - def _generate_possible_output_states(self) -> List[ndarray]: - possible_output_states = [] + def _generate_possible_output_states(self) -> List[Tuple[int, ...]]: + """ + Generates a list of possible output states in the current step of the algorithm + basing on the current r_sample. + + :return: + A list of the output state that one may get in the current algorithm's step. 
+ """ + possible_output_states: List[Tuple[int, ...]] = [] for i in range(len(self.input_state)): - new_possible_output = copy(self.r_sample) - new_possible_output = append(new_possible_output, [i]) - possible_output_states.append(array(new_possible_output, dtype=int64)) + possible_output_states.append(self.r_sample + (i,)) return possible_output_states diff --git a/theboss/simulation_strategies/nonuniform_losses_approximation_strategy.py b/theboss/simulation_strategies/nonuniform_losses_approximation_strategy.py index c36ba5c..0f59ebd 100644 --- a/theboss/simulation_strategies/nonuniform_losses_approximation_strategy.py +++ b/theboss/simulation_strategies/nonuniform_losses_approximation_strategy.py @@ -1,105 +1,244 @@ __author__ = "Tomasz Rybotycki" """ - This file contains implementation of approximate boson sampling strategy subject to non-uniform losses. This can be - well used to approximate boson sampling experiments with non-balanced network. More details can be found in [2]. + This file contains the implementation of approximate boson sampling strategy subject + to non-uniform losses. This can be used to approximate boson sampling experiments + with non-balanced networks. More details can be found in [2]. 
""" import multiprocessing from concurrent.futures import ProcessPoolExecutor as Pool from copy import deepcopy -from itertools import repeat from math import sqrt from multiprocessing import cpu_count -from typing import List +from typing import List, Sequence, Dict, Tuple -from numpy import ndarray, diag, ones_like -from numpy.random import choice -from scipy import special +from numpy import ndarray, diag, isclose +from numpy.random import random, randint +from numpy.linalg import svd +from scipy.special import binom from .lossy_networks_generalized_cliffords_simulation_strategy import ( BSPermanentCalculatorInterface, LossyNetworksGeneralizedCliffordsSimulationStrategy, ) from ..boson_sampling_utilities.boson_sampling_utilities import ( - prepare_interferometer_matrix_in_expanded_space, generate_qft_matrix_for_first_m_modes, generate_random_phases_matrix_for_first_m_modes, ) +# I implement my version of choice, as it seems that numpy.random.choice is very slow. +def choice(values: Sequence[int], weights: Sequence[float] = None) -> int: + """ + Returns one of the values according to specified weights. If weights aren't + specified properly, the method samples value uniformly at random. + + Notice that in this scenario I only want to get the number of particles left after + application of uniform losses, hence the values are of type int and weights of + type float. + + :param values: + Values to sample from. + + :param weights: + Weights according to which the sampling will be performed. + + :return: + Sampled value. 
+ """ + if weights is None: + weights = list() + + if len(values) != len(weights): + return values[randint(0, len(values))] + + weights_sum: float = 0 + random_number: float = random() + + for i in range(len(values)): + weights_sum += weights[i] + if weights_sum > random_number: + return values[i] + + class NonuniformLossesApproximationStrategy: + """ + This is an implementation of the algorithm presented by Brod and Oszmaniec in their + 2020 work [2]. Without the loss of generalization we assume that the first :math:`k` + modes will be approximated. + """ + def __init__( self, bs_permanent_calculator: BSPermanentCalculatorInterface, approximated_modes_number: int, - modes_transsmisivity: float, threads_number: int = -1, ) -> None: self._approximated_modes_number = self._get_proper_approximated_modes_number( bs_permanent_calculator, approximated_modes_number ) - self._modes_transmissivity = modes_transsmisivity - self._initial_matrix = self._prepare_initial_matrix(bs_permanent_calculator) + self._uniform_losses: float = 0 + self._initial_matrix: Sequence[Sequence[complex]] = list(list()) - self._binom_weights = self._compute_binomial_weights() + self._extract_losses_from_the_interferometer(bs_permanent_calculator.matrix) + # Fill weights at the beginning of the simulation, when input is given. 
+ self._binomial_weights: Dict[int, List[float]] = dict() self._threads_number = self._get_proper_threads_number(threads_number) - self._permanent_calculator = bs_permanent_calculator + self._state_without_approximated_modes: List[int] = list() + self._approximated_modes_particles_number: int = 0 + @staticmethod def _get_proper_approximated_modes_number( bs_permanent_calculator: BSPermanentCalculatorInterface, approximated_modes_number: int, ): - if approximated_modes_number > bs_permanent_calculator.matrix.shape[0]: - approximated_modes_number = bs_permanent_calculator.matrix.shape[0] - if approximated_modes_number < 0: - approximated_modes_number = 0 - return approximated_modes_number - - def _prepare_initial_matrix( - self, bs_permanent_calculator: BSPermanentCalculatorInterface - ): + """ + Bounds the approximated modes number to the proper values. - loss_removing_matrix = ones_like(bs_permanent_calculator.matrix[0]) - loss_removing_matrix[: self._approximated_modes_number] = 1.0 / sqrt( - self._modes_transmissivity - ) # This here assumes uniform losses - loss_removing_matrix = diag(loss_removing_matrix) + :param bs_permanent_calculator: + Permanent calculator. Required to assess the total number of modes. + :param approximated_modes_number: + Number of approximated modes specified by the user. - initial_matrix = bs_permanent_calculator.matrix @ loss_removing_matrix + :return: + Properly bounded number of approximated modes. 
+ """ + total_number_of_modes: int = len(bs_permanent_calculator.matrix) - initial_matrix = prepare_interferometer_matrix_in_expanded_space(initial_matrix) - - return initial_matrix - - def _compute_binomial_weights(self): - - eta = self._modes_transmissivity - k = self._approximated_modes_number + if approximated_modes_number > total_number_of_modes: + approximated_modes_number = total_number_of_modes + if approximated_modes_number < 0: + approximated_modes_number = 0 + return approximated_modes_number - binom_weights = [] + def _extract_losses_from_the_interferometer( + self, interferometer_matrix: Sequence[Sequence[complex]] + ) -> None: + """ + Extracts maximal amount of uniform losses from the interferometer_matrix. + + :param interferometer_matrix: + Possibly lossy (sub)unitary interferometer matrix. + """ + u: ndarray + s: List[float] + v: ndarray + u, s, v = svd(interferometer_matrix) + + # Extract uniform losses from the matrix + transmissivities: List[float] = [singular_value ** 2 for singular_value in s] + losses: List[float] = [1 - eta for eta in transmissivities] + self._uniform_losses = min(losses) + + # Form the interferometer_matrx with losses extracted. + transmissivities = [ + eta / (1 - self._uniform_losses) for eta in transmissivities + ] + s = [sqrt(eta) for eta in transmissivities] + self._initial_matrix = u @ diag(s) @ v + + def _compute_binomial_weights( + self, max_particles_number: int + ) -> Dict[int, List[float]]: + """ + Prepares a dict of list of binomial weights for sampling proper number of + particles in a mode after application of uniform losses extracted from the + interferometer matrix. + + :param max_particles_number: + Maximal number of particles in a single mode of the input state. Recall that + we expect the input state to already be in the proper form, e.g. with the + bunching prepared according to the algorithm specification [2]. 
+ + :return: + A dict of lists of binomial weights that one can use to sample the number + of particles left after application of the uniform losses. + """ + eta = 1 - self._uniform_losses # Uniform transmissivity + + binomial_weights: Dict[int, List[float]] = {} + + # Just a shorthand notation. + def binomial_weight(total_particles: int, particles_left: int) -> float: + """ + Probability of keeping particles_left out of total_particles particles. + :param total_particles: + Total number of particles. + :param particles_left: + Number of particles left. + :return: + Probability of getting :math:`l` particles after application of uniform + losses to :math:`n` particles given transmissivity :math:`\\eta`. + """ + return ( + pow(eta, particles_left) + * binom(total_particles, particles_left) + * pow(1.0 - eta, total_particles - particles_left) + ) - weight = lambda l: pow(eta, l) * special.binom(k, l) * pow(1.0 - eta, k - l) - for i in range(k + 1): - binom_weights.append(weight(i)) + for n in range(1, max_particles_number + 1): + binomial_weights[n] = [] + for particles_left in range(n + 1): + binomial_weights[n].append(binomial_weight(n, particles_left)) - return binom_weights + return binomial_weights - def _get_proper_threads_number(self, threads_number: int) -> int: + @staticmethod + def _get_proper_threads_number(threads_number: int) -> int: + """ + For multithreading. If the specified threads number is negative or exceeds + the total number of available cores (cpu_count()), then the maximal + possible threads number (cpu_count()) is used. + + :param threads_number: + Threads number given by the user. + + :return: + Possibly fixed number of threads. + """ if threads_number < 1 or threads_number > cpu_count(): return cpu_count() else: return threads_number - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: - + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Sequence[int]]: + """ + Main method of the simulator. 
It samples from the approximate BS distribution + that is specified by the input_state and the previously given interferometer + matrix. + + :param input_state: + Fock state in the 2nd quantization description. + :param samples_number: + The number of samples that will be returned. + + :return: + Specified number of samples from the approximate BS distribution. + """ if samples_number < 1: - return [] + return list() + + # Prepare the state used in the approximate simulation. + self._state_without_approximated_modes = list(input_state) + + for i in range(self._approximated_modes_number): + self._approximated_modes_particles_number += input_state[i] + self._state_without_approximated_modes[i] = 0 + + maximum_particles_in_mode: int = max( + max(input_state), self._approximated_modes_particles_number + ) + self._binomial_weights = self._compute_binomial_weights( + maximum_particles_in_mode + ) # Get samples number per thread samples_per_thread = ( @@ -110,62 +249,99 @@ def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarra samples_per_thread = int(samples_per_thread / self._threads_number) samples_for_threads = [samples_per_thread] * self._threads_number - # Context is required on Linux systems, as the default (fork) produces undesired results! Spawn is default - # on osX and Windows and works as expected. + # Context is required on Linux systems, as the default (fork) produces undesired + # results! Spawn is default on osX and Windows and works as expected. 
multiprocessing_context = multiprocessing.get_context("spawn") with Pool(mp_context=multiprocessing_context) as p: - samples_lists = p.map( - self._simulate_in_parallel, repeat(input_state), samples_for_threads - ) + samples_lists = p.map(self._simulate_in_parallel, samples_for_threads) samples = [sample for samples_list in samples_lists for sample in samples_list] return samples - def _simulate_in_parallel(self, input_state: ndarray, samples_number: int = 1): + def _simulate_in_parallel(self, samples_number: int = 1) -> List[Sequence[int]]: + """ + A part of simulation that can be performed independently in separate threads. It + creates samples using GCCB strategy destined for lossy networks. + + :param samples_number: + The number of samples that a single thread should sample. + + :return: + A list of sampled output states. + """ samples = [] - helper_strategy = LossyNetworksGeneralizedCliffordsSimulationStrategy( + helper_strategy: LossyNetworksGeneralizedCliffordsSimulationStrategy = LossyNetworksGeneralizedCliffordsSimulationStrategy( deepcopy(self._permanent_calculator) ) for _ in range(samples_number): - lossy_input = self._compute_lossy_input(input_state) + approximate_state = deepcopy(self._state_without_approximated_modes) - # if not array_equal(lossy_input, input_state): - # print(f"Got {lossy_input.__str__()}, expected: {input_state.__str__()}") # For k = # modes + # Symmetrization. 
+ if self._approximated_modes_number > 0: + approximate_state[ + randint(0, self._approximated_modes_number) + ] = self._approximated_modes_particles_number - approximate_sampling_matrix = self._get_matrix_for_approximate_sampling() + lossy_approximate_input_state = self._compute_lossy_input(approximate_state) - # if not array_equal(approximate_sampling_matrix, self._initial_matrix): - # print(f"Got {approximate_sampling_matrix.__str__()}, expected: {self._initial_matrix.__str__()}") # For k = # modes + approximate_sampling_matrix = self._get_matrix_for_approximate_sampling() helper_strategy.set_new_matrix(approximate_sampling_matrix) - samples.append(helper_strategy.simulate(lossy_input)[0]) + samples.append(helper_strategy.simulate(lossy_approximate_input_state)[0]) return samples - def _compute_lossy_input(self, input_state: ndarray) -> ndarray: + def _compute_lossy_input(self, input_state: Sequence[int]) -> Tuple[int, ...]: + """ + Applies the initial channel of extracted uniform losses to the input state. - if self._approximated_modes_number < 1: - return input_state + :param input_state: + Input state to which uniform losses channel will be applied. - lossy_input = deepcopy(input_state) + :return: + Lossy input state. + """ - binned_input_index = self._approximated_modes_number - 1 - lossy_input[binned_input_index] = choice( - range(self._approximated_modes_number + 1), p=self._binom_weights - ) + # If there are no uniform losses at the beginning, then the input cannot be + # lossy. All potential losses are in the network. 
+ if isclose(self._uniform_losses, 0): + return tuple(input_state) + + lossy_input = tuple() + + for mode in range(len(input_state)): + + if input_state[mode] == 0: + lossy_input += (0,) + continue + + lossy_input += ( + choice( + list(range(input_state[mode] + 1)), + self._binomial_weights[input_state[mode]], + ), + ) return lossy_input def _get_matrix_for_approximate_sampling(self) -> ndarray: + """ + Generates the matrix for the approximate simulation. Do note that this matrix + has to be computed for each sample, as we have to apply random phases for + each sample. + + :return: + The matrix used for the approximate sampling. + """ qft_matrix = generate_qft_matrix_for_first_m_modes( - self._approximated_modes_number, self._initial_matrix.shape[0] + self._approximated_modes_number, len(self._initial_matrix) ) random_phases_matrix = generate_random_phases_matrix_for_first_m_modes( - self._approximated_modes_number, self._initial_matrix.shape[0] + self._approximated_modes_number, len(self._initial_matrix) ) return self._initial_matrix @ random_phases_matrix @ qft_matrix diff --git a/theboss/simulation_strategies/simulation_strategy_factory.py b/theboss/simulation_strategies/simulation_strategy_factory.py index b2c8931..8ed6180 100644 --- a/theboss/simulation_strategies/simulation_strategy_factory.py +++ b/theboss/simulation_strategies/simulation_strategy_factory.py @@ -213,7 +213,6 @@ def _generate_bobs_strategy(self) -> NonuniformLossesApproximationStrategy: return NonuniformLossesApproximationStrategy( bs_permanent_calculator=deepcopy(self.bs_permanent_calculator), approximated_modes_number=approximated_modes_number, - modes_transsmisivity=self._experiment_configuration.uniform_transmissivity, ) def _generate_uniform_losses_bobs_strategy( diff --git a/theboss/simulation_strategies/simulation_strategy_interface.py b/theboss/simulation_strategies/simulation_strategy_interface.py index 97672ba..899b71c 100644 --- a/theboss/simulation_strategies/simulation_strategy_interface.py 
+++ b/theboss/simulation_strategies/simulation_strategy_interface.py @@ -1,9 +1,7 @@ __author__ = "Tomasz Rybotycki" import abc -from typing import List - -from numpy import ndarray +from typing import Sequence, List, Tuple class SimulationStrategyInterface(abc.ABC): @@ -12,7 +10,9 @@ def __subclasshook__(cls, subclass): return hasattr(subclass, "simulate") and callable(subclass.simulate) @abc.abstractmethod - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int]]: """ Simulate the lossy boson sampling experiment. diff --git a/theboss/simulation_strategies/uniform_loss_simulation_strategy.py b/theboss/simulation_strategies/uniform_loss_simulation_strategy.py index c10136f..8b5f17d 100644 --- a/theboss/simulation_strategies/uniform_loss_simulation_strategy.py +++ b/theboss/simulation_strategies/uniform_loss_simulation_strategy.py @@ -1,40 +1,59 @@ __author__ = "Tomasz Rybotycki" -from typing import List +from typing import List, Sequence, Tuple from numpy import arange, ndarray from numpy.random import choice -from scipy import special +from scipy.special import binom -from ..boson_sampling_simulator import BosonSamplingSimulator -from .fixed_loss_simulation_strategy import FixedLossSimulationStrategy -from .simulation_strategy_interface import SimulationStrategyInterface +from theboss.boson_sampling_simulator import BosonSamplingSimulator +from theboss.simulation_strategies.fixed_loss_simulation_strategy import ( + FixedLossSimulationStrategy, +) +from theboss.simulation_strategies.simulation_strategy_interface import ( + SimulationStrategyInterface, +) class UniformLossSimulationStrategy(SimulationStrategyInterface): + """ + An implementation of a strategy for simulating BS experiments with uniform losses. 
+ """ + def __init__( self, - interferometer_matrix: ndarray, + interferometer_matrix: Sequence[Sequence[complex]], number_of_modes: int, transmissivity: float, ) -> None: - self.interferometer_matrix = interferometer_matrix - self.number_of_modes = number_of_modes - self.transmissivity = transmissivity + self.interferometer_matrix: Sequence[Sequence[complex]] = interferometer_matrix + self.number_of_modes: int = number_of_modes + self.transmissivity: float = transmissivity + + def simulate( + self, input_state: Sequence[int], samples_number: int = 1 + ) -> List[Tuple[int, ...]]: + """ + - def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]: + :param input_state: + + :param samples_number: + + :return: + """ initial_number_of_particles = int(sum(input_state)) # Using n, eta, l notation from the paper. - n = initial_number_of_particles - eta = self.transmissivity + n: int = initial_number_of_particles + eta: float = self.transmissivity - separable_states_weights = [ - pow(eta, l) * special.binom(n, l) * pow(1.0 - eta, n - l) - for l in range(n + 1) + separable_states_weights: List[float] = [ + pow(eta, l) * binom(n, l) * pow(1.0 - eta, n - l) for l in range(n + 1) ] - samples = [] + samples: List[Tuple[int, ...]] = [] + while len(samples) < samples_number: number_of_particles_left_in_selected_separable_state = choice( arange(0, n + 1), p=separable_states_weights