From 45928b68c5330672ddd762184c840dea1a7e02f2 Mon Sep 17 00:00:00 2001
From: Joern Weissenborn
Date: Sun, 16 Oct 2022 14:15:56 +0200
Subject: [PATCH 01/11] Remove megacomplex is index dependent.

---
 .../baseline/baseline_megacomplex.py          |   4 -
 .../test/test_baseline_megacomplex.py         |   2 +-
 .../clp_guide/clp_guide_megacomplex.py        |   4 -
 .../coherent_artifact_megacomplex.py          |  57 +++-
 .../test/test_coherent_artifact.py            |   6 +-
 .../damped_oscillation_megacomplex.py         |  66 ++--
 .../decay/decay_matrix_gaussian_irf.py        |  77 +++++
 .../megacomplexes/decay/decay_megacomplex.py  |   9 +-
 .../decay/decay_parallel_megacomplex.py       |   9 +-
 .../decay/decay_sequential_megacomplex.py     |   9 +-
 .../decay/test/test_decay_megacomplex.py      |   4 -
 glotaran/builtin/megacomplexes/decay/util.py  | 133 ++++----
 .../spectral/spectral_megacomplex.py          |   4 -
 .../spectral/test/test_spectral_model.py      |   6 +-
 glotaran/model/__init__.py                    |   1 -
 glotaran/model/clp_constraint.py              |   3 +-
 glotaran/model/dataset_model.py               |  20 --
 glotaran/model/megacomplex.py                 |  20 --
 glotaran/optimization/estimation_provider.py  |  48 +--
 glotaran/optimization/matrix_provider.py      | 319 ++++++++++--------
 glotaran/optimization/test/models.py          |  24 +-
 .../optimization/test/test_constraints.py     |   2 +-
 .../optimization/test/test_optimization.py    |  12 +-
 glotaran/optimization/test/test_relations.py  |   2 +-
 glotaran/simulation/simulation.py             |  30 +-
 setup.cfg                                     |   7 -
 26 files changed, 451 insertions(+), 427 deletions(-)
 create mode 100644 glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py

diff --git a/glotaran/builtin/megacomplexes/baseline/baseline_megacomplex.py b/glotaran/builtin/megacomplexes/baseline/baseline_megacomplex.py
index 4223bc67e..a769c1d9d 100644
--- a/glotaran/builtin/megacomplexes/baseline/baseline_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/baseline/baseline_megacomplex.py
@@ -15,7 +15,6 @@ class BaselineMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -24,9 +23,6 @@ def calculate_matrix(
         matrix = np.ones((model_axis.size, 1), dtype=np.float64)
         return clp_label, matrix
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return False
-
     def finalize_data(
         self,
         dataset_model: DatasetModel,

diff --git a/glotaran/builtin/megacomplexes/baseline/test/test_baseline_megacomplex.py b/glotaran/builtin/megacomplexes/baseline/test/test_baseline_megacomplex.py
index 404865618..e3dc99dd0 100644
--- a/glotaran/builtin/megacomplexes/baseline/test/test_baseline_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/baseline/test/test_baseline_megacomplex.py
@@ -19,7 +19,7 @@ def test_baseline():
     time = np.asarray(np.arange(0, 50, 1.5))
     pixel = np.asarray([0])
     dataset_model = fill_item(model.dataset["dataset1"], model, parameters)
-    matrix = MatrixProvider.calculate_dataset_matrix(dataset_model, None, pixel, time)
+    matrix = MatrixProvider.calculate_dataset_matrix(dataset_model, pixel, time)
     compartments = matrix.clp_labels
 
     assert len(compartments) == 1

diff --git a/glotaran/builtin/megacomplexes/clp_guide/clp_guide_megacomplex.py b/glotaran/builtin/megacomplexes/clp_guide/clp_guide_megacomplex.py
index 239390d2c..6a04ea2bd 100644
--- a/glotaran/builtin/megacomplexes/clp_guide/clp_guide_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/clp_guide/clp_guide_megacomplex.py
@@ -16,7 +16,6 @@ class ClpGuideMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -25,9 +24,6 @@ def calculate_matrix(
         matrix = np.ones((1, 1), dtype=np.float64)
         return clp_label, matrix
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return False
-
     def finalize_data(
         self,
         dataset_model: DatasetModel,

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
index ca12fb79a..51da3dc01 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
@@ -13,7 +13,6 @@
 from glotaran.model import Megacomplex
 from glotaran.model import ModelError
 from glotaran.model import ParameterType
-from glotaran.model import is_dataset_model_index_dependent
 from glotaran.model import megacomplex
 
 
@@ -27,7 +26,6 @@ class CoherentArtifactMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -35,27 +33,48 @@ def calculate_matrix(
         if not 1 <= self.order <= 3:
             raise ModelError("Coherent artifact order must be between in [1,3]")
 
-        if dataset_model.irf is None:
+        irf = dataset_model.irf
+        if irf is None:
             raise ModelError(f'No irf in dataset "{dataset_model.label}"')
 
-        if not isinstance(dataset_model.irf, IrfMultiGaussian):
+        if not isinstance(irf, IrfMultiGaussian):
             raise ModelError(f'Irf in dataset "{dataset_model.label} is not a gaussian irf."')
 
-        irf = dataset_model.irf
+        matrix_shape = (
+            (global_axis.size, model_axis.size, self.order)
+            if index_dependent(dataset_model)
+            else (model_axis.size, self.order)
+        )
+        matrix = np.zeros(matrix_shape, dtype=np.float64)
+        if index_dependent(dataset_model):
+            centers, widths = [], []
+            for global_index in range(global_axis.size):
+                center, width = self.get_irf_parameter(irf, global_index, global_axis)
+                centers.append(center)
+                widths.append(width)
+            _calculate_coherent_artifact_matrix(
+                matrix, centers, widths, global_axis.size, model_axis, self.order
+            )
+
+        else:
+            center, width = self.get_irf_parameter(irf, None, global_axis)
+            _calculate_coherent_artifact_matrix_on_index(
+                matrix, center, width, model_axis, self.order
+            )
+
+        return self.compartments(), matrix
 
+    def get_irf_parameter(
+        self, irf: IrfMultiGaussian, global_index: int | None, global_axis: np.typing.ArrayLike
+    ) -> tuple[float, float]:
         center, width, _, shift, _, _ = irf.parameter(global_index, global_axis)
         center = center[0] - shift
         width = self.width.value if self.width is not None else width[0]
-
-        matrix = _calculate_coherent_artifact_matrix(center, width, model_axis, self.order)
-        return self.compartments(), matrix
+        return center, width
 
     def compartments(self):
         return [f"coherent_artifact_{i}" for i in range(1, self.order + 1)]
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return index_dependent(dataset_model)
-
     def finalize_data(
         self,
         dataset_model: DatasetModel,
@@ -68,7 +87,7 @@ def finalize_data(
         model_dimension = dataset.attrs["model_dimension"]
         dataset.coords["coherent_artifact_order"] = np.arange(1, self.order + 1)
         response_dimensions = (model_dimension, "coherent_artifact_order")
-        if is_dataset_model_index_dependent(dataset_model):
+        if len(dataset.matrix.shape) == 3:
             response_dimensions = (global_dimension, *response_dimensions)
         dataset["coherent_artifact_response"] = (
             response_dimensions,
@@ -82,8 +101,17 @@ def finalize_data(
 
 
 @nb.jit(nopython=True, parallel=True)
-def _calculate_coherent_artifact_matrix(center, width, axis, order):
-    matrix = np.zeros((axis.size, order), dtype=np.float64)
+def _calculate_coherent_artifact_matrix(
+    matrix, centers, widths, global_axis_size, model_axis, order
+):
+    for i in nb.prange(global_axis_size):
+        _calculate_coherent_artifact_matrix_on_index(
+            matrix[i], centers[i], widths[i], model_axis, order
+        )
+
+
+@nb.jit(nopython=True, parallel=True)
+def _calculate_coherent_artifact_matrix_on_index(matrix, center, width, axis, order):
     matrix[:, 0] = np.exp(-1 * (axis - center) ** 2 / (2 * width**2))
 
     if order > 1:
@@ -93,4 +121,3 @@
         matrix[:, 2] = (
             matrix[:, 0] * (center**2 - width**2 - 2 * center * axis + axis**2) / width**4
         )
-    return matrix

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
index 989634cf1..cc6f7bafb 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
@@ -88,7 +88,7 @@ def test_coherent_artifact(spectral_dependence: str):
     spectral = np.asarray([200, 300, 400])
 
     dataset_model = fill_item(model.dataset["dataset1"], model, parameters)
-    matrix = MatrixProvider.calculate_dataset_matrix(dataset_model, 0, spectral, time)
+    matrix = MatrixProvider.calculate_dataset_matrix(dataset_model, spectral, time)
     compartments = matrix.clp_labels
 
     print(compartments)
@@ -96,7 +96,7 @@ def test_coherent_artifact(spectral_dependence: str):
     for i in range(1, 4):
         assert compartments[i] == f"coherent_artifact_{i}"
 
-    assert matrix.matrix.shape == (time.size, 4)
+    assert matrix.matrix.shape == (spectral.size, time.size, 4)
 
     clp = xr.DataArray(
         np.ones((3, 4)),
@@ -113,7 +113,7 @@ def test_coherent_artifact(spectral_dependence: str):
             ),
         ],
     )
-    axis = {"time": time, "spectral": clp.spectral}
+    axis = {"time": time, "spectral": clp.spectral.data}
     data = simulate(model, "dataset1", parameters, axis, clp)
 
     dataset = {"dataset1": data}

diff --git a/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py b/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py
index 31f470ea0..7178d41b2 100644
--- a/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py
@@ -7,6 +7,7 @@
 
 from glotaran.builtin.megacomplexes.decay.decay_parallel_megacomplex import DecayDatasetModel
 from glotaran.builtin.megacomplexes.decay.irf import IrfMultiGaussian
+from glotaran.builtin.megacomplexes.decay.util import index_dependent
 from glotaran.model import DatasetModel
 from glotaran.model import ItemIssue
 from glotaran.model import Megacomplex
@@ -67,7 +68,6 @@ class DampedOscillationMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -88,34 +88,29 @@ def calculate_matrix(
         )
 
         rates = np.array(self.rates)
-        matrix = np.ones((model_axis.size, len(clp_label)), dtype=np.float64)
+        irf = dataset_model.irf
+        matrix_shape = (
+            (global_axis.size, model_axis.size, len(clp_label))
+            if index_dependent(dataset_model)
+            else (model_axis.size, len(clp_label))
+        )
+        matrix = np.zeros(matrix_shape, dtype=np.float64)
 
-        if dataset_model.irf is None:
+        if irf is None:
             calculate_damped_oscillation_matrix_no_irf(matrix, frequencies, rates, model_axis)
-        elif isinstance(dataset_model.irf, IrfMultiGaussian):
-            centers, widths, scales, shift, _, _ = dataset_model.irf.parameter(
-                global_index, global_axis
-            )
-            for center, width, scale in zip(centers, widths, scales):
-                matrix += calculate_damped_oscillation_matrix_gaussian_irf(
-                    frequencies,
-                    rates,
-                    model_axis,
-                    center,
-                    width,
-                    shift,
-                    scale,
+        elif isinstance(irf, IrfMultiGaussian):
+            if index_dependent(dataset_model):
+                for i in range(global_axis.size):
+                    calculate_damped_oscillation_matrix_gaussian_irf_on_index(
+                        matrix[i], frequencies, rates, irf, i, global_axis, model_axis
+                    )
+            else:
+                calculate_damped_oscillation_matrix_gaussian_irf_on_index(
+                    matrix, frequencies, rates, irf, None, global_axis, model_axis
                 )
-            matrix /= np.sum(scales)
 
         return clp_label, matrix
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return (
-            isinstance(dataset_model.irf, IrfMultiGaussian)
-            and dataset_model.irf.is_index_dependent()
-        )
-
     def finalize_data(
         self,
         dataset_model: DatasetModel,
@@ -159,7 +154,7 @@ def finalize_data(
             phase,
         )
 
-        if self.index_dependent(dataset_model):
+        if index_dependent(dataset_model):
             dataset[f"{prefix}_sin"] = (
                 (
                     global_dimension,
@@ -200,6 +195,29 @@ def calculate_damped_oscillation_matrix_no_irf(matrix, frequencies, rates, axis)
         idx += 2
 
 
+def calculate_damped_oscillation_matrix_gaussian_irf_on_index(
+    matrix: np.typing.ArrayLike,
+    frequencies: np.typing.ArrayLike,
+    rates: np.typing.ArrayLike,
+    irf: IrfMultiGaussian,
+    global_index: int | None,
+    global_axis: np.typing.ArrayLike,
+    model_axis: np.typing.ArrayLike,
+):
+    centers, widths, scales, shift, _, _ = irf.parameter(global_index, global_axis)
+    for center, width, scale in zip(centers, widths, scales):
+        matrix += calculate_damped_oscillation_matrix_gaussian_irf(
+            frequencies,
+            rates,
+            model_axis,
+            center,
+            width,
+            shift,
+            scale,
+        )
+    matrix /= np.sum(scales)
+
+
 def calculate_damped_oscillation_matrix_gaussian_irf(
     frequencies: np.ndarray,
     rates: np.ndarray,

diff --git a/glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py b/glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py
new file mode 100644
index 000000000..45bce187a
--- /dev/null
+++ b/glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py
@@ -0,0 +1,77 @@
+import ctypes
+
+import numba as nb
+import numpy as np
+from numba.extending import get_cython_function_address
+
+# This is a work around to use scipy.special function with numba
+_dble = ctypes.c_double
+
+functype = ctypes.CFUNCTYPE(_dble, _dble)
+
+erf_addr = get_cython_function_address("scipy.special.cython_special", "__pyx_fuse_1erf")
+erfcx_addr = get_cython_function_address("scipy.special.cython_special", "__pyx_fuse_1erfcx")
+
+erf = functype(erf_addr)
+erfcx = functype(erfcx_addr)
+
+SQRT2 = np.sqrt(2)
+
+
+@nb.jit(nopython=True, parallel=True)
+def calculate_decay_matrix_gaussian_irf_on_index(
+    matrix: np.typing.ArrayLike,
+    rates: np.typing.ArrayLike,
+    times: np.typing.ArrayLike,
+    centers: np.typing.ArrayLike,
+    widths: np.typing.ArrayLike,
+    scales: np.typing.ArrayLike,
+    backsweep: bool,
+    backsweep_period: float | None,
+):
+    """Calculates a decay matrix with a gaussian irf."""
+    for n_i in nb.prange(centers.size):
+        center, width, scale = centers[n_i], widths[n_i], scales[n_i]
+        for n_r in nb.prange(rates.size):
+            r_n = rates[n_r]
+            backsweep_valid = backsweep and abs(r_n) * backsweep_period > 0.001
+            alpha = (r_n * width) / SQRT2
+            for n_t in nb.prange(times.size):
+                t_n = times[n_t]
+                beta = (t_n - center) / (width * SQRT2)
+                thresh = beta - alpha
+                if thresh < -1:
+                    matrix[n_t, n_r] += scale * 0.5 * erfcx(-thresh) * np.exp(-beta * beta)
+                else:
+                    matrix[n_t, n_r] += (
+                        scale * 0.5 * (1 + erf(thresh)) * np.exp(alpha * (alpha - 2 * beta))
+                    )
+                if backsweep and backsweep_valid:
+                    x1 = np.exp(-r_n * (t_n - center + backsweep_period))
+                    x2 = np.exp(-r_n * ((backsweep_period / 2) - (t_n - center)))
+                    x3 = np.exp(-r_n * backsweep_period)
+                    matrix[n_t, n_r] += scale * (x1 + x2) / (1 - x3)
+
+
+@nb.jit(nopython=True, parallel=True)
+def calculate_decay_matrix_gaussian_irf(
+    matrix: np.typing.ArrayLike,
+    rates: np.typing.ArrayLike,
+    times: np.typing.ArrayLike,
+    all_centers: np.typing.ArrayLike,
+    all_widths: np.typing.ArrayLike,
+    scales: np.typing.ArrayLike,
+    backsweep: bool,
+    backsweep_period: float | None,
+):
+    for n_w in nb.prange(all_centers.shape[0]):
+        calculate_decay_matrix_gaussian_irf_on_index(
+            matrix[n_w],
+            rates,
+            times,
+            all_centers[n_w],
+            all_widths[n_w],
+            scales,
+            backsweep,
+            backsweep_period,
+        )

diff --git a/glotaran/builtin/megacomplexes/decay/decay_megacomplex.py b/glotaran/builtin/megacomplexes/decay/decay_megacomplex.py
index 1abadc98d..8ac4ae5a1 100644
--- a/glotaran/builtin/megacomplexes/decay/decay_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/decay/decay_megacomplex.py
@@ -8,7 +8,6 @@
 from glotaran.builtin.megacomplexes.decay.k_matrix import KMatrix
 from glotaran.builtin.megacomplexes.decay.util import calculate_matrix
 from glotaran.builtin.megacomplexes.decay.util import finalize_data
-from glotaran.builtin.megacomplexes.decay.util import index_dependent
 from glotaran.model import DatasetModel
 from glotaran.model import Megacomplex
 from glotaran.model import ModelError
@@ -70,20 +69,14 @@ def get_a_matrix(self, dataset_model: DatasetModel) -> np.ndarray:
             self.get_compartments(dataset_model), self.get_initial_concentration(dataset_model)
         )
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return index_dependent(dataset_model)
-
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
     ):
-        return calculate_matrix(
-            self, dataset_model, global_index, global_axis, model_axis, **kwargs
-        )
+        return calculate_matrix(self, dataset_model, global_axis, model_axis, **kwargs)
 
     def finalize_data(
         self,

diff --git a/glotaran/builtin/megacomplexes/decay/decay_parallel_megacomplex.py b/glotaran/builtin/megacomplexes/decay/decay_parallel_megacomplex.py
index fddfde848..6ccdfb051 100644
--- a/glotaran/builtin/megacomplexes/decay/decay_parallel_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/decay/decay_parallel_megacomplex.py
@@ -8,7 +8,6 @@
 from glotaran.builtin.megacomplexes.decay.k_matrix import KMatrix
 from glotaran.builtin.megacomplexes.decay.util import calculate_matrix
 from glotaran.builtin.megacomplexes.decay.util import finalize_data
-from glotaran.builtin.megacomplexes.decay.util import index_dependent
 from glotaran.model import DatasetModel
 from glotaran.model import Megacomplex
 from glotaran.model import ModelItemType
@@ -54,20 +53,14 @@ def get_a_matrix(self, dataset_model: DatasetModel) -> np.ndarray:
             self.get_compartments(dataset_model), self.get_initial_concentration(dataset_model)
         )
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return index_dependent(dataset_model)
-
     def calculate_matrix(
         self,
         dataset_model: DecayDatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
     ):
-        return calculate_matrix(
-            self, dataset_model, global_index, global_axis, model_axis, **kwargs
-        )
+        return calculate_matrix(self, dataset_model, global_axis, model_axis, **kwargs)
 
     def finalize_data(
         self,

diff --git a/glotaran/builtin/megacomplexes/decay/decay_sequential_megacomplex.py b/glotaran/builtin/megacomplexes/decay/decay_sequential_megacomplex.py
index 7534dd3ba..7c9b8f15a 100644
--- a/glotaran/builtin/megacomplexes/decay/decay_sequential_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/decay/decay_sequential_megacomplex.py
@@ -9,7 +9,6 @@
 from glotaran.builtin.megacomplexes.decay.k_matrix import KMatrix
 from glotaran.builtin.megacomplexes.decay.util import calculate_matrix
 from glotaran.builtin.megacomplexes.decay.util import finalize_data
-from glotaran.builtin.megacomplexes.decay.util import index_dependent
 from glotaran.model import DatasetModel
 from glotaran.model import megacomplex
 
@@ -44,20 +43,14 @@ def get_k_matrix(self) -> KMatrix:
     def get_a_matrix(self, dataset_model: DatasetModel) -> np.ndarray:
         return self.get_k_matrix().a_matrix_sequential(self.get_compartments(dataset_model))
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return index_dependent(dataset_model)
-
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
     ):
-        return calculate_matrix(
-            self, dataset_model, global_index, global_axis, model_axis, **kwargs
-        )
+        return calculate_matrix(self, dataset_model, global_axis, model_axis, **kwargs)
 
     def finalize_data(
         self,

diff --git a/glotaran/builtin/megacomplexes/decay/test/test_decay_megacomplex.py b/glotaran/builtin/megacomplexes/decay/test/test_decay_megacomplex.py
index a08c77271..08346404b 100644
--- a/glotaran/builtin/megacomplexes/decay/test/test_decay_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/decay/test/test_decay_megacomplex.py
@@ -8,7 +8,6 @@
 from glotaran.builtin.megacomplexes.decay import DecayParallelMegacomplex
 from glotaran.builtin.megacomplexes.decay import DecaySequentialMegacomplex
 from glotaran.model import Model
-from glotaran.model.item import fill_item
 from glotaran.optimization.optimize import optimize
 from glotaran.parameter import Parameters
 from glotaran.project import Scheme
@@ -107,9 +106,6 @@ class OneComponentOneChannelGaussianIrf:
         ]
     )
     print(initial_parameters)
-    assert model.megacomplex["mc1"].index_dependent(
-        fill_item(model.dataset["dataset1"], model, initial_parameters)
-    )
     wanted_parameters = Parameters.from_list(
         [
             [101e-3, {"non-negative": True}],

diff --git a/glotaran/builtin/megacomplexes/decay/util.py b/glotaran/builtin/megacomplexes/decay/util.py
index 809e5dc22..d36bc10c0 100644
--- a/glotaran/builtin/megacomplexes/decay/util.py
+++ b/glotaran/builtin/megacomplexes/decay/util.py
@@ -4,6 +4,12 @@
 import numpy as np
 import xarray as xr
 
+from glotaran.builtin.megacomplexes.decay.decay_matrix_gaussian_irf import (
+    calculate_decay_matrix_gaussian_irf,
+)
+from glotaran.builtin.megacomplexes.decay.decay_matrix_gaussian_irf import (
+    calculate_decay_matrix_gaussian_irf_on_index,
+)
 from glotaran.builtin.megacomplexes.decay.irf import IrfMultiGaussian
 from glotaran.builtin.megacomplexes.decay.irf import IrfSpectralMultiGaussian
 from glotaran.model import DatasetModel
@@ -32,7 +38,6 @@ def index_dependent(dataset_model: DatasetModel) -> bool:
 def calculate_matrix(
     megacomplex: Megacomplex,
     dataset_model: DatasetModel,
-    global_index: int | None,
     global_axis: np.typing.ArrayLike,
     model_axis: np.typing.ArrayLike,
     **kwargs,
@@ -45,12 +50,21 @@ def calculate_matrix(
     rates = k_matrix.rates(compartments, initial_concentration)
 
     # init the matrix
-    size = (model_axis.size, rates.size)
-    matrix = np.zeros(size, dtype=np.float64)
-
-    decay_matrix_implementation(
-        matrix, rates, global_index, global_axis, model_axis, dataset_model
+    matrix_shape = (
+        (global_axis.size, model_axis.size, rates.size)
+        if index_dependent(dataset_model)
+        else (model_axis.size, rates.size)
     )
+    matrix = np.zeros(matrix_shape, dtype=np.float64)
+
+    if index_dependent(dataset_model):
+        decay_matrix_implementation_index_dependent(
+            matrix, rates, global_axis, model_axis, dataset_model
+        )
+    else:
+        decay_matrix_implementation_index_independent(
+            matrix, rates, global_axis, model_axis, dataset_model
+        )
 
     if not np.all(np.isfinite(matrix)):
         raise ValueError(
@@ -133,10 +147,9 @@ def finalize_data(
     )
 
 
-def decay_matrix_implementation(
+def decay_matrix_implementation_index_independent(
     matrix: np.ndarray,
     rates: np.ndarray,
-    global_index: int,
     global_axis: np.ndarray,
     model_axis: np.ndarray,
     dataset_model: DatasetModel,
@@ -150,19 +163,18 @@ def decay_matrix_implementation_index_independent(
             shift,
             backsweep,
             backsweep_period,
-        ) = dataset_model.irf.parameter(global_index, global_axis)
+        ) = dataset_model.irf.parameter(None, global_axis)
 
-        for center, width, irf_scale in zip(centers, widths, irf_scales):
-            calculate_decay_matrix_gaussian_irf(
-                matrix,
-                rates,
-                model_axis,
-                center - shift,
-                width,
-                irf_scale,
-                backsweep,
-                backsweep_period,
-            )
+        calculate_decay_matrix_gaussian_irf_on_index(
+            matrix,
+            rates,
+            model_axis,
+            centers - shift,
+            widths,
+            irf_scales,
+            backsweep,
+            backsweep_period,
+        )
 
         if dataset_model.irf.normalize:
             matrix /= np.sum(irf_scales)
@@ -170,6 +182,42 @@ def decay_matrix_implementation_index_independent(
     else:
         calculate_decay_matrix_no_irf(matrix, rates, model_axis)
 
 
+def decay_matrix_implementation_index_dependent(
+    matrix: np.ndarray,
+    rates: np.ndarray,
+    global_axis: np.ndarray,
+    model_axis: np.ndarray,
+    dataset_model: DatasetModel,
+):
+    all_centers, all_widths = [], []
+    backsweep, backsweep_period = False, None
+    irf_scales = []
+    for global_index in range(global_axis.size):
+        (
+            centers,
+            widths,
+            irf_scales,
+            shift,
+            backsweep,
+            backsweep_period,
+        ) = dataset_model.irf.parameter(global_index, global_axis)
+        all_centers.append(centers - shift)
+        all_widths.append(widths)
+
+    calculate_decay_matrix_gaussian_irf(
+        matrix,
+        rates,
+        model_axis,
+        np.array(all_centers),
+        np.array(all_widths),
+        irf_scales,
+        backsweep,
+        backsweep_period,
+    )
+    if dataset_model.irf.normalize:
+        matrix /= np.sum(irf_scales)
+
+
 @nb.jit(nopython=True, parallel=True)
 def calculate_decay_matrix_no_irf(matrix, rates, times):
     for n_r in nb.prange(rates.size):
@@ -179,51 +227,6 @@ def calculate_decay_matrix_no_irf(matrix, rates, times):
             t_n = times[n_t]
             matrix[n_t, n_r] += np.exp(-r_n * t_n)
 
 
-sqrt2 = np.sqrt(2)
-
-
-@nb.jit(nopython=True, parallel=True)
-def calculate_decay_matrix_gaussian_irf(
-    matrix, rates, times, center, width, scale, backsweep, backsweep_period
-):
-    """Calculates a decay matrix with a gaussian irf."""
-    for n_r in nb.prange(rates.size):
-        r_n = rates[n_r]
-        backsweep_valid = abs(r_n) * backsweep_period > 0.001
-        alpha = (r_n * width) / sqrt2
-        for n_t in nb.prange(times.size):
-            t_n = times[n_t]
-            beta = (t_n - center) / (width * sqrt2)
-            thresh = beta - alpha
-            if thresh < -1:
-                matrix[n_t, n_r] += scale * 0.5 * erfcx(-thresh) * np.exp(-beta * beta)
-            else:
-                matrix[n_t, n_r] += (
-                    scale * 0.5 * (1 + erf(thresh)) * np.exp(alpha * (alpha - 2 * beta))
-                )
-            if backsweep and backsweep_valid:
-                x1 = np.exp(-r_n * (t_n - center + backsweep_period))
-                x2 = np.exp(-r_n * ((backsweep_period / 2) - (t_n - center)))
-                x3 = np.exp(-r_n * backsweep_period)
-                matrix[n_t, n_r] += scale * (x1 + x2) / (1 - x3)
-
-
-import ctypes  # noqa: E402
-
-# This is a work around to use scipy.special function with numba
-from numba.extending import get_cython_function_address  # noqa: E402
-
-_dble = ctypes.c_double
-
-functype = ctypes.CFUNCTYPE(_dble, _dble)
-
-erf_addr = get_cython_function_address("scipy.special.cython_special", "__pyx_fuse_1erf")
-erfcx_addr = get_cython_function_address("scipy.special.cython_special", "__pyx_fuse_1erfcx")
-
-erf = functype(erf_addr)
-erfcx = functype(erfcx_addr)
-
-
 def retrieve_species_associated_data(
     dataset_model: DatasetModel,
     dataset: xr.Dataset,

diff --git a/glotaran/builtin/megacomplexes/spectral/spectral_megacomplex.py b/glotaran/builtin/megacomplexes/spectral/spectral_megacomplex.py
index 754b589b0..f702d6a40 100644
--- a/glotaran/builtin/megacomplexes/spectral/spectral_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/spectral/spectral_megacomplex.py
@@ -27,7 +27,6 @@ class SpectralMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -53,9 +52,6 @@ def calculate_matrix(
 
         return compartments, matrix
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        return False
-
     def finalize_data(
         self,
         dataset_model: DatasetModel,

diff --git a/glotaran/builtin/megacomplexes/spectral/test/test_spectral_model.py b/glotaran/builtin/megacomplexes/spectral/test/test_spectral_model.py
index 18067a0a2..2cad6f063 100644
--- a/glotaran/builtin/megacomplexes/spectral/test/test_spectral_model.py
+++ b/glotaran/builtin/megacomplexes/spectral/test/test_spectral_model.py
@@ -74,7 +74,7 @@ class OneCompartmentModelInvertedAxis:
     axis = {"time": time, "spectral": spectral}
 
    decay_dataset_model = fill_item(decay_model.dataset["dataset1"], decay_model, decay_parameters)
-    matrix = MatrixProvider.calculate_dataset_matrix(decay_dataset_model, None, spectral, time)
+    matrix = MatrixProvider.calculate_dataset_matrix(decay_dataset_model, spectral, time)
     decay_compartments = matrix.clp_labels
     clp = xr.DataArray(matrix.matrix, coords=[("time", time), ("clp_label", decay_compartments)])
 
@@ -132,7 +132,7 @@ class OneCompartmentModelNegativeSkew:
     axis = {"time": time, "spectral": spectral}
 
     decay_dataset_model = fill_item(decay_model.dataset["dataset1"], decay_model, decay_parameters)
-    matrix = MatrixProvider.calculate_dataset_matrix(decay_dataset_model, None, spectral, time)
+    matrix = MatrixProvider.calculate_dataset_matrix(decay_dataset_model, spectral, time)
     decay_compartments = matrix.clp_labels
     clp = xr.DataArray(matrix.matrix, coords=[("time", time), ("clp_label", decay_compartments)])
 
@@ -235,7 +235,7 @@ class ThreeCompartmentModel:
     axis = {"time": time, "spectral": spectral}
 
     decay_dataset_model = fill_item(decay_model.dataset["dataset1"], decay_model, decay_parameters)
-    matrix = MatrixProvider.calculate_dataset_matrix(decay_dataset_model, None, spectral, time)
+    matrix = MatrixProvider.calculate_dataset_matrix(decay_dataset_model, spectral, time)
     decay_compartments = matrix.clp_labels
     clp = xr.DataArray(matrix.matrix, coords=[("time", time), ("clp_label", decay_compartments)])

diff --git a/glotaran/model/__init__.py b/glotaran/model/__init__.py
index 95db58bb1..2461c05e9 100644
--- a/glotaran/model/__init__.py
+++ b/glotaran/model/__init__.py
@@ -6,7 +6,6 @@
 from glotaran.model.dataset_group import DatasetGroup
 from glotaran.model.dataset_model import DatasetModel
 from glotaran.model.dataset_model import get_dataset_model_model_dimension
-from glotaran.model.dataset_model import is_dataset_model_index_dependent
 from glotaran.model.item import ItemIssue
 from glotaran.model.item import ModelItem
 from glotaran.model.item import ModelItemType

diff --git a/glotaran/model/clp_constraint.py b/glotaran/model/clp_constraint.py
index ae66a17b0..5e8a38f49 100644
--- a/glotaran/model/clp_constraint.py
+++ b/glotaran/model/clp_constraint.py
@@ -14,13 +14,14 @@ class ClpConstraint(TypedItem, IntervalItem):
     the respective classes for details.
     """
 
+    target: str
+
 
 @item
 class ZeroConstraint(ClpConstraint):
     """Constraints the target to 0 in the given interval."""
 
     type: str = "zero"
-    target: str
 
 
 @item

diff --git a/glotaran/model/dataset_model.py b/glotaran/model/dataset_model.py
index 38ca81602..3f700beb6 100644
--- a/glotaran/model/dataset_model.py
+++ b/glotaran/model/dataset_model.py
@@ -197,26 +197,6 @@ class DatasetModel(ModelItem):
     scale: ParameterType | None = None
 
 
-def is_dataset_model_index_dependent(dataset_model: DatasetModel) -> bool:
-    """Check if the dataset model is index dependent.
-
-    Parameters
-    ----------
-    dataset_model: DatasetModel
-        The dataset model.
-
-    Returns
-    -------
-    bool
-    """
-    if dataset_model.force_index_dependent:
-        return True
-    return any(
-        m.index_dependent(dataset_model)  # type:ignore[union-attr]
-        for m in dataset_model.megacomplex
-    )
-
-
 def has_dataset_model_global_model(dataset_model: DatasetModel) -> bool:
     """Check if the dataset model can model the global dimension.

diff --git a/glotaran/model/megacomplex.py b/glotaran/model/megacomplex.py
index 0b748aaa5..a2dc95b4b 100644
--- a/glotaran/model/megacomplex.py
+++ b/glotaran/model/megacomplex.py
@@ -84,7 +84,6 @@ def get_dataset_model_type(cls) -> type | None:
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -95,8 +94,6 @@ def calculate_matrix(
         ----------
         dataset_model: DatasetModel
             The dataset model.
-        global_index: int | None
-            The global index.
         global_axis: np.typing.ArrayLike
             The global axis.
         model_axis: np.typing.ArrayLike,
@@ -114,23 +111,6 @@ def calculate_matrix(
         """
         raise NotImplementedError
 
-    def index_dependent(self, dataset_model: DatasetModel) -> bool:
-        """Check if the megacomplex is index dependent.
-
-        Parameters
-        ----------
-        dataset_model: DatasetModel
-            The dataset model.
-
-        Returns
-        -------
-        bool
-
-        .. # noqa: DAR202
-        .. # noqa: DAR401
-        """
-        raise NotImplementedError
-
     def finalize_data(
         self,
         dataset_model: DatasetModel,

diff --git a/glotaran/optimization/estimation_provider.py b/glotaran/optimization/estimation_provider.py
index 9c5db2dcc..38e4e3ad2 100644
--- a/glotaran/optimization/estimation_provider.py
+++ b/glotaran/optimization/estimation_provider.py
@@ -10,7 +10,6 @@
 from glotaran.model import DatasetModel
 from glotaran.model import EqualAreaPenalty
 from glotaran.model.dataset_model import has_dataset_model_global_model
-from glotaran.model.dataset_model import is_dataset_model_index_dependent
 from glotaran.model.item import fill_item
 from glotaran.optimization.data_provider import DataProvider
 from glotaran.optimization.data_provider import DataProviderLinked
@@ -336,7 +335,7 @@ def get_result(
                 coords={global_dimension: global_axis, model_dimension: model_axis},
                 dims=[model_dimension, global_dimension],
             )
-            clp_labels = self._matrix_provider.get_matrix_container(label, 0).clp_labels
+            clp_labels = self._matrix_provider.get_matrix_container(label).clp_labels
             global_clp_labels = self._matrix_provider.get_global_matrix_container(
                 label
             ).clp_labels
@@ -352,34 +351,16 @@ def get_result(
                 coords={global_dimension: global_axis, model_dimension: model_axis},
                 dims=[model_dimension, global_dimension],
             )
-            if is_dataset_model_index_dependent(dataset_model):
-                clps[label] = xr.concat(
-                    [
-                        xr.DataArray(
-                            self._clps[label][i],
-                            coords={
-                                "clp_label": self._matrix_provider.get_matrix_container(
-                                    label, i
-                                ).clp_labels
-                            },
-                        )
-                        for i in range(len(self._clps[label]))
-                    ],
-                    dim=global_dimension,
-                )
-                clps[label].coords[global_dimension] = global_axis
-
-            else:
-                clps[label] = xr.DataArray(
-                    self._clps[label],
-                    coords=(
-                        (global_dimension, global_axis),
-                        (
-                            "clp_label",
-                            self._matrix_provider.get_matrix_container(label, 0).clp_labels,
-                        ),
+            clps[label] = xr.DataArray(
+                self._clps[label],
+                coords=(
+                    (global_dimension, global_axis),
+                    (
+                        "clp_label",
+                        self._matrix_provider.get_matrix_container(label).clp_labels,
                     ),
-                )
+                ),
+            )
         return clps, residuals
 
     def calculate_full_model_estimation(self, dataset_model: DatasetModel):
@@ -416,7 +397,7 @@ def calculate_estimation(self, dataset_model: DatasetModel):
             reduced_clps, residual = self.calculate_residual(
                 matrix_container.matrix, data[:, index]
             )
-            clp_labels.append(self._matrix_provider.get_matrix_container(label, index).clp_labels)
+            clp_labels.append(self._matrix_provider.get_matrix_container(label).clp_labels)
             clp = self.retrieve_clps(
                 clp_labels[index], matrix_container.clp_labels, reduced_clps, global_index_value
             )
@@ -510,13 +491,8 @@ def get_result(
                 group_datasets = self._data_provider.group_definitions[group_label]
                 dataset_index = group_datasets.index(dataset_label)
-                global_index = self._data_provider.get_aligned_dataset_indices(index)[
-                    dataset_index
-                ]
-                clp_labels = self._matrix_provider.get_matrix_container(
-                    dataset_label, global_index
-                ).clp_labels
+                clp_labels = self._matrix_provider.get_matrix_container(dataset_label).clp_labels
 
                 dataset_clps.append(
                     xr.DataArray(

diff --git a/glotaran/optimization/matrix_provider.py b/glotaran/optimization/matrix_provider.py
index af814c889..162f268a9 100644
--- a/glotaran/optimization/matrix_provider.py
+++ b/glotaran/optimization/matrix_provider.py
@@ -4,6 +4,7 @@
 import warnings
 from dataclasses import dataclass
 from dataclasses import replace
+from typing import Any
 
 import numpy as np
 import xarray as xr
@@ -11,7 +12,6 @@
 from glotaran.model import DatasetGroup
 from glotaran.model import DatasetModel
 from glotaran.model.dataset_model import has_dataset_model_global_model
-from glotaran.model.dataset_model import is_dataset_model_index_dependent
 from glotaran.model.dataset_model import iterate_dataset_model_global_megacomplexes
 from glotaran.model.dataset_model import iterate_dataset_model_megacomplexes
 from glotaran.model.interval_item import IntervalItem
@@ -29,6 +29,17 @@ class MatrixContainer:
     matrix: np.ndarray
     """The matrix."""
 
+    @property
+    def is_index_dependent(self) -> bool:
+        """Check if the matrix is index dependent.
+
+        Returns
+        -------
+        bool
+            Whether the matrix is index dependent.
+        """
+        return len(self.matrix.shape) == 3
+
     @staticmethod
     def apply_weight(
         matrix: np.typing.ArrayLike, weight: np.typing.ArrayLike
@@ -92,7 +103,7 @@ def __init__(self, dataset_group: DatasetGroup):
             The dataset group.
         """
         self._group = dataset_group
-        self._matrix_containers: dict[str, MatrixContainer | list[MatrixContainer]] = {}
+        self._matrix_containers: dict[str, MatrixContainer] = {}
         self._global_matrix_containers: dict[str, MatrixContainer] = {}
         self._data_provider: DataProvider
 
@@ -107,25 +118,20 @@ def group(self) -> DatasetGroup:
         """
         return self._group
 
-    def get_matrix_container(self, dataset_label: str, global_index: int) -> MatrixContainer:
+    def get_matrix_container(self, dataset_label: str) -> MatrixContainer:
         """Get the matrix container for a dataset on an index on the global axis.
 
         Parameters
         ----------
         dataset_label : str
             The label of the dataset.
-        global_index : int
-            The index on the global axis.
 
         Returns
         -------
         MatrixContainer
             The matrix container.
         """
-        matrix_container = self._matrix_containers[dataset_label]
-        if is_dataset_model_index_dependent(self.group.dataset_models[dataset_label]):
-            matrix_container = matrix_container[global_index]  # type:ignore[index]
-        return matrix_container  # type:ignore[return-value]
+        return self._matrix_containers[dataset_label]
 
     def calculate_dataset_matrices(self):
         """Calculate the matrices of the datasets in the dataset group."""
@@ -133,22 +139,13 @@ def calculate_dataset_matrices(self):
             model_axis = self._data_provider.get_model_axis(label)
             global_axis = self._data_provider.get_global_axis(label)
 
-            if is_dataset_model_index_dependent(dataset_model):
-                self._matrix_containers[label] = [
-                    self.calculate_dataset_matrix(
-                        dataset_model, global_index, global_axis, model_axis
-                    )
-                    for global_index in range(self._data_provider.get_global_axis(label).size)
-                ]
-            else:
-                self._matrix_containers[label] = self.calculate_dataset_matrix(
-                    dataset_model, None, global_axis, model_axis
-                )
+            self._matrix_containers[label] = self.calculate_dataset_matrix(
+                dataset_model, global_axis, model_axis
+            )
 
     @staticmethod
     def calculate_dataset_matrix(
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         global_matrix: bool = False,
@@ -159,8 +156,6 @@ def calculate_dataset_matrix(
         ----------
         dataset_model : DatasetModel
             The dataset model.
-        global_index : int | None
-            The index on the global axis.
         global_axis: np.typing.ArrayLike
             The global axis.
         model_axis: np.typing.ArrayLike
             The model axis.
@@ -184,7 +179,7 @@ def calculate_dataset_matrix(
 
         for scale, megacomplex in megacomplex_iterator:
             this_clp_labels, this_matrix = megacomplex.calculate_matrix(  # type:ignore[union-attr]
-                dataset_model, global_index, global_axis, model_axis
+                dataset_model, global_axis, model_axis
             )
 
             if scale is not None:
@@ -224,20 +219,44 @@ def combine_megacomplex_matrices(
         tuple[list[str], np.typing.ArrayLike]:
             The combined clp labels and matrix.
         """
-        tmp_clp_labels = clp_labels_left + [
+        result_clp_labels = clp_labels_left + [
             c for c in clp_labels_right if c not in clp_labels_left
         ]
-        tmp_matrix = np.zeros((matrix_left.shape[0], len(tmp_clp_labels)), dtype=np.float64)
-        for idx, label in enumerate(tmp_clp_labels):
+        result_clp_size = len(result_clp_labels)
+
+        if len(matrix_left.shape) < len(matrix_right.shape):
+            matrix_left, matrix_right = matrix_right, matrix_left
+
+        left_index_dependent = len(matrix_left.shape) == 3
+        right_index_dependent = len(matrix_right.shape) == 3
+
+        result_shape = (
+            (matrix_left.shape[0], matrix_left.shape[1], result_clp_size)
+            if left_index_dependent
+            else (matrix_left.shape[0], result_clp_size)
+        )
+
+        result_matrix = np.zeros(result_shape, dtype=np.float64)
+        for idx, label in enumerate(result_clp_labels):
             if label in clp_labels_left:
-                tmp_matrix[:, idx] += matrix_left[:, clp_labels_left.index(label)]
+                if left_index_dependent:
+                    result_matrix[:, :, idx] += matrix_left[:, :, clp_labels_left.index(label)]
+                else:
+                    result_matrix[:, idx] += matrix_left[:, clp_labels_left.index(label)]
             if label in clp_labels_right:
-                tmp_matrix[:, idx] += matrix_right[:, clp_labels_right.index(label)]
-        return tmp_clp_labels, tmp_matrix
+                if left_index_dependent:
+                    result_matrix[:, :, idx] += (
+                        matrix_right[:, :, clp_labels_right.index(label)]
+                        if right_index_dependent
+                        else matrix_right[:, clp_labels_right.index(label)]
+                    )
+                else:
+                    result_matrix[:, idx] += matrix_right[:, clp_labels_right.index(label)]
+        return result_clp_labels, result_matrix
 
     @staticmethod
-    def does_interval_property_apply(prop: IntervalItem, index: int | None) -> bool:
-        """Check if an interval property applies on an index.
+    def does_interval_item_apply(prop: IntervalItem, index: int | None) -> bool:
+        """Check if an interval item applies on an index.
 
         Parameters
         ----------
@@ -263,8 +282,8 @@ def does_interval_property_apply(prop: IntervalItem, index: int | None) -> bool
     def reduce_matrix(
         self,
         matrix: MatrixContainer,
-        index: int | None,
-    ) -> MatrixContainer:
+        global_axis: np.typing.ArrayLike,
+    ) -> list[MatrixContainer]:
         """Reduce a matrix.
 
         Applies constraints and relations.
@@ -273,31 +292,39 @@ def reduce_matrix(
        Parameters
         ----------
         matrix : MatrixContainer
             The matrix.
-        index : int | None
-            The index on the global axis.
+        global_axis: np.typing.ArrayLike,
+            The global axis.
 
         Returns
         -------
         MatrixContainer
             The resulting matrix container.
""" - matrix = self.apply_relations(matrix, index) - matrix = self.apply_constraints(matrix, index) - return matrix + result = ( + [ + MatrixContainer(matrix.clp_labels, matrix.matrix[i, :, :]) + for i in range(global_axis.size) + ] + if matrix.is_index_dependent + else [matrix] * global_axis.size + ) + result = self.apply_relations(result, global_axis) + result = self.apply_constraints(result, global_axis) + return result def apply_constraints( self, - matrix: MatrixContainer, - index: int | None, - ) -> MatrixContainer: + matrices: list[MatrixContainer], + global_axis: np.typing.ArrayLike, + ) -> list[MatrixContainer]: """Apply constraints on a matrix. Parameters ---------- - matrix : MatrixContainer - The matrix. - index : int | None - The index on the global axis. + matrices: list[MatrixContainer], + The matrices. + global_axis: np.typing.ArrayLike, + The global axis. Returns ------- @@ -306,33 +333,37 @@ def apply_constraints( """ model = self.group.model if len(model.clp_constraints) == 0: - return matrix - - clp_labels = matrix.clp_labels - removed_clp_labels = [ - c.target # type:ignore[attr-defined] - for c in model.clp_constraints - if c.target in clp_labels # type:ignore[attr-defined] - and self.does_interval_property_apply(c, index) - ] - reduced_clp_labels = [c for c in clp_labels if c not in removed_clp_labels] - mask = [label in reduced_clp_labels for label in clp_labels] - reduced_matrix = matrix.matrix[:, mask] - return MatrixContainer(reduced_clp_labels, reduced_matrix) + return matrices + + for i, index in enumerate(global_axis): + matrix = matrices[i] + clp_labels = matrix.clp_labels + removed_clp_labels = [ + c.target + for c in model.clp_constraints + if c.target in clp_labels and self.does_interval_item_apply(c, index) + ] + if len(removed_clp_labels) == 0: + continue + reduced_clp_labels = [c for c in clp_labels if c not in removed_clp_labels] + mask = [label in reduced_clp_labels for label in clp_labels] + reduced_matrix = matrix.matrix[:, mask] + matrices[i] = MatrixContainer(reduced_clp_labels, reduced_matrix) + return matrices def apply_relations( self, - matrix: MatrixContainer, - index: int | None, - ) -> MatrixContainer: + matrices: list[MatrixContainer], + global_axis: np.typing.ArrayLike, + ) -> list[MatrixContainer]: """Apply relations on a matrix. Parameters ---------- - matrix : MatrixContainer - The matrix. - index : int | None - The index on the global axis. + matrices: list[MatrixContainer], + The matrices. + global_axis: np.typing.ArrayLike, + The global axis. 
Returns ------- @@ -343,32 +374,39 @@ def apply_relations( parameters = self.group.parameters if len(model.clp_relations) == 0: - return matrix + return matrices - clp_labels = matrix.clp_labels - relation_matrix = np.diagflat([1.0 for _ in clp_labels]) + for i, index in enumerate(global_axis): + matrix = matrices[i] - idx_to_delete = [] - for relation in model.clp_relations: - if relation.target in clp_labels and self.does_interval_property_apply( - relation, index - ): + clp_labels = matrix.clp_labels + relation_matrix = np.diagflat([1.0 for _ in clp_labels]) - if relation.source not in clp_labels: - continue + idx_to_delete = [] + for relation in model.clp_relations: + if relation.target in clp_labels and self.does_interval_item_apply( + relation, index + ): - relation = fill_item(relation, model, parameters) # type:ignore[arg-type] - source_idx = clp_labels.index(relation.source) - target_idx = clp_labels.index(relation.target) - relation_matrix[target_idx, source_idx] = relation.parameter - idx_to_delete.append(target_idx) + if relation.source not in clp_labels: + continue - reduced_clp_labels = [ - label for i, label in enumerate(clp_labels) if i not in idx_to_delete - ] - relation_matrix = np.delete(relation_matrix, idx_to_delete, axis=1) - reduced_matrix = matrix.matrix @ relation_matrix - return MatrixContainer(reduced_clp_labels, reduced_matrix) + relation = fill_item(relation, model, parameters) # type:ignore[arg-type] + source_idx = clp_labels.index(relation.source) + target_idx = clp_labels.index(relation.target) + relation_matrix[target_idx, source_idx] = relation.parameter + idx_to_delete.append(target_idx) + + if len(idx_to_delete) == 0: + continue + + reduced_clp_labels = [ + label for i, label in enumerate(clp_labels) if i not in idx_to_delete + ] + relation_matrix = np.delete(relation_matrix, idx_to_delete, axis=1) + reduced_matrix = matrix.matrix @ relation_matrix + matrices[i] = MatrixContainer(reduced_clp_labels, reduced_matrix) + return matrices def get_result(self) -> tuple[dict[str, xr.DataArray], dict[str, xr.DataArray]]: """Get the results of the matrix calculations. 
@@ -386,31 +424,22 @@ def get_result(self) -> tuple[dict[str, xr.DataArray], dict[str, xr.DataArray]]: for label, matrix_container in self._matrix_containers.items(): model_dimension = self._data_provider.get_model_dimension(label) model_axis = self._data_provider.get_model_axis(label) - if is_dataset_model_index_dependent(self.group.dataset_models[label]): + matrix_coords: tuple[tuple[str, Any], tuple[str, Any], tuple[str, list[str]]] | tuple[ + tuple[str, Any], tuple[str, list[str]] + ] = ( + (model_dimension, model_axis), + ("clp_label", matrix_container.clp_labels), + ) + if matrix_container.is_index_dependent: global_dimension = self._data_provider.get_global_dimension(label) global_axis = self._data_provider.get_global_axis(label) - matrices[label] = xr.concat( - [ - xr.DataArray( - container.matrix, - coords=( - (model_dimension, model_axis), - ("clp_label", container.clp_labels), - ), - ) - for container in matrix_container # type:ignore[union-attr] - ], - dim=global_dimension, - ) - matrices[label].coords[global_dimension] = global_axis - else: - matrices[label] = xr.DataArray( - matrix_container.matrix, # type:ignore[union-attr] - coords=( - (model_dimension, model_axis), - ("clp_label", matrix_container.clp_labels), # type:ignore[union-attr] - ), + matrix_coords = ( + (global_dimension, global_axis), + matrix_coords[0], + matrix_coords[1], ) + matrices[label] = xr.DataArray(matrix_container.matrix, coords=matrix_coords) + for label, matrix_container in self._global_matrix_containers.items(): global_dimension = self._data_provider.get_global_dimension(label) global_axis = self._data_provider.get_global_axis(label) @@ -513,7 +542,7 @@ def calculate_global_matrices(self): model_axis = self._data_provider.get_model_axis(label) global_axis = self._data_provider.get_global_axis(label) self._global_matrix_containers[label] = self.calculate_dataset_matrix( - dataset_model, None, global_axis, model_axis, global_matrix=True + dataset_model, global_axis, model_axis, global_matrix=True ) def calculate_prepared_matrices(self): @@ -523,20 +552,11 @@ def calculate_prepared_matrices(self): continue scale = float(dataset_model.scale or 1) weight = self._data_provider.get_weight(label) - if is_dataset_model_index_dependent(dataset_model): - self._prepared_matrix_container[label] = [ - self.reduce_matrix( - self.get_matrix_container(label, i).create_scaled_matrix(scale), - global_index, - ) - for i, global_index in enumerate(self._data_provider.get_global_axis(label)) - ] - else: - self._prepared_matrix_container[label] = [ - self.reduce_matrix( - self.get_matrix_container(label, 0).create_scaled_matrix(scale), None - ) - ] * self._data_provider.get_global_axis(label).size + self._prepared_matrix_container[label] = self.reduce_matrix( + self.get_matrix_container(label).create_scaled_matrix(scale), + self._data_provider.get_global_axis(label), + ) + if weight is not None: self._prepared_matrix_container[label] = [ matrix.create_weighted_matrix(weight[:, i]) @@ -548,23 +568,19 @@ def calculate_full_matrices(self): for label, dataset_model in self.group.dataset_models.items(): if has_dataset_model_global_model(dataset_model): global_matrix_container = self.get_global_matrix_container(label) + global_matrix = global_matrix_container.matrix + matrix_container = self.get_matrix_container(label) + matrix = matrix_container.matrix - if is_dataset_model_index_dependent(dataset_model): - global_axis = self._data_provider.get_global_axis(label) + if matrix_container.is_index_dependent: full_matrix = 
np.concatenate( [ - np.kron( - global_matrix_container.matrix[i, :], - self.get_matrix_container(label, i).matrix, - ) - for i in range(global_axis.size) + np.kron(global_matrix[i, :], matrix[i, :, :]) + for i in range(matrix.shape[0]) ] ) else: - full_matrix = np.kron( - global_matrix_container.matrix, - self.get_matrix_container(label, 0).matrix, - ) + full_matrix = np.kron(global_matrix, matrix) weight = self._data_provider.get_flattened_weight(label) if weight is not None: @@ -628,11 +644,17 @@ def calculate(self): def calculate_aligned_matrices(self): """Calculate the aligned matrices of the dataset group.""" + reduced_matrices = { + label: self.reduce_matrix(matrix_container, self._data_provider.get_global_axis(label)) + for label, matrix_container in self._matrix_containers.items() + } + full_clp_labels = self.align_full_clp_labels() for i, global_index_value in enumerate(self._data_provider.aligned_global_axis): group_label = self._data_provider.get_aligned_group_label(i) + self._aligned_full_clp_labels[i] = full_clp_labels[group_label] group_matrix = self.align_matrices( [ - self.get_matrix_container(label, index) + reduced_matrices[label][index] for label, index in zip( self._data_provider.group_definitions[group_label], self._data_provider.get_aligned_dataset_indices(i), @@ -646,14 +668,35 @@ def calculate_aligned_matrices(self): ], ) - self._aligned_full_clp_labels[i] = group_matrix.clp_labels - group_matrix = self.reduce_matrix(group_matrix, global_index_value) weight = self._data_provider.get_aligned_weight(i) if weight is not None: group_matrix = group_matrix.create_weighted_matrix(weight) self._aligned_matrices[i] = group_matrix + def align_full_clp_labels(self) -> dict[str, list[str]]: + """Align the unreduced clp labels. + + Returns + ------- + dict[str, list[str]] + The aligned clp for every group. + """ + aligned_full_clp_labels: dict[str, list[str]] = {} + + for ( + group_label, + dataset_labels, + ) in self._data_provider.group_definitions.items(): # type:ignore[attr-defined] + aligned_full_clp_labels[group_label] = [] + for dataset_label in dataset_labels: + aligned_full_clp_labels[group_label] += [ + label + for label in self.get_matrix_container(dataset_label).clp_labels + if label not in aligned_full_clp_labels[group_label] + ] + return aligned_full_clp_labels + @staticmethod def align_matrices(matrices: list[MatrixContainer], scales: list[float]) -> MatrixContainer: """Align matrices. 
diff --git a/glotaran/optimization/test/models.py b/glotaran/optimization/test/models.py
index ac2249393..f3324755f 100644
--- a/glotaran/optimization/test/models.py
+++ b/glotaran/optimization/test/models.py
@@ -19,24 +19,20 @@ class SimpleTestMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model: DatasetModel,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
     ):
         compartments = ["s1", "s2"]
-        r_compartments = []
         array = np.zeros((model_axis.size, len(compartments)))
 
         for i in range(len(compartments)):
-            r_compartments.append(compartments[i])
             for j in range(model_axis.size):
                 array[j, i] = (i + j) * model_axis[j]
-        return r_compartments, array
-
-    def index_dependent(self, dataset_model):
-        return self.is_index_dependent
+        if self.is_index_dependent:
+            array = np.array([array] * global_axis.size)
+        return compartments, array
 
     def finalize_data(
         self,
@@ -66,7 +62,6 @@ class SimpleKineticMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -78,11 +73,10 @@ def calculate_matrix(
         else:
             compartments = [f"s{i+1}" for i in range(len(kinpar))]
         array = np.exp(np.outer(model_axis, kinpar))
+        if self.is_index_dependent:
+            array = np.array([array] * global_axis.size)
         return compartments, array
 
-    def index_dependent(self, dataset_model):
-        return self.is_index_dependent
-
     def finalize_data(
         self,
         dataset_model,
@@ -101,7 +95,6 @@ class SimpleSpectralMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -115,9 +108,6 @@ def calculate_matrix(
         array = np.asarray([[1 for _ in range(model_axis.size)] for _ in compartments]).T
         return compartments, array
 
-    def index_dependent(self, dataset_model):
-        return False
-
 
 @megacomplex()
 class ShapedSpectralMegacomplex(Megacomplex):
@@ -130,7 +120,6 @@ class ShapedSpectralMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,
@@ -148,9 +137,6 @@ def calculate_matrix(
         compartments = [f"s{i+1}" for i in range(location.size)]
         return compartments, array.T
 
-    def index_dependent(self, dataset_model):
-        return False
-
     def finalize_data(
         self,
         dataset_model,

diff --git a/glotaran/optimization/test/test_constraints.py b/glotaran/optimization/test/test_constraints.py
index 87ddb1985..4d7db2e66 100644
--- a/glotaran/optimization/test/test_constraints.py
+++ b/glotaran/optimization/test/test_constraints.py
@@ -33,7 +33,7 @@ def test_constraint(index_dependent, link_clp):
         if link_clp
         else optimization_group._matrix_provider.get_prepared_matrix_container("dataset1", 0)
     )
-    matrix = optimization_group._matrix_provider.get_matrix_container("dataset1", 0)
+    matrix = optimization_group._matrix_provider.get_matrix_container("dataset1")
     result_data = optimization_group.create_result_data()
     print(result_data)  # T201

diff --git a/glotaran/optimization/test/test_optimization.py b/glotaran/optimization/test/test_optimization.py
index 6b82c44b9..0f120fc92 100644
--- a/glotaran/optimization/test/test_optimization.py
+++ b/glotaran/optimization/test/test_optimization.py
@@ -2,8 +2,6 @@
 import pytest
 import xarray as xr
 
-from glotaran.model.dataset_model import is_dataset_model_index_dependent
-from glotaran.model.item import fill_item
 from glotaran.optimization.optimize import optimize
 from glotaran.optimization.test.models import SimpleTestModel
 from glotaran.optimization.test.suites import FullModel
@@ -57,12 +55,6 @@ def test_optimization(suite, is_index_dependent, link_clp, weight, method):
     print(initial_parameters)  # T201
     print(model.validate(initial_parameters))  # T201
     assert model.valid(initial_parameters)
-    assert (
-        is_dataset_model_index_dependent(
-            fill_item(model.dataset["dataset1"], model, initial_parameters)
-        )
-        == is_index_dependent
-    )
 
     nr_datasets = 3 if issubclass(suite, ThreeDatasetDecay) else 1
     data = {}
@@ -120,9 +112,11 @@ def test_optimization(suite, is_index_dependent, link_clp, weight, method):
     for i, dataset in enumerate(data.values()):
         resultdata = result.data[f"dataset{i+1}"]
         print(f"Result Data {i+1}")  # T201
         print("=================")  # T201
         print(resultdata)  # T201
+        assert "matrix" in resultdata
+        assert len(resultdata.matrix.shape) == (3 if is_index_dependent else 2)
         assert "residual" in resultdata
         assert "residual_left_singular_vectors" in resultdata
         assert "residual_right_singular_vectors" in resultdata

diff --git a/glotaran/optimization/test/test_relations.py b/glotaran/optimization/test/test_relations.py
index 270130077..019db6e2c 100644
--- a/glotaran/optimization/test/test_relations.py
+++ b/glotaran/optimization/test/test_relations.py
@@ -35,7 +35,7 @@ def test_relations(index_dependent, link_clp):
         if link_clp
         else optimization_group._matrix_provider.get_prepared_matrix_container("dataset1", 0)
     )
-    matrix = optimization_group._matrix_provider.get_matrix_container("dataset1", 0)
+    matrix = optimization_group._matrix_provider.get_matrix_container("dataset1")
     result_data = optimization_group.create_result_data()
     print(result_data)  # T201

diff --git a/glotaran/simulation/simulation.py b/glotaran/simulation/simulation.py
index e2972cd1a..ce95096f6 100644
--- a/glotaran/simulation/simulation.py
+++ b/glotaran/simulation/simulation.py
@@ -9,7 +9,6 @@
 from glotaran.model import DatasetModel
 from glotaran.model.dataset_model import get_dataset_model_model_dimension
 from glotaran.model.dataset_model import has_dataset_model_global_model
-from glotaran.model.dataset_model import is_dataset_model_index_dependent
 from glotaran.model.item import fill_item
 from glotaran.optimization.matrix_provider import MatrixProvider
 
@@ -128,20 +127,8 @@ def simulate_from_clp(
     """
     if "clp_label" not in clp.coords:
         raise ValueError("Missing coordinate 'clp_label' in clp.")
-    matrices = (
-        [
-            MatrixProvider.calculate_dataset_matrix(
-                dataset_model, index, np.array(global_axis), model_axis
-            )
-            for index, _ in enumerate(global_axis)
-        ]
-        if is_dataset_model_index_dependent(dataset_model)
-        else [
-            MatrixProvider.calculate_dataset_matrix(dataset_model, None, global_axis, model_axis)
-        ]
-        * global_axis.size
-    )
+    matrix = MatrixProvider.calculate_dataset_matrix(dataset_model, global_axis, model_axis)
 
     result = xr.DataArray(
         np.zeros((model_axis.size, global_axis.size)),
         coords=[
@@ -151,9 +138,10 @@ def simulate_from_clp(
        ],
    )
     result = result.to_dataset(name="data")
     for i in range(global_axis.size):
+        this_matrix = matrix.matrix[i] if matrix.is_index_dependent else matrix.matrix
         result.data[:, i] = np.dot(
-            matrices[i].matrix,
-            clp.isel({global_dimension: i}).sel({"clp_label": matrices[i].clp_labels}),
+            this_matrix,
+            clp.isel({global_dimension: i}).sel({"clp_label": matrix.clp_labels}),
         )
     return result
 
@@ -191,15 +179,11 @@ def simulate_full_model(
     ValueError
         Raised if at least one of the dataset model's global megacomplexes is index dependent.
From 97a88e5ec18172be9fa698768a855bfc80c8977a Mon Sep 17 00:00:00 2001
From: Joris Snellenburg
Date: Fri, 11 Nov 2022 22:22:48 +0100
Subject: [PATCH 02/11] =?UTF-8?q?=F0=9F=A9=B9Fix=20optimization=20benchmar?=
 =?UTF-8?q?k?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adapted to changed Megacomplex API
---
 benchmark/pytest/analysis/test_optimization_group.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/benchmark/pytest/analysis/test_optimization_group.py b/benchmark/pytest/analysis/test_optimization_group.py
index 55c259647..58b417544 100644
--- a/benchmark/pytest/analysis/test_optimization_group.py
+++ b/benchmark/pytest/analysis/test_optimization_group.py
@@ -44,7 +44,6 @@ class BenchmarkMegacomplex(Megacomplex):
     def calculate_matrix(
         self,
         dataset_model,
-        global_index: int | None,
         global_axis: np.typing.ArrayLike,
         model_axis: np.typing.ArrayLike,
         **kwargs,

From e3a190eef35b310db650dff94da42046cbda5e06 Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 12 Nov 2022 01:30:44 +0100
Subject: [PATCH 03/11] =?UTF-8?q?=F0=9F=A9=B9=20Added=20back=20megacomplex?=
 =?UTF-8?q?=20plugin=20registration=20at=20installation?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 setup.cfg | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/setup.cfg b/setup.cfg
index 7e16ef4cd..b08492677 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -57,6 +57,13 @@ glotaran.plugins.data_io =
     ascii = glotaran.builtin.io.ascii.wavelength_time_explicit_file
     sdt = glotaran.builtin.io.sdt.sdt_file_reader
     nc = glotaran.builtin.io.netCDF.netCDF
+glotaran.plugins.megacomplexes =
+    baseline = glotaran.builtin.megacomplexes.baseline
+    clp_guide = glotaran.builtin.megacomplexes.clp_guide
+    coherent_artifact = glotaran.builtin.megacomplexes.coherent_artifact
+    damped_oscillation = glotaran.builtin.megacomplexes.damped_oscillation
+    decay = glotaran.builtin.megacomplexes.decay
+    spectral = glotaran.builtin.megacomplexes.spectral
 glotaran.plugins.project_io =
     yml = glotaran.builtin.io.yml.yml
     csv = glotaran.builtin.io.pandas.csv
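These `setup.cfg` blocks are setuptools entry points; PATCH 01 dropped them by accident, which made the built-in megacomplexes undiscoverable in fresh installations. As a quick sanity check, the registered plugins can be listed with the standard library (a generic illustration of how such entry points are read, not code from this repository; the `group` keyword requires Python 3.10+):

```python
from importlib.metadata import entry_points

# On Python 3.8/3.9 use: entry_points().get("glotaran.plugins.megacomplexes", [])
for ep in entry_points(group="glotaran.plugins.megacomplexes"):
    print(ep.name, "->", ep.value)  # e.g. decay -> glotaran.builtin.megacomplexes.decay
```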
From 4da99dc08d0d232c9173ef2427ad3c2ae49d5cda Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 12 Nov 2022 23:08:16 +0100
Subject: [PATCH 04/11] =?UTF-8?q?=F0=9F=A9=B9=F0=9F=A7=AA=20Fixed=20matrix?=
 =?UTF-8?q?.shape=20comparison=20for=20non=20index=20dependent=20case?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../coherent_artifact/test/test_coherent_artifact.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
index cc6f7bafb..d26ab5969 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
@@ -96,7 +96,10 @@ def test_coherent_artifact(spectral_dependence: str):
     for i in range(1, 4):
         assert compartments[i] == f"coherent_artifact_{i}"
 
-    assert matrix.matrix.shape == (spectral.size, time.size, 4)
+    if spectral_dependence == "none":
+        assert matrix.matrix.shape == (time.size, 4)
+    else:
+        assert matrix.matrix.shape == (spectral.size, time.size, 4)
 
     clp = xr.DataArray(
         np.ones((3, 4)),

From 3ad95c4b5bb64346247386f67d48df95bd7c08a1 Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 12 Nov 2022 23:17:15 +0100
Subject: [PATCH 05/11] =?UTF-8?q?=F0=9F=9A=87=20Run=20pytest=20and=20pip?=
 =?UTF-8?q?=20as=20python=20module=20with=20'python=20-m'?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This prevents an esoteric CI bug.
---
 .github/workflows/CI_CD_actions.yml | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/CI_CD_actions.yml b/.github/workflows/CI_CD_actions.yml
index 02fb0fb0e..eeba19b3e 100644
--- a/.github/workflows/CI_CD_actions.yml
+++ b/.github/workflows/CI_CD_actions.yml
@@ -55,7 +55,7 @@ jobs:
         run: |
           conda install -y pandoc
           python -m pip install -U pip wheel
-          pip install .
+          python -m pip install .
           python -m pip install -U -r docs/requirements.txt
       - name: Show installed packages
         run: pip freeze
@@ -80,7 +80,7 @@ jobs:
         run: |
           conda install -y pandoc
           python -m pip install -U pip wheel
-          pip install .
+          python -m pip install .
           python -m pip install -U -r docs/requirements.txt
       - name: Show installed packages
         run: pip freeze
@@ -106,10 +106,10 @@ jobs:
           python -m pip install -U -r requirements_dev.txt
           pip install .
       - name: Show installed packages
-        run: pip freeze
+        run: python -m pip freeze
       - name: Build docs
         run: |
-          py.test -vv --nbval docs/source/notebooks
+          python -m pytest -vv --nbval docs/source/notebooks
 
   test:
     runs-on: ${{ matrix.os }}
@@ -129,12 +129,12 @@ jobs:
         run: |
           python -m pip install --upgrade pip wheel
           python -m pip install -r requirements_dev.txt
-          pip install -e .
+          python -m pip install -e .
       - name: Show installed packages
-        run: pip freeze
+        run: python -m pip freeze
       - name: Run tests
         run: |
-          pytest --cov=./ --cov-report term --cov-report xml --cov-config pyproject.toml glotaran
+          python -m pytest --cov=./ --cov-report term --cov-report xml --cov-config pyproject.toml glotaran
       - name: Codecov Upload
         uses: codecov/codecov-action@v3

From 0b4cd969ad19c0b24c89d240f32516cecae5ee85 Mon Sep 17 00:00:00 2001
From: Joris Snellenburg
Date: Sat, 19 Nov 2022 20:47:55 +0100
Subject: [PATCH 06/11] =?UTF-8?q?=F0=9F=A9=B9=20Fix=20TypeError=20caused?=
 =?UTF-8?q?=20by=20returning=20centers=20as=20list=20instead=20of=20array?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix the issue:
`TypeError: unsupported operand type(s) for -: 'list' and 'int'`
---
 glotaran/builtin/megacomplexes/decay/irf.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/glotaran/builtin/megacomplexes/decay/irf.py b/glotaran/builtin/megacomplexes/decay/irf.py
index 10107992e..fcef68319 100644
--- a/glotaran/builtin/megacomplexes/decay/irf.py
+++ b/glotaran/builtin/megacomplexes/decay/irf.py
@@ -74,9 +74,9 @@ def parameter(
                 f"len(widths) ({len_widths}) none of is 1."
             )
         if len_centers == 1:
-            centers = [centers[0] for _ in range(len_widths)]
+            centers = np.asarray([centers[0] for _ in range(len_widths)])
         else:
-            widths = [widths[0] for _ in range(len_centers)]
+            widths = np.asarray([widths[0] for _ in range(len_centers)])
 
         scales = self.scale if self.scale is not None else [1.0 for _ in centers]
         scales = scales if isinstance(scales, list) else [scales]
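The underlying problem is that plain Python lists do not support elementwise arithmetic, so any later `centers - shift` style expression raises the quoted `TypeError`; converting to an ndarray restores broadcasting. A standalone reproduction of the failure and the fix (illustration only, not repository code):

```python
import numpy as np

centers = [0.5]

try:
    centers - 1  # plain list: no elementwise arithmetic
except TypeError as err:
    print(err)  # unsupported operand type(s) for -: 'list' and 'int'

print(np.asarray(centers) - 1)  # ndarray broadcasts: [-0.5]
```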
From 26b01010c2322a97e39e1070b11a64748ab71c16 Mon Sep 17 00:00:00 2001
From: Joris Snellenburg
Date: Sun, 20 Nov 2022 16:29:58 +0100
Subject: [PATCH 07/11] =?UTF-8?q?=F0=9F=A9=B9Avoid=20nested=20parallelism?=
 =?UTF-8?q?=20when=20using=20numba?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This causes issues on Mac OS X; the fix also results in a 2x speedup on
Windows in some cases.

Also known as a fix for "Terminating: Nested parallel kernel launch
detected, the workqueue threading layer does not supported nested
parallelism. Try the TBB threading layer."

Ref: insightful comment on GitHub:
https://github.com/lmcinnes/umap/issues/665#issuecomment-859426233
---
 .../coherent_artifact/coherent_artifact_megacomplex.py | 2 +-
 .../builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
index 51da3dc01..72d92946a 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
@@ -100,7 +100,7 @@ def finalize_data(
     retrieve_irf(dataset_model, dataset, global_dimension)
 
 
-@nb.jit(nopython=True, parallel=True)
+@nb.jit(nopython=True, parallel=False)
 def _calculate_coherent_artifact_matrix(
     matrix, centers, widths, global_axis_size, model_axis, order
 ):
diff --git a/glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py b/glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py
index 45bce187a..d2dc1804e 100644
--- a/glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py
+++ b/glotaran/builtin/megacomplexes/decay/decay_matrix_gaussian_irf.py
@@ -18,7 +18,7 @@
 SQRT2 = np.sqrt(2)
 
 
-@nb.jit(nopython=True, parallel=True)
+@nb.jit(nopython=True, parallel=False)
 def calculate_decay_matrix_gaussian_irf_on_index(
     matrix: np.typing.ArrayLike,
     rates: np.typing.ArrayLike,
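The quoted error fires when a `parallel=True` kernel is entered while another parallel region is already running, which numba's default workqueue threading layer cannot handle. Compiling the inner kernels with `parallel=False`, as above, sidesteps the nesting. A self-contained sketch of the safe pattern (function names here are invented for illustration):

```python
import numba as nb
import numpy as np


@nb.jit(nopython=True, parallel=False)  # inner kernel: never launches threads
def fill_row(row, scale):
    for j in range(row.size):
        row[j] = scale * j


@nb.jit(nopython=True, parallel=True)  # outer loop parallelizes safely
def fill_matrix(matrix):
    for i in nb.prange(matrix.shape[0]):
        fill_row(matrix[i], float(i))


matrix = np.empty((4, 8))
fill_matrix(matrix)
```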
From 4335a0f28b874c6f2971c7cbd2552de3df367a37 Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sun, 20 Nov 2022 17:43:11 +0100
Subject: [PATCH 08/11] =?UTF-8?q?=F0=9F=A9=B9=F0=9F=94=A7=20Removed=20unus?=
 =?UTF-8?q?ed=20type=20ignore?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 glotaran/optimization/matrix_provider.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/glotaran/optimization/matrix_provider.py b/glotaran/optimization/matrix_provider.py
index 162f268a9..141762e8e 100644
--- a/glotaran/optimization/matrix_provider.py
+++ b/glotaran/optimization/matrix_provider.py
@@ -687,7 +687,7 @@ def align_full_clp_labels(self) -> dict[str, list[str]]:
         for (
             group_label,
             dataset_labels,
-        ) in self._data_provider.group_definitions.items():  # type:ignore[attr-defined]
+        ) in self._data_provider.group_definitions.items():
             aligned_full_clp_labels[group_label] = []
             for dataset_label in dataset_labels:
                 aligned_full_clp_labels[group_label] += [

From 88ff705f2575623483293d2adb2ff686edb1ca75 Mon Sep 17 00:00:00 2001
From: Joris Snellenburg
Date: Sun, 20 Nov 2022 20:10:03 +0100
Subject: [PATCH 09/11] =?UTF-8?q?=F0=9F=A9=B9=20Revert=20changes=20to=20in?=
 =?UTF-8?q?itialization=20of=20doas=20matrix?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Was np.ones, was changed to np.zeros, now changed back to np.ones.
It should be documented why it has to be like that!
---
 .../damped_oscillation/damped_oscillation_megacomplex.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py b/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py
index 7178d41b2..4d78b1e82 100644
--- a/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/damped_oscillation/damped_oscillation_megacomplex.py
@@ -94,7 +94,7 @@ def calculate_matrix(
             if index_dependent(dataset_model)
             else (model_axis.size, len(clp_label))
         )
-        matrix = np.zeros(matrix_shape, dtype=np.float64)
+        matrix = np.ones(matrix_shape, dtype=np.float64)
 
         if irf is None:
             calculate_damped_oscillation_matrix_no_irf(matrix, frequencies, rates, model_axis)

From 84f0dcbe44616110459000a849a0923e792f8cd1 Mon Sep 17 00:00:00 2001
From: Joris Snellenburg
Date: Sun, 20 Nov 2022 21:40:51 +0100
Subject: [PATCH 10/11] =?UTF-8?q?=F0=9F=93=9AAdd=20change=20to=20changelog?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Change added:
- ♻️ Move index dependent calculation to megacomplexes for speed-up (#1175)

Note that this results in a significant speedup (up to 4x) in simple cases
with dispersion and/or multiple datasets. For more complex cases there is
little to no speedup.
---
 changelog.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/changelog.md b/changelog.md
index 804b13eee..f5aac0059 100644
--- a/changelog.md
+++ b/changelog.md
@@ -12,6 +12,7 @@
 
 - ✨ Add optimization history to result and iteration column to parameter history (#1134)
 - ♻️ Complete refactor of model and parameter packages using attrs (#1135)
+- ♻️ Move index dependent calculation to megacomplexes for speed-up (#1175)
 
 ### 👌 Minor Improvements:
From d98dae10a422f9f4b3d762ee5871acab4e89fffb Mon Sep 17 00:00:00 2001
From: Joris Snellenburg
Date: Sun, 20 Nov 2022 21:51:32 +0100
Subject: [PATCH 11/11] =?UTF-8?q?=F0=9F=91=8CAddress=20numba=20deprecation?=
 =?UTF-8?q?=20warning?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Address some deprecation warnings shown when running the doas example.

2x NumbaPendingDeprecationWarning:
- Encountered the use of a type that is scheduled for deprecation: type 'reflected list' found for argument 'centers' of function '_calculate_coherent_artifact_matrix'.
- Encountered the use of a type that is scheduled for deprecation: type 'reflected list' found for argument 'widths' of function '_calculate_coherent_artifact_matrix'.

For more information visit https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-reflection-for-list-and-set-types
---
 .../coherent_artifact/coherent_artifact_megacomplex.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
index 72d92946a..577ec1729 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
@@ -53,7 +53,12 @@ def calculate_matrix(
                 centers.append(center)
                 widths.append(width)
             _calculate_coherent_artifact_matrix(
-                matrix, centers, widths, global_axis.size, model_axis, self.order
+                matrix,
+                np.asarray(centers),
+                np.asarray(widths),
+                global_axis.size,
+                model_axis,
+                self.order,
             )
         else:
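For context: passing a Python list into a jitted function forces numba's deprecated "reflected list" type, which is what triggers the warning; converting to an ndarray at the call boundary, as done above, keeps the argument a NumPy type. A standalone before/after sketch (illustration only, not repository code):

```python
import numba as nb
import numpy as np


@nb.jit(nopython=True)
def first_plus_last(values):
    return values[0] + values[-1]


centers = [1.0, 2.0, 3.0]
first_plus_last(centers)              # list -> NumbaPendingDeprecationWarning
first_plus_last(np.asarray(centers))  # ndarray -> no warning
```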