diff --git a/glotaran/analysis/problem_grouped.py b/glotaran/analysis/problem_grouped.py
index 7711f50ab..e898970b2 100644
--- a/glotaran/analysis/problem_grouped.py
+++ b/glotaran/analysis/problem_grouped.py
@@ -293,7 +293,12 @@ def calculate_residual(self):
         self._weighted_residuals = list(map(lambda result: result[2], results))
         self._residuals = list(map(lambda result: result[3], results))
         self._additional_penalty = calculate_clp_penalties(
-            self.model, self.parameters, self._clp_labels, self._grouped_clps, self._full_axis
+            self.model,
+            self.parameters,
+            self._clp_labels,
+            self._grouped_clps,
+            self._full_axis,
+            self.dataset_models,
         )
 
         return self._reduced_clps, self._clps, self._weighted_residuals, self._residuals
diff --git a/glotaran/analysis/problem_ungrouped.py b/glotaran/analysis/problem_ungrouped.py
index 10b03eff9..8b7fedb18 100644
--- a/glotaran/analysis/problem_ungrouped.py
+++ b/glotaran/analysis/problem_ungrouped.py
@@ -168,7 +168,12 @@ def _calculate_residual(self, label: str, dataset_model: DatasetModel):
 
         clp_labels = self._get_clp_labels(label)
         additional_penalty = calculate_clp_penalties(
-            self.model, self.parameters, clp_labels, self._clps[label], global_axis
+            self.model,
+            self.parameters,
+            clp_labels,
+            self._clps[label],
+            global_axis,
+            self.dataset_models,
         )
         if additional_penalty.size != 0:
             self._additional_penalty.append(additional_penalty)
diff --git a/glotaran/analysis/util.py b/glotaran/analysis/util.py
index d54cb06b0..e25e79136 100644
--- a/glotaran/analysis/util.py
+++ b/glotaran/analysis/util.py
@@ -194,27 +194,48 @@ def calculate_clp_penalties(
     clp_labels: list[list[str]] | list[str],
     clps: list[np.ndarray],
     global_axis: np.ndarray,
+    dataset_models: dict[str, DatasetModel],
 ) -> np.ndarray:
+    # TODO: make a decision on how to handle clp_penalties per dataset
+    # 1. sum up contributions per dataset on each dataset_axis (v0.4.1)
+    # 2. sum up contributions on the global_axis (future?)
+
     penalties = []
     for penalty in model.clp_area_penalties:
         penalty = penalty.fill(model, parameters)
-        source_area = _get_area(
-            penalty.source,
-            clp_labels,
-            clps,
-            penalty.source_intervals,
-            global_axis,
-        )
-
-        target_area = _get_area(
-            penalty.target,
-            clp_labels,
-            clps,
-            penalty.target_intervals,
-            global_axis,
-        )
-
+        source_area = np.array([])
+        target_area = np.array([])
+        for _, dataset_model in dataset_models.items():
+            dataset_axis = dataset_model.get_global_axis()
+
+            source_area = np.concatenate(
+                [
+                    source_area,
+                    _get_area(
+                        penalty.source,
+                        clp_labels,
+                        clps,
+                        penalty.source_intervals,
+                        global_axis,
+                        dataset_axis,
+                    ),
+                ]
+            )
+
+            target_area = np.concatenate(
+                [
+                    target_area,
+                    _get_area(
+                        penalty.target,
+                        clp_labels,
+                        clps,
+                        penalty.target_intervals,
+                        global_axis,
+                        dataset_axis,
+                    ),
+                ]
+            )
         area_penalty = np.abs(np.sum(source_area) - penalty.parameter * np.sum(target_area))
         penalties.append(area_penalty * penalty.weight)
@@ -228,14 +249,18 @@ def _get_area(
     clps: list[np.ndarray],
     intervals: list[tuple[float, float]],
     global_axis: np.ndarray,
+    dataset_axis: np.ndarray,
 ) -> np.ndarray:
 
     area = []
     for interval in intervals:
         if interval[0] > global_axis[-1]:
             continue
-
-        start_idx, end_idx = get_idx_from_interval(interval, global_axis)
+        bounded_interval = (
+            max(interval[0], np.min(dataset_axis)),
+            min(interval[1], np.max(dataset_axis)),
+        )
+        start_idx, end_idx = get_idx_from_interval(bounded_interval, global_axis)
         for i in range(start_idx, end_idx + 1):
             index_clp_labels = clp_labels[i] if isinstance(clp_labels[0], list) else clp_labels
             if clp_label in index_clp_labels:
diff --git a/glotaran/model/clp_penalties.py b/glotaran/model/clp_penalties.py
index 2918d9b41..3aa08ea32 100644
--- a/glotaran/model/clp_penalties.py
+++ b/glotaran/model/clp_penalties.py
@@ -73,6 +73,7 @@ def apply_spectral_penalties(
     group_tolerance: float,
 ) -> np.ndarray:
 
+    # TODO: seems to duplicate calculate_clp_penalties
     penalties = []
     for penalty in model.clp_area_penalties:
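
For context, here is a minimal, self-contained sketch of what the new per-dataset penalty path computes, under stated assumptions: `get_idx_from_interval` is simplified to a nearest-index lookup, and `area_on_dataset` is a hypothetical stand-in for `_get_area`; neither is the library code. Each penalty interval is clamped to the dataset's own axis before indexing into the clps on the global axis, and the per-dataset areas are concatenated (option 1 in the TODO above).

```python
import numpy as np


def get_idx_from_interval(interval, axis):
    # Simplified stand-in: nearest indices on `axis` for the interval bounds.
    start = int(np.abs(axis - interval[0]).argmin())
    end = int(np.abs(axis - interval[1]).argmin())
    return start, end


def area_on_dataset(clp_trace, intervals, global_axis, dataset_axis):
    # Hypothetical stand-in for _get_area: collect clp values inside each
    # interval, after clamping the interval to the dataset's own axis.
    area = []
    for interval in intervals:
        if interval[0] > global_axis[-1]:
            continue
        bounded_interval = (
            max(interval[0], float(np.min(dataset_axis))),
            min(interval[1], float(np.max(dataset_axis))),
        )
        start_idx, end_idx = get_idx_from_interval(bounded_interval, global_axis)
        area.extend(clp_trace[start_idx : end_idx + 1])
    return np.asarray(area)


# Two toy datasets covering different parts of the global axis.
global_axis = np.linspace(0.0, 10.0, 11)
clp_trace = np.ones_like(global_axis)  # toy clp amplitudes for one label
dataset_axes = {
    "dataset1": np.linspace(0.0, 5.0, 6),
    "dataset2": np.linspace(4.0, 10.0, 7),
}

source_area = np.array([])
for dataset_axis in dataset_axes.values():
    source_area = np.concatenate(
        [source_area, area_on_dataset(clp_trace, [(0.0, 10.0)], global_axis, dataset_axis)]
    )
print(source_area.size)  # 13: 6 points from dataset1 + 7 from dataset2
```

Note that in this toy the overlap at 4.0 and 5.0 is contributed by both datasets; that is a property of summing per dataset on each dataset_axis (option 1), which the TODO leaves open against summing once on the global_axis (option 2).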