Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor/save result (Sourcery refactored) #875

Closed
wants to merge 22 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
ececb8e
Added DatasetGroup and DatasetGroupModel.
joernweissenborn Oct 8, 2021
7238d32
Replaced Problem with Optimization Groups and Calculators.
joernweissenborn Oct 9, 2021
bf3e413
Removed non-negative-least-squares option from scheme.
joernweissenborn Oct 9, 2021
158c822
🩹 Reactivated skipped test and fixed failing code
s-weigand Oct 13, 2021
f96af6d
👌🗑️ Added back non_negative_least_squares and deprecated group_tolerance
s-weigand Oct 14, 2021
cc4beb9
🩹 Fixed optimization groups using datasets outside of their groups
s-weigand Oct 14, 2021
703170f
🩹 Fix Benchmarks
s-weigand Oct 15, 2021
a5f9e11
Added test for multiple groups
joernweissenborn Oct 15, 2021
eee6882
Renamed OptimizationGroupCalculators
joernweissenborn Oct 16, 2021
4559277
Addressed Codacy issues
joernweissenborn Oct 16, 2021
4c52592
Refactored DecayMegacomplex.
joernweissenborn Oct 15, 2021
62f5b51
Added DecaySequentialMegacomplex.
joernweissenborn Oct 15, 2021
b02fad7
Added DecayParallelMegacomplex.
joernweissenborn Oct 15, 2021
aa23647
bugfix
joernweissenborn Oct 15, 2021
f6aca9a
Added model generators
joernweissenborn Oct 15, 2021
557959c
Added spectral decay model generator
joernweissenborn Oct 16, 2021
a14fcdd
Refactored examples
joernweissenborn Oct 16, 2021
f8ec2ad
Fix test
joernweissenborn Oct 16, 2021
2159970
Changed result test to use example
joernweissenborn Oct 16, 2021
0259761
Removed legacy format
joernweissenborn Oct 16, 2021
40bc3e1
Changed plugininterface to use SavingOptions
joernweissenborn Oct 16, 2021
94cd6ca
'Refactored by Sourcery'
Oct 19, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 0 additions & 33 deletions benchmark/benchmarks/integration/ex_two_datasets/benchmark.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,6 @@
import pickle
from pathlib import Path

from scipy.optimize import OptimizeResult

from glotaran.analysis.optimize import _create_result
from glotaran.analysis.optimize import optimize
from glotaran.analysis.problem_grouped import GroupedProblem
from glotaran.io import load_dataset
from glotaran.io import load_model
from glotaran.io import load_parameters
Expand Down Expand Up @@ -37,41 +32,13 @@ def setup(self):
non_negative_least_squares=True,
optimization_method="TrustRegionReflection",
)
# Values extracted from a previous run of IntegrationTwoDatasets.time_optimize()
self.problem = GroupedProblem(self.scheme)
# pickled OptimizeResult
with open(SCRIPT_DIR / "data/ls_result.pcl", "rb") as ls_result_file:
self.ls_result: OptimizeResult = pickle.load(ls_result_file)
self.free_parameter_labels = [
"inputs.2",
"inputs.3",
"inputs.7",
"inputs.8",
"scale.2",
"rates.k1",
"rates.k2",
"rates.k3",
"irf.center",
"irf.width",
]
self.termination_reason = "The maximum number of function evaluations is exceeded."

def time_optimize(self):
optimize(self.scheme)

def peakmem_optimize(self):
optimize(self.scheme)

def time_create_result(self):
_create_result(
self.problem, self.ls_result, self.free_parameter_labels, self.termination_reason
)

def peakmem_create_result(self):
_create_result(
self.problem, self.ls_result, self.free_parameter_labels, self.termination_reason
)


if __name__ == "__main__":
test = IntegrationTwoDatasets()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,7 @@
import pytest
import xarray as xr

from glotaran.analysis.problem_grouped import GroupedProblem
from glotaran.analysis.problem_ungrouped import UngroupedProblem
from glotaran.analysis.optimization_group import OptimizationGroup
from glotaran.model import Megacomplex
from glotaran.model import Model
from glotaran.model import megacomplex
Expand Down Expand Up @@ -55,9 +54,10 @@ def finalize_data(


@monkeypatch_plugin_registry(test_megacomplex={"benchmark": BenchmarkMegacomplex})
def setup_model(index_dependent):
def setup_model(index_dependent, link_clp):
model_dict = {
"megacomplex": {"m1": {"is_index_dependent": index_dependent}},
"dataset_groups": {"default": {"link_clp": link_clp}},
"dataset": {
"dataset1": {"megacomplex": ["m1"]},
"dataset2": {"megacomplex": ["m1"]},
Expand All @@ -83,90 +83,93 @@ def setup_scheme(model):
)


def setup_problem(scheme, grouped):
return GroupedProblem(scheme) if grouped else UngroupedProblem(scheme)
def setup_optimization_group(scheme):
return OptimizationGroup(scheme, scheme.model.get_dataset_groups()["default"])


def test_benchmark_bag_creation(benchmark):

model = setup_model(False)
model = setup_model(False, True)
assert model.valid()

scheme = setup_scheme(model)
problem = setup_problem(scheme, True)
optimization_group = setup_optimization_group(scheme)

benchmark(problem.init_bag)
benchmark(optimization_group._calculator.init_bag)


@pytest.mark.parametrize("grouped", [True, False])
@pytest.mark.parametrize("link_clp", [True, False])
@pytest.mark.parametrize("index_dependent", [True, False])
def test_benchmark_calculate_matrix(benchmark, grouped, index_dependent):
def test_benchmark_calculate_matrix(benchmark, link_clp, index_dependent):

model = setup_model(index_dependent)
model = setup_model(index_dependent, link_clp)
assert model.valid()

scheme = setup_scheme(model)
problem = setup_problem(scheme, grouped)
optimization_group = setup_optimization_group(scheme)

if grouped:
problem.init_bag()
if link_clp:
optimization_group._calculator.init_bag()

benchmark(problem.calculate_matrices)
benchmark(optimization_group._calculator.calculate_matrices)


@pytest.mark.parametrize("grouped", [True, False])
@pytest.mark.parametrize("link_clp", [True, False])
@pytest.mark.parametrize("index_dependent", [True, False])
def test_benchmark_calculate_residual(benchmark, grouped, index_dependent):
def test_benchmark_calculate_residual(benchmark, link_clp, index_dependent):

model = setup_model(index_dependent)
model = setup_model(index_dependent, link_clp)
assert model.valid()

scheme = setup_scheme(model)
problem = setup_problem(scheme, grouped)
optimization_group = setup_optimization_group(scheme)

if grouped:
problem.init_bag()
problem.calculate_matrices()
if link_clp:
optimization_group._calculator.init_bag()

benchmark(problem.calculate_residual)
optimization_group._calculator.calculate_matrices()

benchmark(optimization_group._calculator.calculate_residual)

@pytest.mark.parametrize("grouped", [True, False])

@pytest.mark.parametrize("link_clp", [True, False])
@pytest.mark.parametrize("index_dependent", [True, False])
def test_benchmark_calculate_result_data(benchmark, grouped, index_dependent):
def test_benchmark_calculate_result_data(benchmark, link_clp, index_dependent):

model = setup_model(index_dependent)
model = setup_model(index_dependent, link_clp)
assert model.valid()

scheme = setup_scheme(model)
problem = setup_problem(scheme, grouped)
optimization_group = setup_optimization_group(scheme)

if link_clp:
optimization_group._calculator.init_bag()

optimization_group._calculator.calculate_matrices()

if grouped:
problem.init_bag()
problem.calculate_matrices()
problem.calculate_residual()
optimization_group._calculator.calculate_residual()

benchmark(problem.create_result_data)
benchmark(optimization_group.create_result_data)


# @pytest.mark.skip(reason="To time consuming atm.")
@pytest.mark.parametrize("grouped", [True, False])
@pytest.mark.parametrize("link_clp", [True, False])
@pytest.mark.parametrize("index_dependent", [True, False])
def test_benchmark_optimize_20_runs(benchmark, grouped, index_dependent):
def test_benchmark_optimize_20_runs(benchmark, link_clp, index_dependent):

model = setup_model(index_dependent)
model = setup_model(index_dependent, link_clp)
assert model.valid()

scheme = setup_scheme(model)
problem = setup_problem(scheme, grouped)
optimization_group = setup_optimization_group(scheme)

@benchmark
def run():
if grouped:
problem.init_bag()
if link_clp:
optimization_group._calculator.init_bag()

for _ in range(20):
problem.reset()
problem.full_penalty
optimization_group.reset()
optimization_group._calculator.calculate_full_penalty()

problem.create_result_data()
optimization_group.create_result_data()
2 changes: 1 addition & 1 deletion docs/source/notebooks/quickstart/quickstart.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@
"metadata": {},
"outputs": [],
"source": [
"from glotaran.examples.sequential import dataset\n",
"from glotaran.examples.sequential_spectral_decay import DATASET as dataset\n",
"\n",
"dataset"
]
Expand Down
Loading