import abc
import itertools
import logging
import time
import numpy as np
from typing import Iterable, List, Union, Tuple, Optional
from smac.configspace import (
get_one_exchange_neighbourhood,
Configuration,
ConfigurationSpace,
convert_configurations_to_array,
)
from smac.runhistory.runhistory import RunHistory
from smac.stats.stats import Stats
from smac.optimizer.acquisition import AbstractAcquisitionFunction
from smac.optimizer.random_configuration_chooser import ChooserNoCoolDown
from smac.utils.constants import MAXINT
__author__ = "Aaron Klein, Marius Lindauer"
__copyright__ = "Copyright 2015, ML4AAD"
__license__ = "3-clause BSD"
__maintainer__ = "Aaron Klein"
__email__ = "kleinaa@cs.uni-freiburg.de"
__version__ = "0.0.1"
class AcquisitionFunctionMaximizer(object, metaclass=abc.ABCMeta):
"""Abstract class for acquisition maximization.
In order to use this class it has to be subclassed and the method
``_maximize`` must be implemented.
Parameters
----------
acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
config_space : ~smac.configspace.ConfigurationSpace
rng : np.random.RandomState or int, optional
"""
def __init__(
self,
acquisition_function: AbstractAcquisitionFunction,
config_space: ConfigurationSpace,
        rng: Optional[Union[int, np.random.RandomState]] = None
):
self.logger = logging.getLogger(
self.__module__ + "." + self.__class__.__name__
)
self.acquisition_function = acquisition_function
self.config_space = config_space
if rng is None:
self.logger.debug('no rng given, using default seed of 1')
self.rng = np.random.RandomState(seed=1)
        elif isinstance(rng, int):
            self.rng = np.random.RandomState(seed=rng)
        else:
            self.rng = rng
def maximize(
self,
runhistory: RunHistory,
stats: Stats,
num_points: int,
**kwargs
) -> Iterable[Configuration]:
"""Maximize acquisition function using ``_maximize``.
Parameters
----------
runhistory: ~smac.runhistory.runhistory.RunHistory
runhistory object
stats: ~smac.stats.stats.Stats
current stats object
num_points: int
number of points to be sampled
**kwargs
Returns
-------
iterable
An iterable consisting of :class:`smac.configspace.Configuration`.
"""
return [t[1] for t in self._maximize(runhistory, stats, num_points, **kwargs)]
@abc.abstractmethod
def _maximize(
self,
runhistory: RunHistory,
stats: Stats,
num_points: int,
**kwargs
) -> Iterable[Tuple[float, Configuration]]:
"""Implements acquisition function maximization.
        In contrast to ``maximize``, this method returns an iterable of tuples,
        consisting of the acquisition function value and the configuration.
        This allows plugging together different acquisition function maximizers.
Parameters
----------
runhistory: ~smac.runhistory.runhistory.RunHistory
runhistory object
stats: ~smac.stats.stats.Stats
current stats object
num_points: int
number of points to be sampled
**kwargs
Returns
-------
iterable
            An iterable consisting of
            tuple(acquisition_value, :class:`smac.configspace.Configuration`).
"""
raise NotImplementedError()
def _sort_configs_by_acq_value(
self,
configs: List[Configuration]
) -> List[Tuple[float, Configuration]]:
"""Sort the given configurations by acquisition value
Parameters
----------
configs : list(Configuration)
Returns
-------
        list of tuple(acquisition value, Configuration)
            Candidate solutions, ordered by descending
            acquisition function value
"""
acq_values = self.acquisition_function(configs)
# From here
# http://stackoverflow.com/questions/20197990/how-to-make-argsort-result-to-be-random-between-equal-values
random = self.rng.rand(len(acq_values))
# Last column is primary sort key!
indices = np.lexsort((random.flatten(), acq_values.flatten()))
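        # Illustrative sketch: np.lexsort uses the *last* key as the primary
        # sort key, so e.g. acq_values = [0.3, 0.1, 0.3] with
        # random = [0.7, 0.2, 0.1] yields indices [1, 2, 0]
        # (ascending acquisition value, ties broken randomly).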
# Cannot use zip here because the indices array cannot index the
# rand_configs list, because the second is a pure python list
return [(acq_values[ind][0], configs[ind]) for ind in indices[::-1]]
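# Minimal sketch of a custom maximizer, assuming only the interface defined
# above; ``GreedyRandomMaximizer`` is illustrative and not part of SMAC:
#
#   class GreedyRandomMaximizer(AcquisitionFunctionMaximizer):
#       def _maximize(self, runhistory, stats, num_points, **kwargs):
#           configs = self.config_space.sample_configuration(size=num_points)
#           if num_points == 1:
#               configs = [configs]
#           return self._sort_configs_by_acq_value(configs)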
class LocalSearch(AcquisitionFunctionMaximizer):
"""Implementation of SMAC's local search.
Parameters
----------
acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
config_space : ~smac.configspace.ConfigurationSpace
rng : np.random.RandomState or int, optional
max_steps: int
Maximum number of iterations that the local search will perform
n_steps_plateau_walk: int
number of steps during a plateau walk before local search terminates
vectorization_min_obtain : int
Minimal number of neighbors to obtain at once for each local search for vectorized calls. Can be tuned to
reduce the overhead of SMAC
vectorization_max_obtain : int
Maximal number of neighbors to obtain at once for each local search for vectorized calls. Can be tuned to
reduce the overhead of SMAC
"""
def __init__(
self,
acquisition_function: AbstractAcquisitionFunction,
config_space: ConfigurationSpace,
        rng: Optional[Union[int, np.random.RandomState]] = None,
max_steps: Optional[int] = None,
n_steps_plateau_walk: int = 10,
vectorization_min_obtain: int = 2,
vectorization_max_obtain: int = 64,
):
super().__init__(acquisition_function, config_space, rng)
self.max_steps = max_steps
self.n_steps_plateau_walk = n_steps_plateau_walk
self.vectorization_min_obtain = vectorization_min_obtain
self.vectorization_max_obtain = vectorization_max_obtain
def _maximize(
self,
runhistory: RunHistory,
stats: Stats,
num_points: int,
additional_start_points: Optional[List[Tuple[float, Configuration]]] = None,
**kwargs
) -> List[Tuple[float, Configuration]]:
"""Starts a local search from the given startpoint and quits
if either the max number of steps is reached or no neighbor
with an higher improvement was found.
Parameters
----------
runhistory: ~smac.runhistory.runhistory.RunHistory
runhistory object
stats: ~smac.stats.stats.Stats
current stats object
num_points: int
number of points to be sampled
        additional_start_points : Optional[List[Tuple[float, Configuration]]]
            Additional start points
        **kwargs
            Additional parameters that will be passed to the
            acquisition function
        Returns
        -------
        List[Tuple[float, Configuration]]
            Candidate configurations with their acquisition values,
            sorted by descending acquisition function value
"""
init_points = self._get_initial_points(num_points, runhistory, additional_start_points)
configs_acq = self._do_search(init_points)
# shuffle for random tie-break
self.rng.shuffle(configs_acq)
# sort according to acq value
configs_acq.sort(reverse=True, key=lambda x: x[0])
for _, inc in configs_acq:
inc.origin = 'Local Search'
return configs_acq
def _get_initial_points(self, num_points, runhistory, additional_start_points):
if runhistory.empty():
init_points = self.config_space.sample_configuration(size=num_points)
else:
# initiate local search
configs_previous_runs = runhistory.get_all_configs()
# configurations with the highest previous EI
configs_previous_runs_sorted = self._sort_configs_by_acq_value(configs_previous_runs)
configs_previous_runs_sorted = [conf[1] for conf in configs_previous_runs_sorted[:num_points]]
# configurations with the lowest predictive cost, check for None to make unit tests work
if self.acquisition_function.model is not None:
conf_array = convert_configurations_to_array(configs_previous_runs)
costs = self.acquisition_function.model.predict_marginalized_over_instances(conf_array)[0]
# From here
# http://stackoverflow.com/questions/20197990/how-to-make-argsort-result-to-be-random-between-equal-values
random = self.rng.rand(len(costs))
# Last column is primary sort key!
indices = np.lexsort((random.flatten(), costs.flatten()))
# Cannot use zip here because the indices array cannot index the
# rand_configs list, because the second is a pure python list
configs_previous_runs_sorted_by_cost = [configs_previous_runs[ind] for ind in indices][:num_points]
else:
configs_previous_runs_sorted_by_cost = []
if additional_start_points is not None:
additional_start_points = [asp[1] for asp in additional_start_points[:num_points]]
else:
additional_start_points = []
init_points = []
init_points_as_set = set()
for cand in itertools.chain(
configs_previous_runs_sorted,
configs_previous_runs_sorted_by_cost,
additional_start_points,
):
if cand not in init_points_as_set:
init_points.append(cand)
init_points_as_set.add(cand)
return init_points
def _do_search(
self,
start_points: List[Configuration],
**kwargs
) -> List[Tuple[float, Configuration]]:
        # Gather data structure for starting points
if isinstance(start_points, Configuration):
start_points = [start_points]
incumbents = start_points
# Compute the acquisition value of the incumbents
num_incumbents = len(incumbents)
acq_val_incumbents = self.acquisition_function(incumbents, **kwargs)
if num_incumbents == 1:
acq_val_incumbents = [acq_val_incumbents[0][0]]
else:
acq_val_incumbents = [a[0] for a in acq_val_incumbents]
# Set up additional variables required to do vectorized local search:
# whether the i-th local search is still running
active = [True] * num_incumbents
# number of plateau walks of the i-th local search. Reaching the maximum number is the stopping criterion of
# the local search.
n_no_plateau_walk = [0] * num_incumbents
# tracking the number of steps for logging purposes
local_search_steps = [0] * num_incumbents
# tracking the number of neighbors looked at for logging purposes
neighbors_looked_at = [0] * num_incumbents
        # tracking the number of neighbors generated for logging purposes
neighbors_generated = [0] * num_incumbents
# how many neighbors were obtained for the i-th local search. Important to map the individual acquisition
# function values to the correct local search run
obtain_n = [self.vectorization_min_obtain] * num_incumbents
# Tracking the time it takes to compute the acquisition function
times = []
# Set up the neighborhood generators
neighborhood_iterators = []
for i, inc in enumerate(incumbents):
neighborhood_iterators.append(get_one_exchange_neighbourhood(
inc, seed=self.rng.randint(low=0, high=100000)))
local_search_steps[i] += 1
        # Keeping track of configurations with equal acquisition value for plateau walking.
        # Use a list comprehension rather than ``[[]] * n`` so that each local
        # search gets its own, independent list.
        neighbors_w_equal_acq = [[] for _ in range(num_incumbents)]
num_iters = 0
while np.any(active):
num_iters += 1
# Whether the i-th local search improved. When a new neighborhood is generated, this is used to determine
# whether a step was made (improvement) or not (iterator exhausted)
improved = [False] * num_incumbents
# Used to request a new neighborhood for the incumbent of the i-th local search
new_neighborhood = [False] * num_incumbents
# gather all neighbors
neighbors = []
for i, neighborhood_iterator in enumerate(neighborhood_iterators):
if active[i]:
neighbors_for_i = []
for j in range(obtain_n[i]):
try:
n = next(neighborhood_iterator)
neighbors_generated[i] += 1
neighbors_for_i.append(n)
except StopIteration:
obtain_n[i] = len(neighbors_for_i)
new_neighborhood[i] = True
break
neighbors.extend(neighbors_for_i)
if len(neighbors) != 0:
start_time = time.time()
acq_val = self.acquisition_function(neighbors, **kwargs)
end_time = time.time()
times.append(end_time - start_time)
                if np.ndim(acq_val) == 0:
acq_val = [acq_val]
# Comparing the acquisition function of the neighbors with the acquisition value of the incumbent
acq_index = 0
                # Iterate over all local searches
for i in range(num_incumbents):
if not active[i]:
continue
# And for each local search we know how many neighbors we obtained
for j in range(obtain_n[i]):
                        # If the i-th local search already improved in this
                        # batch, skip its remaining neighbors and only advance
                        # the index into acq_val
if improved[i]:
acq_index += 1
else:
neighbors_looked_at[i] += 1
# Found a better configuration
if acq_val[acq_index] > acq_val_incumbents[i]:
self.logger.debug(
"Local search %d: Switch to one of the neighbors (after %d configurations).",
i,
neighbors_looked_at[i],
)
incumbents[i] = neighbors[acq_index]
acq_val_incumbents[i] = acq_val[acq_index]
new_neighborhood[i] = True
improved[i] = True
local_search_steps[i] += 1
neighbors_w_equal_acq[i] = []
obtain_n[i] = 1
# Found an equally well performing configuration, keeping it for plateau walking
elif acq_val[acq_index] == acq_val_incumbents[i]:
neighbors_w_equal_acq[i].append(neighbors[acq_index])
acq_index += 1
# Now we check whether we need to create new neighborhoods and whether we need to increase the number of
# plateau walks for one of the local searches. Also disables local searches if the number of plateau walks
# is reached (and all being switched off is the termination criterion).
for i in range(num_incumbents):
if not active[i]:
continue
if obtain_n[i] == 0 or improved[i]:
obtain_n[i] = 2
else:
obtain_n[i] = obtain_n[i] * 2
obtain_n[i] = min(obtain_n[i], self.vectorization_max_obtain)
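                # With the defaults vectorization_min_obtain=2 and
                # vectorization_max_obtain=64, an unimproved search thus
                # requests 2, 4, 8, 16, 32, 64, 64, ... neighbors per
                # iteration, amortizing the cost of the vectorized
                # acquisition function calls.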
if new_neighborhood[i]:
if not improved[i] and n_no_plateau_walk[i] < self.n_steps_plateau_walk:
if len(neighbors_w_equal_acq[i]) != 0:
incumbents[i] = neighbors_w_equal_acq[i][0]
neighbors_w_equal_acq[i] = []
n_no_plateau_walk[i] += 1
if n_no_plateau_walk[i] >= self.n_steps_plateau_walk:
active[i] = False
continue
neighborhood_iterators[i] = get_one_exchange_neighbourhood(
incumbents[i], seed=self.rng.randint(low=0, high=100000),
)
self.logger.debug(
"Local searches took %s steps and looked at %s configurations. Computing the acquisition function in "
"vectorized for took %f seconds on average.",
local_search_steps, neighbors_looked_at, np.mean(times),
)
return [(a, i) for a, i in zip(acq_val_incumbents, incumbents)]
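# Minimal usage sketch (hypothetical objects: ``acq`` is a fitted
# AbstractAcquisitionFunction, ``cs`` a ConfigurationSpace, ``rh`` a
# RunHistory and ``st`` a Stats instance):
#
#   local_search = LocalSearch(acquisition_function=acq, config_space=cs, rng=1)
#   scored = local_search._maximize(rh, st, num_points=10)
#   best_acq_value, best_config = scored[0]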
class DiffOpt(AcquisitionFunctionMaximizer):
"""Get candidate solutions via DifferentialEvolutionSolvers.
Parameters
----------
acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
config_space : ~smac.configspace.ConfigurationSpace
rng : np.random.RandomState or int, optional
"""
def _maximize(
self,
runhistory: RunHistory,
stats: Stats,
num_points: int,
_sorted: bool=False,
**kwargs
) -> List[Tuple[float, Configuration]]:
"""DifferentialEvolutionSolver
Parameters
----------
runhistory: ~smac.runhistory.runhistory.RunHistory
runhistory object
stats: ~smac.stats.stats.Stats
current stats object
num_points: int
number of points to be sampled
        _sorted: bool
            not used by this maximizer; the returned configurations are
            always sorted by acquisition value
**kwargs
not used
Returns
-------
iterable
            An iterable consisting of
            tuple(acquisition_value, :class:`smac.configspace.Configuration`).
"""
from scipy.optimize._differentialevolution import DifferentialEvolutionSolver
configs = []
        def func(x):
            # The solver minimizes, so negate the acquisition value
            return -self.acquisition_function([Configuration(self.config_space, vector=x)])
        # One [0, 1] box constraint per hyperparameter of the scaled space
        bounds = [[0, 1]] * len(self.config_space.get_hyperparameters())
        ds = DifferentialEvolutionSolver(func, bounds=bounds, args=(),
                                         strategy='best1bin', maxiter=1000,
                                         popsize=50, tol=0.01,
                                         mutation=(0.5, 1),
                                         recombination=0.7,
                                         seed=self.rng.randint(1000), polish=True,
                                         callback=None,
                                         disp=False, init='latinhypercube', atol=0)
        ds.solve()
for pop, val in zip(ds.population, ds.population_energies):
rc = Configuration(self.config_space, vector=pop)
rc.origin = 'DifferentialEvolution'
configs.append((-val, rc))
        configs.sort(key=lambda t: t[0], reverse=True)
return configs
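# Minimal usage sketch (same hypothetical objects as above); the solver works
# on the [0, 1]-scaled vector representation of the configuration space:
#
#   diff_opt = DiffOpt(acquisition_function=acq, config_space=cs, rng=1)
#   scored = diff_opt._maximize(rh, st, num_points=10)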
class RandomSearch(AcquisitionFunctionMaximizer):
"""Get candidate solutions via random sampling of configurations.
Parameters
----------
acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
config_space : ~smac.configspace.ConfigurationSpace
rng : np.random.RandomState or int, optional
"""
def _maximize(
self,
runhistory: RunHistory,
stats: Stats,
num_points: int,
_sorted: bool=False,
**kwargs
) -> List[Tuple[float, Configuration]]:
"""Randomly sampled configurations
Parameters
----------
runhistory: ~smac.runhistory.runhistory.RunHistory
runhistory object
stats: ~smac.stats.stats.Stats
current stats object
num_points: int
number of points to be sampled
_sorted: bool
whether random configurations are sorted according to acquisition function
**kwargs
not used
Returns
-------
iterable
            An iterable consisting of
            tuple(acquisition_value, :class:`smac.configspace.Configuration`).
"""
if num_points > 1:
rand_configs = self.config_space.sample_configuration(
size=num_points)
else:
rand_configs = [self.config_space.sample_configuration(size=1)]
if _sorted:
for i in range(len(rand_configs)):
rand_configs[i].origin = 'Random Search (sorted)'
return self._sort_configs_by_acq_value(rand_configs)
else:
for i in range(len(rand_configs)):
rand_configs[i].origin = 'Random Search'
return [(0, rand_configs[i]) for i in range(len(rand_configs))]
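# Minimal usage sketch (same hypothetical objects as above): with
# ``_sorted=True`` the samples come back ranked by acquisition value, with
# ``_sorted=False`` they keep sampling order and a placeholder value of 0:
#
#   random_search = RandomSearch(acquisition_function=acq, config_space=cs, rng=1)
#   ranked = random_search._maximize(rh, st, num_points=100, _sorted=True)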
class InterleavedLocalAndRandomSearch(AcquisitionFunctionMaximizer):
"""Implements SMAC's default acquisition function optimization.
    This optimizer performs local search from the previous best points
    according to the acquisition function, uses the acquisition function to
    sort randomly sampled configurations, and interleaves unsorted, randomly
    sampled configurations in between.
Parameters
----------
acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
config_space : ~smac.configspace.ConfigurationSpace
rng : np.random.RandomState or int, optional
max_steps: int
[LocalSearch] Maximum number of steps that the local search will perform
n_steps_plateau_walk: int
[LocalSearch] number of steps during a plateau walk before local search terminates
n_sls_iterations: int
[Local Search] number of local search iterations
"""
def __init__(
self,
acquisition_function: AbstractAcquisitionFunction,
config_space: ConfigurationSpace,
        rng: Optional[Union[int, np.random.RandomState]] = None,
max_steps: Optional[int] = None,
n_steps_plateau_walk: int = 10,
n_sls_iterations: int = 10
):
super().__init__(acquisition_function, config_space, rng)
self.random_search = RandomSearch(
acquisition_function=acquisition_function,
config_space=config_space,
rng=rng
)
self.local_search = LocalSearch(
acquisition_function=acquisition_function,
config_space=config_space,
rng=rng,
max_steps=max_steps,
n_steps_plateau_walk=n_steps_plateau_walk
)
self.n_sls_iterations = n_sls_iterations
#=======================================================================
# self.local_search = DiffOpt(
# acquisition_function=acquisition_function,
# config_space=config_space,
# rng=rng
# )
#=======================================================================
def maximize(
self,
runhistory: RunHistory,
stats: Stats,
num_points: int,
random_configuration_chooser,
**kwargs
) -> Iterable[Configuration]:
"""Maximize acquisition function using ``_maximize``.
Parameters
----------
runhistory: ~smac.runhistory.runhistory.RunHistory
runhistory object
stats: ~smac.stats.stats.Stats
current stats object
num_points: int
number of points to be sampled
        random_configuration_chooser: ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser
            part of the returned ChallengerList, so that random configurations
            can be interleaved by the scheme the random_configuration_chooser
            defines; random_configuration_chooser.next_smbo_iteration()
            is called at the end of this function
**kwargs
passed to acquisition function
Returns
-------
Iterable[Configuration]
to be concrete: ~smac.ei_optimization.ChallengerList
"""
# Get configurations sorted by EI
next_configs_by_random_search_sorted = self.random_search._maximize(
runhistory,
stats,
num_points,
_sorted=True,
)
next_configs_by_local_search = self.local_search._maximize(
runhistory, stats, self.n_sls_iterations, additional_start_points=next_configs_by_random_search_sorted,
**kwargs
)
        # Having the configurations from random search sorted by their
        # acquisition function value is important for the first few iterations
        # of SMAC. As long as the random forest predicts a constant value, we
        # want to use only random configurations. Having them at the beginning
        # of the list ensures this (even after adding the configurations found
        # by local search and sorting again)
next_configs_by_acq_value = (
next_configs_by_random_search_sorted
+ next_configs_by_local_search
)
next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
self.logger.debug(
"First 5 acq func (origin) values of selected configurations: %s",
str([[_[0], _[1].origin] for _ in next_configs_by_acq_value[:5]])
)
next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]
challengers = ChallengerList(next_configs_by_acq_value,
self.config_space,
random_configuration_chooser)
random_configuration_chooser.next_smbo_iteration()
return challengers
def _maximize(
self,
runhistory: RunHistory,
stats: Stats,
num_points: int,
**kwargs
) -> Iterable[Tuple[float, Configuration]]:
raise NotImplementedError()
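# Minimal usage sketch (same hypothetical objects as above); the chooser
# decides which challengers are replaced by random configurations:
#
#   optimizer = InterleavedLocalAndRandomSearch(
#       acquisition_function=acq, config_space=cs, rng=1)
#   challengers = optimizer.maximize(
#       rh, st, num_points=1000,
#       random_configuration_chooser=ChooserNoCoolDown(2.0))
#   next_config = next(iter(challengers))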
class ChallengerList(object):
"""Helper class to interleave random configurations in a list of challengers.
    Provides an iterator which returns a random configuration in every second
    iteration. This reduces the time needed to generate a list of new
    challengers, as one does not have to sample several hundred random
    configurations per iteration that are never looked at.
Parameters
----------
challengers : list
List of challengers (without interleaved random configurations)
configuration_space : ConfigurationSpace
ConfigurationSpace from which to sample new random configurations.
"""
def __init__(self, challengers, configuration_space, random_configuration_chooser=ChooserNoCoolDown(2.0)):
self.challengers = challengers
self.configuration_space = configuration_space
self._index = 0
        self._iteration = 1  # 1-based to prevent starting with a random configuration
self.random_configuration_chooser = random_configuration_chooser
def __iter__(self):
return self
def __next__(self):
if self._index == len(self.challengers):
raise StopIteration
else:
if self.random_configuration_chooser.check(self._iteration):
config = self.configuration_space.sample_configuration()
config.origin = 'Random Search'
else:
config = self.challengers[self._index]
self._index += 1
self._iteration += 1
return config
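# Minimal usage sketch (assumes ``cs`` is a ConfigurationSpace and
# ``challengers`` a list of Configuration objects; both are placeholders):
#
#   challenger_list = ChallengerList(challengers, cs)
#   for config in challenger_list:
#       pass  # every second configuration is freshly sampled at random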