Skip to content

Commit

Permalink
update fvgp support; fix default training for new gpcam versions; fix setting hyperparameters manually; allow selection of output value to view in cloud
Browse files Browse the repository at this point in the history
  • Loading branch information
ronpandolfi committed Nov 4, 2024
1 parent 4ab9a1d commit 9e57bb7
Show file tree
Hide file tree
Showing 6 changed files with 78 additions and 46 deletions.
26 changes: 16 additions & 10 deletions tsuchinoko/adaptive/fvgp_gpCAM_in_process.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,42 +4,48 @@

from gpcam.gp_optimizer import fvGPOptimizer
from .gpCAM_in_process import GPCAMInProcessEngine
from ..graphs.common import GPCamPosteriorCovariance, GPCamAcquisitionFunction, GPCamPosteriorMean, Table

from ..graphs.common import GPCamPosteriorCovariance, GPCamAcquisitionFunction, GPCamPosteriorMean, Table, \
GPCamHyperparameterPlot, Score


class FvgpGPCAMInProcessEngine(GPCAMInProcessEngine):
"""
A multi-task adaptive engine powered by gpCAM: https://gpcam.readthedocs.io/en/latest/
"""

def __init__(self, dimensionality, output_dim, output_number, parameter_bounds, hyperparameters, hyperparameter_bounds, **kwargs):
def __init__(self, dimensionality, output_number, parameter_bounds, hyperparameters, hyperparameter_bounds, **kwargs):
self.kwargs = kwargs
self.output_dim = output_dim
self.output_number = output_number
super(FvgpGPCAMInProcessEngine, self).__init__(dimensionality, parameter_bounds, hyperparameters, hyperparameter_bounds, **kwargs)

if dimensionality == 2:
self.graphs = [GPCamPosteriorCovariance(),
GPCamAcquisitionFunction(),
GPCamPosteriorMean(),
GPCamHyperparameterPlot(),
Score(),
Table()]
elif dimensionality > 2:
self.graphs = [GPCamPosteriorCovariance(),
Table()]

# TODO: refactor this into base
def init_optimizer(self):
parameter_bounds = np.asarray([[self.parameters[('bounds', f'axis_{i}_{edge}')]
for edge in ['min', 'max']]
for i in range(self.dimensionality)])
opts = self.gp_opts.copy()
if sys.platform == 'darwin':
opts['compute_device'] = 'numpy'

self.optimizer = fvGPOptimizer(self.dimensionality, self.output_dim, self.output_number, parameter_bounds)
hyperparameters = np.asarray([self.parameters[('hyperparameters', f'hyperparameter_{i}')]
for i in range(self.num_hyperparameters)])

def init_gp(self, hyperparameters, **opts):
self.optimizer.init_fvgp(hyperparameters, **opts)
self.optimizer = fvGPOptimizer(init_hyperparameters=hyperparameters,
**opts)

def _set_hyperparameter(self, parameter, value):
self.optimizer.gp_initialized = False # Force re-initialization
self.optimizer.init_fvgp(np.asarray([self.parameters[('hyperparameters', f'hyperparameter_{i}')]
for i in range(self.num_hyperparameters)]))

def request_targets(self, position, **kwargs):
    """Request the next measurement targets from the optimizer.

    Extends the base-class request by forwarding an ``x_out`` array of
    task indices (0 .. output_number-1), presumably so the multi-task
    optimizer evaluates every output — confirm against the gpCAM API.
    """
    task_indices = np.arange(self.output_number)
    kwargs['x_out'] = task_indices
    return super().request_targets(position, **kwargs)
16 changes: 6 additions & 10 deletions tsuchinoko/adaptive/gpCAM_in_process.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,15 +126,9 @@ def parameters(self):
return GroupParameter(name='top', children=parameters)

def _set_hyperparameter(self, parameter, value):
self.optimizer.gp_initialized = False # Force re-initialization
opts = self.gp_opts.copy()
if sys.platform == 'darwin':
opts['compute_device'] = 'numpy'
# self.optimizer.init_gp(np.asarray([self.parameters[('hyperparameters', f'hyperparameter_{i}')]
# for i in range(self.num_hyperparameters)]), **opts)
hyperparameters = np.asarray([self.parameters[('hyperparameters', f'hyperparameter_{i}')]
for i in range(self.num_hyperparameters)])
self.optimizer.hyperparameters = hyperparameters
self.optimizer.set_hyperparameters(hyperparameters)

def update_measurements(self, data: Data):
with data.r_lock(): # quickly grab values within lock before passing to optimizer
Expand All @@ -153,7 +147,7 @@ def update_metrics(self, data: Data):
except Exception as ex:
logger.exception(ex)

def request_targets(self, position):
def request_targets(self, position, **kwargs):
self.last_position = position
bounds = np.asarray([[self.parameters[('bounds', f'axis_{i}_{edge}')]
for edge in ['min', 'max']]
Expand All @@ -164,7 +158,7 @@ def request_targets(self, position):
if not self.optimizer.gp:
return [[np.random.uniform(bounds[i][0], bounds[i][1]) for i in range(self.dimensionality)] for _ in range(n)]
else:
kwargs = {key: self.parameters[key] for key in ['acquisition_function', 'method', 'pop_size', 'tol']}
kwargs.update({key: self.parameters[key] for key in ['acquisition_function', 'method', 'pop_size', 'tol']})
kwargs.update({'input_set': bounds})
kwargs.update(self.ask_opts)
return self.optimizer.ask(position=position,
Expand All @@ -180,9 +174,11 @@ def train(self):
for N in train_at:
if len(self.optimizer.y_data) > N and N not in self._completed_training[method]:
logger.info('Training in progress. This make take a while...')
self.optimizer.train(np.asarray([[self.parameters[('hyperparameters', f'hyperparameter_{i}_{edge}')]
self.optimizer.train(hyperparameter_bounds=
np.asarray([[self.parameters[('hyperparameters', f'hyperparameter_{i}_{edge}')]
for edge in ['min', 'max']]
for i in range(self.num_hyperparameters)]),
init_hyperparameters=
np.asarray([self.parameters[('hyperparameters', f'hyperparameter_{i}')]
for i in range(self.num_hyperparameters)]), method=method)
self._completed_training[method].add(N)
Expand Down
13 changes: 7 additions & 6 deletions tsuchinoko/examples/multi_task_server_demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,12 @@
from scipy import ndimage

from tsuchinoko.adaptive.fvgp_gpCAM_in_process import FvgpGPCAMInProcessEngine
from tsuchinoko.core import ZMQCore
from tsuchinoko.core import ZMQCore, CoreState
from tsuchinoko.execution.simple import SimpleEngine

# NOTES:
# 2 signal variances per task

# Load data from a jpg image to be used as a luminosity map
images = [np.flipud(np.asarray(Image.open(Path(__file__).parent / f'peak{i + 1}.png'))) for i in range(2)]
luminosity = [np.average(image, axis=2) for image in images]
Expand All @@ -23,14 +26,11 @@ def bilinear_sample(pos):

# Define a gpCAM adaptive engine with initial parameters
adaptive = FvgpGPCAMInProcessEngine(dimensionality=2,
output_dim=1,
output_number=2,
parameter_bounds=[(0, images[0].shape[1]),
(0, images[0].shape[0])],
hyperparameters=[255, 100, 100],
hyperparameter_bounds=[(0, 1e5),
(0, 1e5),
(0, 1e5)])
hyperparameters=[255**2, 1, 1, 1],
hyperparameter_bounds=[(1e-1, 1e5)]*4)

# Construct a core server
core = ZMQCore()
Expand All @@ -39,4 +39,5 @@ def bilinear_sample(pos):

if __name__ == '__main__':
# Start the core server
core.state = CoreState.Starting
core.main()
22 changes: 17 additions & 5 deletions tsuchinoko/graphs/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -365,13 +365,19 @@ def compute(self, data, engine: 'GPCAMInProcessEngine'):
logger.exception(ValueError('The selected acquisition_function is not available for display.'))
return

extra_kwargs={}
output_num = getattr(engine.optimizer.gp, 'output_num', 1)
if output_num > 1:
extra_kwargs['x_out'] = np.arange(output_num)

# calculate acquisition function
acquisition_function_value = engine.optimizer.evaluate_acquisition_function(grid_positions,
acquisition_function=
gpcam_acquisition_functions[
engine.parameters[
'acquisition_function']],
origin=engine.last_position)
origin=engine.last_position,
**extra_kwargs)

try:
acquisition_function_value = acquisition_function_value.reshape(*self.shape)
Expand All @@ -398,13 +404,19 @@ def compute(self, data, engine: 'GPCAMInProcessEngine'):
grid_positions = image_grid(bounds, self.shape)
shape = self.shape

# For GITOMO; needs to be combined with below
# if multi-task, extend the grid_positions to include the task dimension
if hasattr(engine, 'output_number'):
grid_positions = np.vstack([np.hstack([grid_positions, np.full((grid_positions.shape[0], 1), i)]) for i in range(engine.output_number)])
shape = (*self.shape, engine.output_number)
# if hasattr(engine, 'output_number'):
# grid_positions = np.vstack([np.hstack([grid_positions, np.full((grid_positions.shape[0], 1), i)]) for i in range(engine.output_number)])
# shape = (*self.shape, engine.output_number)

extra_kwargs = dict()
if hasattr(engine.optimizer.gp, 'output_num'):
extra_kwargs['x_out'] = np.arange(engine.optimizer.gp.output_num)
shape = (*shape, engine.optimizer.gp.output_num)

# calculate acquisition function
posterior_mean_value = engine.optimizer.posterior_mean(grid_positions)['f(x)'].reshape(*shape)
posterior_mean_value = engine.optimizer.posterior_mean(grid_positions, **extra_kwargs)['f(x)'].reshape(*shape)

# assign to data object with lock
with data.w_lock():
Expand Down
4 changes: 2 additions & 2 deletions tsuchinoko/utils/zmq_queue.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def ask(self):
message = self.from_socket.recv()
self.msg('Received reply: {}'.format(message))

def get(self, save=True, check_interrupted=False, force_load=False):
def get(self, save=True, check_interrupted=False, force_load=False, flags:int=0):
'''Get the current item being published.'''
# message = self.from_socket.recv()

Expand All @@ -127,7 +127,7 @@ def get(self, save=True, check_interrupted=False, force_load=False):

else:
self.msg('Waiting for data/command ({})...'.format(self.now()), 4, 1)
data = self.from_socket.recv_pyobj()
data = self.from_socket.recv_pyobj(flags=flags)

if isinstance(data, (list, tuple, np.ndarray)):
self.msg('Received: list length {}'.format(len(data)), 4, 2)
Expand Down
43 changes: 30 additions & 13 deletions tsuchinoko/widgets/graph_widgets.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,10 @@
import os
import re
import tempfile
from pathlib import Path

from time import perf_counter

import numpy as np
from PySide2.QtCore import Qt, QTimer
from PySide2.QtWidgets import QAction, QFileDialog
from loguru import logger
from pyqtgraph.exporters import ImageExporter
from qtpy import QtWidgets
from qtpy.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout
from qtpy.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QComboBox
from pyqtgraph import InfiniteLine, mkPen, PlotWidget, HistogramLUTWidget, mkBrush, functions as fn, FileDialog

from tsuchinoko.graphics_items.clouditem import CloudItem
Expand Down Expand Up @@ -46,14 +40,25 @@ def __init__(self, data_key:str, accumulates:bool):
self.cloud = CloudItem(name='scatter', size=10)
histlut = HistogramLUTWidget()
histlut.setImageItem(self.cloud)
self.output_selector = QComboBox()
self.output_selector.setHidden(True)
self.output_selector.currentIndexChanged.connect(self.invalidate_cloud)

self.setLayout(QVBoxLayout())
self.layout().setContentsMargins(0,0,0,0)
self.layout().setSpacing(0)
hlayout = QHBoxLayout()
self.layout().addLayout(hlayout)
self.layout().addWidget(self.timeline_plot)
right_layout = QVBoxLayout()
right_layout.setSpacing(0)
right_layout.setContentsMargins(0,0,0,0)


self.layout().addWidget(self.timeline_plot)
hlayout.addWidget(self.graph)
hlayout.addWidget(histlut)
hlayout.addLayout(right_layout)
right_layout.addWidget(histlut)
right_layout.addWidget(self.output_selector)

self.graph.addItem(self.cloud)

Expand All @@ -72,6 +77,9 @@ def __init__(self, data_key:str, accumulates:bool):
self.last_play_time = 0
self.play_timer.timeout.connect(self.timeout)

def invalidate_cloud(self):
    # Connected to output_selector.currentIndexChanged: clears the cloud
    # item so points from the previously selected output are discarded
    # before the next data update repopulates it.
    self.cloud.clear()

def timeline_changed(self):
if not self.cache:
return
Expand Down Expand Up @@ -202,14 +210,23 @@ def update_data(self, data, update_slice: slice):

with data.r_lock():
v = np.asarray(data[self.data_key].copy())
if v.ndim == 2:
x, y = zip(*data.positions)

if v.ndim == 2:
if len(v[0]) > 1:
self.output_selector.setHidden(False)
if self.output_selector.count() != len(v[0]):
self.output_selector.clear()
for i in range(len(v[0])):
self.output_selector.addItem(f'{i}')
self.output_selector.setCurrentIndex(0)
v = v[:, self.output_selector.currentIndex()]
else:
try:
v = np.squeeze(v, 1)
except ValueError:
pass

x, y = zip(*data.positions)

lengths = len(v), len(x), len(y)
min_length = min(lengths)

Expand Down

0 comments on commit 9e57bb7

Please sign in to comment.