From 9c5c4d50d1cf62c8f0d642f09d6d7e5ac9ce854a Mon Sep 17 00:00:00 2001 From: Andrew <15331990+ahuang11@users.noreply.github.com> Date: Fri, 16 Aug 2019 04:18:46 -0500 Subject: [PATCH 01/52] Add cformatter (#3906) --- holoviews/plotting/mpl/element.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/holoviews/plotting/mpl/element.py b/holoviews/plotting/mpl/element.py index f42eec7e74..f24ce9c09e 100644 --- a/holoviews/plotting/mpl/element.py +++ b/holoviews/plotting/mpl/element.py @@ -663,6 +663,10 @@ class ColorbarPlot(ElementPlot): User-specified colorbar axis range limits for the plot, as a tuple (low,high). If specified, takes precedence over data and dimension ranges.""") + cformatter = param.ClassSelector( + default=None, class_=(util.basestring, ticker.Formatter, FunctionType), doc=""" + Formatter for ticks along the colorbar axis.""") + colorbar = param.Boolean(default=False, doc=""" Whether to draw a colorbar.""") @@ -770,6 +774,7 @@ def _draw_colorbar(self, element=None, dimension=None, redraw=True): cax = fig.add_axes([l+w+padding+(scaled_w+padding+w*0.15)*offset, b, scaled_w, h]) cbar = fig.colorbar(artist, cax=cax, ax=axis, extend=self._cbar_extend) + self._set_axis_formatter(cbar.ax.yaxis, dimension, self.cformatter) self._adjust_cbar(cbar, label, dimension) self.handles['cax'] = cax self.handles['cbar'] = cbar From e24319765696cb4c8ceb69388ff5e534a7eebd25 Mon Sep 17 00:00:00 2001 From: Joel Ostblom Date: Sat, 17 Aug 2019 04:31:23 -0700 Subject: [PATCH 02/52] Regrid doc elaboration for image data (#3911) --- examples/user_guide/15-Large_Data.ipynb | 37 ++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/examples/user_guide/15-Large_Data.ipynb b/examples/user_guide/15-Large_Data.ipynb index 755c895f3b..2569ec7612 100644 --- a/examples/user_guide/15-Large_Data.ipynb +++ b/examples/user_guide/15-Large_Data.ipynb @@ -159,7 +159,42 @@ "source": [ "In all three of the above plots, `rasterize()` is being called to aggregate the data (a large set of x,y locations) into a rectangular grid, with each grid cell counting up the number of points that fall into it. In the plot on the left, only `rasterize()` is done, and the resulting numeric array of counts is passed to Bokeh for colormapping. Bokeh can then use dynamic (client-side, browser-based) operations in JavaScript, allowing users to have dynamic control over even static HTML plots. For instance, in this case, users can use the Box Select tool and select a range of the histogram shown, dynamically remapping the colors used in the plot to cover the selected range.\n", "\n", - "The other two plots should be identical. In both cases, the numerical array output of `rasterize()` is mapped into RGB colors by Datashader itself, in Python (\"server-side\"), which allows special Datashader computations like the histogram-equalization in the above plots and the \"spreading\" discussed below. The `shade()` and `datashade()` operations accept a `cmap` argument that lets you control the colormap used, which can be selected to match the HoloViews/Bokeh `cmap` option but is strictly independent of it. See ``hv.help(rasterize)``, ``hv.help(shade)``, and ``hv.help(datashade)`` for options that can be selected, and the [Datashader web site](http://datashader.org) for all the details. You can also try the lower-level ``hv.aggregate()`` (for points and lines) and ``hv.regrid()` (for image/raster data) operations, which may provide more control." + "The other two plots should be identical. 
In both cases, the numerical array output of `rasterize()` is mapped into RGB colors by Datashader itself, in Python (\"server-side\"), which allows special Datashader computations like the histogram-equalization in the above plots and the \"spreading\" discussed below. The `shade()` and `datashade()` operations accept a `cmap` argument that lets you control the colormap used, which can be selected to match the HoloViews/Bokeh `cmap` option but is strictly independent of it. See ``hv.help(rasterize)``, ``hv.help(shade)``, and ``hv.help(datashade)`` for options that can be selected, and the [Datashader web site](http://datashader.org) for all the details. The lower-level `aggregate()` and `regrid()` give more control over how the data is aggregated.\n", + "\n", + "Since datashader only sends the data currently in view to the plotting backend, the default behavior is to rescale colormap to the range of the visible data as the zoom level changes. This behavior may not be desirable when working with images; to instead use a fixed colormap range, the `clim` parameter can be passed to the `bokeh` backend via the `opts()` method. Note that this approach works with `rasterize()` where the colormapping is done by the `bokeh` backend. With `datashade()`, the colormapping is done with the `shade()` function which takes a `clims` parameter directly instead of passing additional parameters to the backend via `opts()`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "n = 10_000\n", + "\n", + "# Strong signal on top\n", + "rs = np.random.RandomState(101010)\n", + "x = rs.pareto(n, n)\n", + "y = x + rs.standard_normal(n)\n", + "img1, *_ = np.histogram2d(x, y, bins=60)\n", + "\n", + "# Weak signal in the middle\n", + "x2 = rs.standard_normal(n)\n", + "y2 = 5 * x + 10 * rs.standard_normal(n)\n", + "img2, *_ = np.histogram2d(x2, y2, bins=60)\n", + "\n", + "img = img1 + img2\n", + "hv_img = hv.Image(img).opts(active_tools=['wheel_zoom'])\n", + "auto_scale_grid = rasterize(hv_img).opts(title='Automatic color range rescaling')\n", + "fixed_scale_grid = rasterize(hv_img).opts(title='Fixed color range', clim=(img.min(), img.max()))\n", + "auto_scale_grid + fixed_scale_grid; # Output supressed and gif shown below instead" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](http://assets.holoviews.org/gifs/guides/user_guide/Large_Data/rasterize_color_range.gif)" ] }, { From 700adf8f77805981b981108001bcc1d9df7655ad Mon Sep 17 00:00:00 2001 From: Andrew <15331990+ahuang11@users.noreply.github.com> Date: Sat, 17 Aug 2019 06:32:22 -0500 Subject: [PATCH 03/52] Add cformatter for bokeh (#3913) --- holoviews/plotting/bokeh/element.py | 26 +++++++++++--------------- holoviews/plotting/bokeh/util.py | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index e95e8a6766..e544d680e3 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -13,8 +13,7 @@ from bokeh.models import Renderer, Title, Legend, ColorBar, tools from bokeh.models.axes import CategoricalAxis, DatetimeAxis from bokeh.models.formatters import ( - FuncTickFormatter, TickFormatter, PrintfTickFormatter, - MercatorTickFormatter) + FuncTickFormatter, TickFormatter, MercatorTickFormatter) from bokeh.models.mappers import ( LinearColorMapper, LogColorMapper, CategoricalColorMapper) from bokeh.models.ranges import Range1d, 
DataRange1d, FactorRange @@ -40,7 +39,7 @@ TOOL_TYPES, date_to_integer, decode_bytes, get_tab_title, glyph_order, py2js_tickformatter, recursive_model_update, theme_attr_json, cds_column_replace, hold_policy, match_dim_specs, - compute_layout_properties) + compute_layout_properties, wrap_formatter) @@ -631,18 +630,7 @@ def _axis_properties(self, axis, key, plot, dimension=None, formatter = self.xformatter if axis == 'x' else self.yformatter if formatter: - if isinstance(formatter, TickFormatter): - pass - elif isinstance(formatter, FunctionType): - msg = ('%sformatter could not be ' - 'converted to tick formatter. ' % axis) - jsfunc = py2js_tickformatter(formatter, msg) - if jsfunc: - formatter = FuncTickFormatter(code=jsfunc) - else: - formatter = None - else: - formatter = PrintfTickFormatter(format=formatter) + formatter = wrap_formatter(formatter, axis) if formatter is not None: axis_props['formatter'] = formatter elif FuncTickFormatter is not None and ax_mapping and isinstance(dimension, Dimension): @@ -1556,6 +1544,10 @@ class ColorbarPlot(ElementPlot): User-specified colorbar axis range limits for the plot, as a tuple (low,high). If specified, takes precedence over data and dimension ranges.""") + cformatter = param.ClassSelector( + default=None, class_=(util.basestring, TickFormatter, FunctionType), doc=""" + Formatter for ticks along the colorbar axis.""") + colorbar = param.Boolean(default=False, doc=""" Whether to display a colorbar.""") @@ -1608,6 +1600,10 @@ def _draw_colorbar(self, plot, color_mapper, prefix=''): if self.clabel: self.colorbar_opts.update({'title': self.clabel}) + + if self.cformatter is not None: + self.colorbar_opts.update({'formatter': wrap_formatter(self.cformatter, 'c')}) + opts = dict(cbar_opts['opts'], color_mapper=color_mapper, ticker=ticker, **self._colorbar_defaults) color_bar = ColorBar(**dict(opts, **self.colorbar_opts)) diff --git a/holoviews/plotting/bokeh/util.py b/holoviews/plotting/bokeh/util.py index f802082b9b..128789da58 100644 --- a/holoviews/plotting/bokeh/util.py +++ b/holoviews/plotting/bokeh/util.py @@ -5,6 +5,7 @@ import sys import calendar import datetime as dt +from types import FunctionType from collections import defaultdict from contextlib import contextmanager @@ -19,6 +20,7 @@ from bokeh.layouts import WidgetBox, Row, Column from bokeh.models import tools from bokeh.models import Model, ToolbarBox, FactorRange, Range1d, Plot, Spacer, CustomJS, GridBox +from bokeh.models.formatters import FuncTickFormatter, TickFormatter, PrintfTickFormatter from bokeh.models.widgets import DataTable, Tabs, Div from bokeh.plotting import Figure from bokeh.themes.theme import Theme @@ -898,3 +900,23 @@ def match_dim_specs(specs1, specs2): if s1 != s2: return False return True + + +def wrap_formatter(formatter, axis): + """ + Wraps formatting function or string in + appropriate bokeh formatter type. + """ + if isinstance(formatter, TickFormatter): + pass + elif isinstance(formatter, FunctionType): + msg = ('%sformatter could not be ' + 'converted to tick formatter. 
' % axis) + jsfunc = py2js_tickformatter(formatter, msg) + if jsfunc: + formatter = FuncTickFormatter(code=jsfunc) + else: + formatter = None + else: + formatter = PrintfTickFormatter(format=formatter) + return formatter From df9b91bf5a2e06c1be9332d135eb5cc19e41b7e2 Mon Sep 17 00:00:00 2001 From: Andrew <15331990+ahuang11@users.noreply.github.com> Date: Sun, 18 Aug 2019 05:06:41 -0500 Subject: [PATCH 04/52] Fontsize colorbar (#3914) --- holoviews/plotting/bokeh/element.py | 12 ++++++++++++ holoviews/plotting/mpl/element.py | 14 +++++++++++++- holoviews/plotting/plot.py | 8 ++++---- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index e544d680e3..482935704b 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -1604,6 +1604,18 @@ def _draw_colorbar(self, plot, color_mapper, prefix=''): if self.cformatter is not None: self.colorbar_opts.update({'formatter': wrap_formatter(self.cformatter, 'c')}) + for tk in ['cticks', 'ticks']: + ticksize = self._fontsize(tk, common=False).get('fontsize') + if ticksize is not None: + self.colorbar_opts.update({'major_label_text_font_size': ticksize}) + break + + for lb in ['clabel', 'labels']: + labelsize = self._fontsize(lb, common=False).get('fontsize') + if labelsize is not None: + self.colorbar_opts.update({'title_text_font_size': labelsize}) + break + opts = dict(cbar_opts['opts'], color_mapper=color_mapper, ticker=ticker, **self._colorbar_defaults) color_bar = ColorBar(**dict(opts, **self.colorbar_opts)) diff --git a/holoviews/plotting/mpl/element.py b/holoviews/plotting/mpl/element.py index f24ce9c09e..89be11e1ae 100644 --- a/holoviews/plotting/mpl/element.py +++ b/holoviews/plotting/mpl/element.py @@ -708,9 +708,15 @@ def __init__(self, *args, **kwargs): def _adjust_cbar(self, cbar, label, dim): noalpha = math.floor(self.style[self.cyclic_index].get('alpha', 1)) == 1 + + for lb in ['clabel', 'labels']: + labelsize = self._fontsize(lb, common=False).get('fontsize') + if labelsize is not None: + break + if (cbar.solids and noalpha): cbar.solids.set_edgecolor("face") - cbar.set_label(label) + cbar.set_label(label, fontsize=labelsize) if isinstance(self.cbar_ticks, ticker.Locator): cbar.ax.yaxis.set_major_locator(self.cbar_ticks) elif self.cbar_ticks == 0: @@ -727,6 +733,12 @@ def _adjust_cbar(self, cbar, label, dim): cbar.set_ticks(ticks) cbar.set_ticklabels(labels) + for tk in ['cticks', 'ticks']: + ticksize = self._fontsize(tk, common=False).get('fontsize') + if ticksize is not None: + cbar.ax.tick_params(labelsize=ticksize) + break + def _finalize_artist(self, element): if self.colorbar: diff --git a/holoviews/plotting/plot.py b/holoviews/plotting/plot.py index 5077058c1f..d19dd700af 100644 --- a/holoviews/plotting/plot.py +++ b/holoviews/plotting/plot.py @@ -195,8 +195,8 @@ class DimensionedPlot(Plot): together using the 'labels' key.""") #Allowed fontsize keys - _fontsize_keys = ['xlabel','ylabel', 'zlabel', 'labels', - 'xticks', 'yticks', 'zticks', 'ticks', + _fontsize_keys = ['xlabel','ylabel', 'zlabel', 'clabel', 'labels', + 'xticks', 'yticks', 'zticks', 'cticks', 'ticks', 'minor_xticks', 'minor_yticks', 'minor_ticks', 'title', 'legend', 'legend_title', ] @@ -340,9 +340,9 @@ def _fontsize(self, key, label='fontsize', common=True): if key in self.fontsize: return {label:self.fontsize[key]} - elif key in ['zlabel', 'ylabel', 'xlabel'] and 'labels' in self.fontsize: + elif key in ['zlabel', 'ylabel', 'xlabel', 'clabel'] 
and 'labels' in self.fontsize: return {label:self.fontsize['labels']} - elif key in ['xticks', 'yticks', 'zticks'] and 'ticks' in self.fontsize: + elif key in ['xticks', 'yticks', 'zticks', 'cticks'] and 'ticks' in self.fontsize: return {label:self.fontsize['ticks']} elif key in ['minor_xticks', 'minor_yticks'] and 'minor_ticks' in self.fontsize: return {label:self.fontsize['minor_ticks']} From 2e91f06112e142f8a6189b2df9d30747b4392b81 Mon Sep 17 00:00:00 2001 From: Andrew <15331990+ahuang11@users.noreply.github.com> Date: Sun, 18 Aug 2019 05:07:05 -0500 Subject: [PATCH 05/52] Add extend for plot (#3907) --- holoviews/plotting/mpl/element.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/holoviews/plotting/mpl/element.py b/holoviews/plotting/mpl/element.py index 89be11e1ae..368befb255 100644 --- a/holoviews/plotting/mpl/element.py +++ b/holoviews/plotting/mpl/element.py @@ -695,6 +695,11 @@ class ColorbarPlot(ElementPlot): cbar_width = param.Number(default=0.05, doc=""" Width of the colorbar as a fraction of the main plot""") + cbar_extend = param.ObjectSelector( + objects=['neither', 'both', 'min', 'max'], default=None, doc=""" + If not 'neither', make pointed end(s) for out-of- range values.""" + ) + symmetric = param.Boolean(default=False, doc=""" Whether to make the colormap symmetric around zero.""") @@ -704,7 +709,6 @@ class ColorbarPlot(ElementPlot): def __init__(self, *args, **kwargs): super(ColorbarPlot, self).__init__(*args, **kwargs) - self._cbar_extend = 'neither' def _adjust_cbar(self, cbar, label, dim): noalpha = math.floor(self.style[self.cyclic_index].get('alpha', 1)) == 1 @@ -785,7 +789,7 @@ def _draw_colorbar(self, element=None, dimension=None, redraw=True): scaled_w = w*width cax = fig.add_axes([l+w+padding+(scaled_w+padding+w*0.15)*offset, b, scaled_w, h]) - cbar = fig.colorbar(artist, cax=cax, ax=axis, extend=self._cbar_extend) + cbar = fig.colorbar(artist, cax=cax, ax=axis, extend=self.cbar_extend) self._set_axis_formatter(cbar.ax.yaxis, dimension, self.cformatter) self._adjust_cbar(cbar, label, dimension) self.handles['cax'] = cax @@ -902,12 +906,15 @@ def _norm_kwargs(self, element, ranges, opts, vdim, values=None, prefix=''): el_min, el_max = -np.inf, np.inf vmin = -np.inf if opts[prefix+'vmin'] is None else opts[prefix+'vmin'] vmax = np.inf if opts[prefix+'vmax'] is None else opts[prefix+'vmax'] - if el_min < vmin and el_max > vmax: - self._cbar_extend = 'both' - elif el_min < vmin: - self._cbar_extend = 'min' - elif el_max > vmax: - self._cbar_extend = 'max' + if self.cbar_extend is None: + if el_min < vmin and el_max > vmax: + self.cbar_extend = 'both' + elif el_min < vmin: + self.cbar_extend = 'min' + elif el_max > vmax: + self.cbar_extend = 'max' + else: + self.cbar_extend = 'neither' # Define special out-of-range colors on colormap colors = {} From 19f4c30ced4b07962870c69395070b5817e77d4f Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 20 Aug 2019 11:57:11 +0200 Subject: [PATCH 06/52] Fixed Grid axis alignment (#3916) --- holoviews/plotting/bokeh/util.py | 1 + 1 file changed, 1 insertion(+) diff --git a/holoviews/plotting/bokeh/util.py b/holoviews/plotting/bokeh/util.py index 128789da58..cf98ad911d 100644 --- a/holoviews/plotting/bokeh/util.py +++ b/holoviews/plotting/bokeh/util.py @@ -433,6 +433,7 @@ def make_axis(axis, size, factors, dim, flip=False, rotation=0, p.grid.grid_line_alpha = 0 if axis == 'x': + p.align = 'end' p.yaxis.visible = False axis = p.xaxis[0] if flip: From 
c525cbd30d3fafb2b4ccb3b932f562220fb5e4cb Mon Sep 17 00:00:00 2001 From: Andrew <15331990+ahuang11@users.noreply.github.com> Date: Fri, 23 Aug 2019 06:17:36 -0500 Subject: [PATCH 07/52] Add colorbar_opts to mpl (#3925) --- holoviews/plotting/mpl/element.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/holoviews/plotting/mpl/element.py b/holoviews/plotting/mpl/element.py index 368befb255..fd81cb9705 100644 --- a/holoviews/plotting/mpl/element.py +++ b/holoviews/plotting/mpl/element.py @@ -670,6 +670,9 @@ class ColorbarPlot(ElementPlot): colorbar = param.Boolean(default=False, doc=""" Whether to draw a colorbar.""") + colorbar_opts = param.Dict(default={}, doc=""" + Allows setting specific styling options for the colorbar.""") + color_levels = param.ClassSelector(default=None, class_=(int, list), doc=""" Number of discrete colors to use when colormapping or a set of color intervals defining the range of values to map each color to.""") @@ -789,7 +792,8 @@ def _draw_colorbar(self, element=None, dimension=None, redraw=True): scaled_w = w*width cax = fig.add_axes([l+w+padding+(scaled_w+padding+w*0.15)*offset, b, scaled_w, h]) - cbar = fig.colorbar(artist, cax=cax, ax=axis, extend=self.cbar_extend) + cbar = fig.colorbar(artist, cax=cax, ax=axis, + extend=self.cbar_extend, **self.colorbar_opts) self._set_axis_formatter(cbar.ax.yaxis, dimension, self.cformatter) self._adjust_cbar(cbar, label, dimension) self.handles['cax'] = cax From 3767304d7b25598d71a7112dd3704f8e221acb59 Mon Sep 17 00:00:00 2001 From: Carlos H Brandt Date: Sun, 1 Sep 2019 16:07:40 +0200 Subject: [PATCH 08/52] [docs] Fix typos in docstring (#3939) --- holoviews/operation/datashader.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/holoviews/operation/datashader.py b/holoviews/operation/datashader.py index 8145bd54e8..d68cd9e42e 100644 --- a/holoviews/operation/datashader.py +++ b/holoviews/operation/datashader.py @@ -75,14 +75,14 @@ class ResamplingOperation(LinkableOperation): if set to None.""") y_range = param.NumericTuple(default=None, length=2, doc=""" - The x_range as a tuple of min and max y-value. Auto-ranges + The y-axis range as a tuple of min and max y value. 
Auto-ranges if set to None.""") x_sampling = param.Number(default=None, doc=""" - Specifies the smallest allowed sampling interval along the y-axis.""") + Specifies the smallest allowed sampling interval along the x axis.""") y_sampling = param.Number(default=None, doc=""" - Specifies the smallest allowed sampling interval along the y-axis.""") + Specifies the smallest allowed sampling interval along the y axis.""") target = param.ClassSelector(class_=Image, doc=""" A target Image which defines the desired x_range, y_range, From 8ea51d2ecb8dea3415ead914c222e9bf91cfe543 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 12 Sep 2019 16:42:23 +0200 Subject: [PATCH 09/52] Ensure Bar chart sorts multi-category values correctly (#3953) --- holoviews/plotting/bokeh/chart.py | 10 ++++++---- holoviews/tests/plotting/bokeh/testbarplot.py | 12 ++++++++++-- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/holoviews/plotting/bokeh/chart.py b/holoviews/plotting/bokeh/chart.py index 5524536e7f..834ef3cf8e 100644 --- a/holoviews/plotting/bokeh/chart.py +++ b/holoviews/plotting/bokeh/chart.py @@ -838,14 +838,16 @@ def _get_factors(self, element, ranges): xdim, ydim = element.dimensions()[:2] xvals = np.asarray(xdim.values or element.dimension_values(0, False)) - xvals = [x if xvals.dtype.kind in 'SU' else xdim.pprint_value(x) - for x in xvals] + c_is_str = xvals.dtype.kind in 'SU' if gdim and not sdim: gvals = np.asarray(gdim.values or element.dimension_values(gdim, False)) xvals = sorted([(x, g) for x in xvals for g in gvals]) - is_str = gvals.dtype.kind in 'SU' - xvals = [(x, g if is_str else gdim.pprint_value(g)) for (x, g) in xvals] + g_is_str = gvals.dtype.kind in 'SU' + xvals = [(x if c_is_str else xdim.pprint_value(x), g if g_is_str else gdim.pprint_value(g)) + for (x, g) in xvals] + else: + xvals = [x if c_is_str else xdim.pprint_value(x) for x in xvals] coords = xvals, [] if self.invert_axes: coords = coords[::-1] return coords diff --git a/holoviews/tests/plotting/bokeh/testbarplot.py b/holoviews/tests/plotting/bokeh/testbarplot.py index aa4021376d..977091d6ab 100644 --- a/holoviews/tests/plotting/bokeh/testbarplot.py +++ b/holoviews/tests/plotting/bokeh/testbarplot.py @@ -48,14 +48,22 @@ def test_bars_grouped_categories(self): x_range = plot.handles['x_range'] self.assertEqual(x_range.factors, [('A', '0'), ('A', '1'), ('B', '0'), ('B', '1')]) - def test_box_whisker_multi_level_sorted(self): + def test_bars_multi_level_sorted(self): box= Bars((['A', 'B']*15, [3, 10, 1]*10, np.random.randn(30)), ['Group', 'Category'], 'Value').aggregate(function=np.mean) plot = bokeh_renderer.get_plot(box) x_range = plot.handles['x_range'] self.assertEqual(x_range.factors, [ ('A', '1'), ('A', '3'), ('A', '10'), ('B', '1'), ('B', '3'), ('B', '10')]) - + + def test_box_whisker_multi_level_sorted_alphanumerically(self): + box= Bars(([3, 10, 1]*10, ['A', 'B']*15, np.random.randn(30)), + ['Group', 'Category'], 'Value').aggregate(function=np.mean) + plot = bokeh_renderer.get_plot(box) + x_range = plot.handles['x_range'] + self.assertEqual(x_range.factors, [ + ('1', 'A'), ('1', 'B'), ('3', 'A'), ('3', 'B'), ('10', 'A'), ('10', 'B')]) + def test_bars_positive_negative_mixed(self): bars = Bars([('A', 0, 1), ('A', 1, -1), ('B', 0, 2)], kdims=['Index', 'Category'], vdims=['Value']) From b6037a68709b4411536d90d478e99692557823cb Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 12 Sep 2019 16:42:37 +0200 Subject: [PATCH 10/52] Fixed bug in bokeh HoverTool lookup (#3952) --- 
holoviews/plotting/bokeh/element.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index 482935704b..d0d099f1c6 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -2037,8 +2037,8 @@ def _merge_tools(self, subplot): hover_renderers = [] if hover.renderers == 'auto' else hover.renderers renderers = tool_renderers + hover_renderers tool.renderers = list(util.unique_iterator(renderers)) - if 'hover' not in self.handles: - self.handles['hover'] = tool + if 'hover' not in self.handles: + self.handles['hover'] = tool def _get_factors(self, overlay, ranges): From 0f4b41f92156cd07e707e6051f82a33e33200ad5 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 12 Sep 2019 17:08:20 +0200 Subject: [PATCH 11/52] Allow plotting partially irregular QuadMesh (#3955) --- holoviews/core/data/xarray.py | 5 ++++- holoviews/core/util.py | 15 +++++++++++---- holoviews/plotting/bokeh/raster.py | 3 ++- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/holoviews/core/data/xarray.py b/holoviews/core/data/xarray.py index 71ac1b4ae6..99193430c5 100644 --- a/holoviews/core/data/xarray.py +++ b/holoviews/core/data/xarray.py @@ -50,7 +50,10 @@ def shape(cls, dataset, gridded=False): if kd.name in array.dims][::-1] if not all(d in names for d in array.dims): array = np.squeeze(array) - array = array.transpose(*names) + try: + array = array.transpose(*names, transpose_coords=False) + except: + array = array.transpose(*names) # Handle old xarray shape = array.shape if gridded: return shape diff --git a/holoviews/core/util.py b/holoviews/core/util.py index 0cece0767b..1418b7cdf1 100644 --- a/holoviews/core/util.py +++ b/holoviews/core/util.py @@ -1835,10 +1835,17 @@ def expand_grid_coords(dataset, dim): dataset into an ND-array matching the dimensionality of the dataset. 
""" - arrays = [dataset.interface.coords(dataset, d.name, True) - for d in dataset.kdims] - idx = dataset.get_dimension_index(dim) - return cartesian_product(arrays, flat=False)[idx].T + irregular = [d.name for d in dataset.kdims + if d is not dim and dataset.interface.irregular(dataset, d)] + if irregular: + array = dataset.interface.coords(dataset, dim, True) + example = dataset.interface.values(dataset, irregular[0], True, False) + return array * np.ones_like(example) + else: + arrays = [dataset.interface.coords(dataset, d.name, True) + for d in dataset.kdims] + idx = dataset.get_dimension_index(dim) + return cartesian_product(arrays, flat=False)[idx].T def dt64_to_dt(dt64): diff --git a/holoviews/plotting/bokeh/raster.py b/holoviews/plotting/bokeh/raster.py index 5b361210bf..3c90573a25 100644 --- a/holoviews/plotting/bokeh/raster.py +++ b/holoviews/plotting/bokeh/raster.py @@ -207,7 +207,8 @@ def get_data(self, element, ranges, style): cmapper = self._get_colormapper(z, element, ranges, style) cmapper = {'field': z.name, 'transform': cmapper} - irregular = element.interface.irregular(element, x) + irregular = (element.interface.irregular(element, x) or + element.interface.irregular(element, y)) if irregular: mapping = dict(xs='xs', ys='ys', fill_color=cmapper) else: From 79258606d8f63989eeb50f774e2c678f674eb11d Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 12 Sep 2019 19:23:41 +0200 Subject: [PATCH 12/52] Fixed error message for Image data with wrong dimensionality (#3956) --- holoviews/element/raster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/holoviews/element/raster.py b/holoviews/element/raster.py index 18515fb8b6..a8e6def795 100644 --- a/holoviews/element/raster.py +++ b/holoviews/element/raster.py @@ -283,8 +283,8 @@ def __init__(self, data, kdims=None, vdims=None, bounds=None, extents=None, if not xdensity: xdensity = 1 if not ydensity: ydensity = 1 elif isinstance(data, np.ndarray) and data.ndim < self._ndim: - raise ValueError('%s type expects %d-D array received %d-D' - 'array.' % (self._ndim, data.ndim)) + raise ValueError('%s type expects %d-D array received %d-D ' + 'array.' % (type(self).__name__, self._ndim, data.ndim)) if rtol is not None: params['rtol'] = rtol From 237de09122f3fe45657fbac251b64bd5534adc98 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 12 Sep 2019 19:49:57 +0200 Subject: [PATCH 13/52] Fix Grid level alignment (#3957) --- examples/user_guide/Plotting_with_Bokeh.ipynb | 44 +++++++++++++++++++ holoviews/plotting/bokeh/element.py | 4 ++ holoviews/plotting/bokeh/plot.py | 2 + 3 files changed, 50 insertions(+) diff --git a/examples/user_guide/Plotting_with_Bokeh.ipynb b/examples/user_guide/Plotting_with_Bokeh.ipynb index 5f936c2a42..82a7f5245e 100644 --- a/examples/user_guide/Plotting_with_Bokeh.ipynb +++ b/examples/user_guide/Plotting_with_Bokeh.ipynb @@ -282,6 +282,50 @@ "img.opts(data_aspect=0.5, responsive=True, title='scale both')" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Alignment\n", + "\n", + "The aligment of a plot in a row or column can be controlled using ``align`` option. It controls both the vertical alignment in a row and the horizontal alignment in a column and can be set to one of `'start'`, `'center'` or `'end'` (where `'start'` is the default)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Vertical" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "points = hv.Points(data).opts(axiswise=True)\n", + "img = hv.Image((xs, ys, xs[:, np.newaxis]*np.sin(ys*4)))\n", + "\n", + "img + points.opts(height=200, align='end')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Horizontal" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "(img.opts(axiswise=True, width=200, align='center') + points).cols(1)" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index d0d099f1c6..4adb66bc82 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -51,6 +51,9 @@ class ElementPlot(BokehPlot, GenericElementPlot): both 'pan' and 'box_zoom' are drag tools, so if both are listed only the last one will be active.""") + align = param.ObjectSelector(default=None, objects=['start', 'center', 'end'], doc=""" + Alignment (vertical or horizontal) of the plot in a layout.""") + border = param.Number(default=10, doc=""" Minimum border around plot.""") @@ -503,6 +506,7 @@ def _plot_properties(self, key, element): self.callbacks.append(PlotSizeCallback(self, [stream], None)) plot_props = { + 'align': self.align, 'margin': self.margin, 'max_width': self.max_width, 'max_height': self.max_height, diff --git a/holoviews/plotting/bokeh/plot.py b/holoviews/plotting/bokeh/plot.py index d6ff6763b0..8dd547e0bc 100644 --- a/holoviews/plotting/bokeh/plot.py +++ b/holoviews/plotting/bokeh/plot.py @@ -631,6 +631,8 @@ def _create_subplots(self, layout, ranges): kwargs['frame_width'] = width if height is not None: kwargs['frame_height'] = height + if c == 0: + kwargs['align'] = 'end' if c == 0 and r != 0: kwargs['xaxis'] = None if c != 0 and r == 0: From 4d793b8c61ba1818fd9c82f0674c51db7fc7b5fd Mon Sep 17 00:00:00 2001 From: "James A. Bednar" Date: Thu, 12 Sep 2019 16:13:45 -0500 Subject: [PATCH 14/52] Fixed typo --- examples/user_guide/Plotting_with_Bokeh.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/user_guide/Plotting_with_Bokeh.ipynb b/examples/user_guide/Plotting_with_Bokeh.ipynb index 82a7f5245e..abbd7b8c21 100644 --- a/examples/user_guide/Plotting_with_Bokeh.ipynb +++ b/examples/user_guide/Plotting_with_Bokeh.ipynb @@ -288,7 +288,7 @@ "source": [ "## Alignment\n", "\n", - "The aligment of a plot in a row or column can be controlled using ``align`` option. It controls both the vertical alignment in a row and the horizontal alignment in a column and can be set to one of `'start'`, `'center'` or `'end'` (where `'start'` is the default)." + "The alignment of a plot in a row or column can be controlled using the ``align`` option. It controls both the vertical alignment in a row and the horizontal alignment in a column and can be set to one of `'start'`, `'center'` or `'end'` (where `'start'` is the default)." ] }, { From ffdb28eebcc572f34a4244afbee7440b23bff4bb Mon Sep 17 00:00:00 2001 From: Gabriel Corona Date: Fri, 13 Sep 2019 11:02:18 +0200 Subject: [PATCH 15/52] Fix step interpolation rounding for datetime (#3958) When interpolating datetime values in step(), the values where converted in floating types: this causes a lack of precision in the computation. Instead, we can do the computation in datetime64[ns]. 
--- holoviews/operation/element.py | 4 ++-- holoviews/tests/operation/testoperation.py | 24 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/holoviews/operation/element.py b/holoviews/operation/element.py index f13427beb4..fc5bfc7f97 100644 --- a/holoviews/operation/element.py +++ b/holoviews/operation/element.py @@ -785,9 +785,9 @@ def _process_layer(self, element, key=None): is_datetime = isdatetime(x) if is_datetime: dt_type = 'datetime64[ns]' - x = x.astype(dt_type).astype('int64') + x = x.astype(dt_type) dvals = tuple(element.dimension_values(d) for d in element.dimensions()[1:]) - xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x.astype('f'), dvals) + xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x, dvals) if is_datetime: xs = xs.astype(dt_type) return element.clone((xs,)+dvals) diff --git a/holoviews/tests/operation/testoperation.py b/holoviews/tests/operation/testoperation.py index 59d6c8339c..a2035a0df4 100644 --- a/holoviews/tests/operation/testoperation.py +++ b/holoviews/tests/operation/testoperation.py @@ -291,10 +291,10 @@ def test_interpolate_datetime_curve_pre(self): values = [0, 1, 2, 3] interpolated = interpolate_curve(Curve((dates, values)), interpolation='steps-pre') dates_interp = np.array([ - '2017-01-01T00:00:16.364011520', '2017-01-01T00:00:16.364011520', - '2017-01-02T00:01:05.465745408', '2017-01-02T00:01:05.465745408', - '2017-01-02T23:59:37.128525824', '2017-01-02T23:59:37.128525824', - '2017-01-04T00:00:26.230259712' + '2017-01-01T00:00:00', '2017-01-01T00:00:00', + '2017-01-02T00:00:00', '2017-01-02T00:00:00', + '2017-01-03T00:00:00', '2017-01-03T00:00:00', + '2017-01-04T00:00:00' ], dtype='datetime64[ns]') curve = Curve((dates_interp, [0, 1, 1, 2, 2, 3, 3])) self.assertEqual(interpolated, curve) @@ -317,10 +317,10 @@ def test_interpolate_datetime_curve_mid(self): values = [0, 1, 2, 3] interpolated = interpolate_curve(Curve((dates, values)), interpolation='steps-mid') dates_interp = np.array([ - '2017-01-01T00:00:16.364011520', '2017-01-01T11:59:32.195401728', - '2017-01-01T11:59:32.195401728', '2017-01-02T12:00:21.297135616', - '2017-01-02T12:00:21.297135616', '2017-01-03T12:01:10.398869504', - '2017-01-03T12:01:10.398869504', '2017-01-04T00:00:26.230259712' + '2017-01-01T00:00:00', '2017-01-01T12:00:00', + '2017-01-01T12:00:00', '2017-01-02T12:00:00', + '2017-01-02T12:00:00', '2017-01-03T12:00:00', + '2017-01-03T12:00:00', '2017-01-04T00:00:00' ], dtype='datetime64[ns]') curve = Curve((dates_interp, [0, 0, 1, 1, 2, 2, 3, 3])) self.assertEqual(interpolated, curve) @@ -343,10 +343,10 @@ def test_interpolate_datetime_curve_post(self): values = [0, 1, 2, 3] interpolated = interpolate_curve(Curve((dates, values)), interpolation='steps-post') dates_interp = np.array([ - '2017-01-01T00:00:16.364011520', '2017-01-02T00:01:05.465745408', - '2017-01-02T00:01:05.465745408', '2017-01-02T23:59:37.128525824', - '2017-01-02T23:59:37.128525824', '2017-01-04T00:00:26.230259712', - '2017-01-04T00:00:26.230259712' + '2017-01-01T00:00:00', '2017-01-02T00:00:00', + '2017-01-02T00:00:00', '2017-01-03T00:00:00', + '2017-01-03T00:00:00', '2017-01-04T00:00:00', + '2017-01-04T00:00:00' ], dtype='datetime64[ns]') curve = Curve((dates_interp, [0, 0, 1, 1, 2, 2, 3])) self.assertEqual(interpolated, curve) From 67726a6d3d4d49486c9bc4476e9d541540f8210c Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 20 Sep 2019 11:17:59 +0200 Subject: [PATCH 16/52] Various fixes for xarray 0.13 compatibility (#3973) --- holoviews/core/data/xarray.py 
| 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/holoviews/core/data/xarray.py b/holoviews/core/data/xarray.py index 99193430c5..297451642f 100644 --- a/holoviews/core/data/xarray.py +++ b/holoviews/core/data/xarray.py @@ -50,10 +50,11 @@ def shape(cls, dataset, gridded=False): if kd.name in array.dims][::-1] if not all(d in names for d in array.dims): array = np.squeeze(array) - try: - array = array.transpose(*names, transpose_coords=False) - except: - array = array.transpose(*names) # Handle old xarray + if len(names) > 1: + try: + array = array.transpose(*names, transpose_coords=False) + except: + array = array.transpose(*names) # Handle old xarray shape = array.shape if gridded: return shape @@ -218,6 +219,10 @@ def range(cls, dataset, dimension): da = dask_array_module() if da and isinstance(dmin, da.Array): dmin, dmax = da.compute(dmin, dmax) + if isinstance(dmin, np.ndarray) and dmin.shape == (): + dmin = dmin[()] + if isinstance(dmax, np.ndarray) and dmax.shape == (): + dmax = dmax[()] dmin = dmin if np.isscalar(dmin) or isinstance(dmin, util.datetime_types) else dmin.item() dmax = dmax if np.isscalar(dmax) or isinstance(dmax, util.datetime_types) else dmax.item() return dmin, dmax @@ -390,8 +395,10 @@ def ndloc(cls, dataset, indices): sampled = (all(isinstance(ind, np.ndarray) and ind.dtype.kind != 'b' for ind in adjusted_indices) and len(indices) == len(kdims)) if sampled or (all_scalar and len(indices) == len(kdims)): + import xarray as xr if all_scalar: isel = {k: [v] for k, v in isel.items()} - return dataset.data.isel_points(**isel).to_dataframe().reset_index() + selected = dataset.data.isel({k: xr.DataArray(v) for k, v in isel.items()}) + return selected.to_dataframe().reset_index() else: return dataset.data.isel(**isel) From 3c815ccc3c9055ce23d6aced13de684902fdd919 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 20 Sep 2019 11:34:29 +0200 Subject: [PATCH 17/52] Resolve dependent functions in Dynamic operations (#3975) --- holoviews/util/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/holoviews/util/__init__.py b/holoviews/util/__init__.py index 4f17b16abe..a87ad9a034 100644 --- a/holoviews/util/__init__.py +++ b/holoviews/util/__init__.py @@ -917,6 +917,11 @@ def _eval_kwargs(self): for k, v in self.p.kwargs.items(): if util.is_param_method(v): v = v() + elif isinstance(v, FunctionType) and hasattr(v, '_dinfo'): + deps = v._dinfo + args = (getattr(p.owner, p.name) for p in deps.get('dependencies', [])) + kwargs = {k: getattr(p.owner, p.name) for k, p in deps.get('kw', {}).items()} + v = v(*args, **kwargs) evaled_kwargs[k] = v return evaled_kwargs From f5ac179e07e93f4b5d0870c07321ac049147dc3e Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 20 Sep 2019 11:46:33 +0200 Subject: [PATCH 18/52] Correctly resample colorcet colormaps (#3977) --- holoviews/plotting/util.py | 51 ++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/holoviews/plotting/util.py b/holoviews/plotting/util.py index 7e6b07c164..d2795e590f 100644 --- a/holoviews/plotting/util.py +++ b/holoviews/plotting/util.py @@ -536,6 +536,20 @@ def map_colors(arr, crange, cmap, hex=True): return arr +def resample_palette(palette, ncolors, categorical, cmap_categorical): + """ + Resample the number of colors in a palette to the selected number. 
+ """ + if len(palette) != ncolors: + if categorical and cmap_categorical: + palette = [palette[i%len(palette)] for i in range(ncolors)] + else: + lpad, rpad = -0.5, 0.49999999999 + indexes = np.linspace(lpad, (len(palette)-1)+rpad, ncolors) + palette = [palette[int(np.round(v))] for v in indexes] + return palette + + def mplcmap_to_palette(cmap, ncolors=None, categorical=False): """ Converts a matplotlib colormap to palette of RGB hex strings." @@ -564,6 +578,22 @@ def mplcmap_to_palette(cmap, ncolors=None, categorical=False): return [rgb2hex(c) for c in cmap(np.linspace(0, 1, ncolors))] +def colorcet_cmap_to_palette(cmap, ncolors=None, categorical=False): + from colorcet import palette + + categories = ['glasbey'] + + ncolors = ncolors or 256 + cmap_categorical = any(c in cmap for c in categories) + + if cmap.endswith('_r'): + palette = list(reversed(palette[cmap[:-2]])) + else: + palette = palette[cmap] + + return resample_palette(palette, ncolors, categorical, cmap_categorical) + + def bokeh_palette_to_palette(cmap, ncolors=None, categorical=False): from bokeh import palettes @@ -603,14 +633,7 @@ def bokeh_palette_to_palette(cmap, ncolors=None, categorical=False): palette = palette(ncolors) if reverse: palette = palette[::-1] - if len(palette) != ncolors: - if categorical and cmap_categorical: - palette = [palette[i%len(palette)] for i in range(ncolors)] - else: - lpad, rpad = -0.5, 0.49999999999 - indexes = np.linspace(lpad, (len(palette)-1)+rpad, ncolors) - palette = [palette[int(np.round(v))] for v in indexes] - return palette + return resample_palette(palette, ncolors, categorical, cmap_categorical) def linear_gradient(start_hex, finish_hex, n=10): @@ -879,16 +902,12 @@ def process_cmap(cmap, ncolors=None, provider=None, categorical=False): mpl_cmaps = _list_cmaps('matplotlib') bk_cmaps = _list_cmaps('bokeh') cet_cmaps = _list_cmaps('colorcet') - if provider=='matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)): + if provider == 'matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)): palette = mplcmap_to_palette(cmap, ncolors, categorical) - elif provider=='bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)): + elif provider == 'bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)): palette = bokeh_palette_to_palette(cmap, ncolors, categorical) - elif provider=='colorcet' or (provider is None and cmap in cet_cmaps): - from colorcet import palette - if cmap.endswith('_r'): - palette = list(reversed(palette[cmap[:-2]])) - else: - palette = palette[cmap] + elif provider == 'colorcet' or (provider is None and cmap in cet_cmaps): + palette = colorcet_cmap_to_palette(cmap, ncolors, categorical) else: raise ValueError("Supplied cmap %s not found among %s colormaps." 
% (cmap,providers_checked)) From 3f411fa34f46cb4b8da1a0c4c8a14ccaab7a5d41 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 20 Sep 2019 12:29:37 +0200 Subject: [PATCH 19/52] Ensure that empty Stream source elements get remapped (#3978) --- holoviews/streams.py | 4 +++- holoviews/tests/teststreams.py | 9 +++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/holoviews/streams.py b/holoviews/streams.py index b26d307621..cf379e9462 100644 --- a/holoviews/streams.py +++ b/holoviews/streams.py @@ -338,10 +338,12 @@ def source(self): @source.setter def source(self, source): - if self.source: + if self.source is not None: source_list = self.registry[self.source] if self in source_list: source_list.remove(self) + if not source_list: + self.registry.pop(self.source) if source is None: self._source = None diff --git a/holoviews/tests/teststreams.py b/holoviews/tests/teststreams.py index 94942b5558..cce76114aa 100644 --- a/holoviews/tests/teststreams.py +++ b/holoviews/tests/teststreams.py @@ -626,6 +626,15 @@ def test_source_empty_element(self): stream = PointerX(source=points) self.assertIs(stream.source, points) + def test_source_empty_element_remap(self): + points = Points([]) + stream = PointerX(source=points) + self.assertIs(stream.source, points) + curve = Curve([]) + stream.source = curve + self.assertNotIn(points, Stream.registry) + self.assertIn(curve, Stream.registry) + def test_source_empty_dmap(self): points_dmap = DynamicMap(lambda x: Points([]), kdims=['X']) stream = PointerX(source=points_dmap) From e7bfad92357fdde33c45faab755e0aaeb52b610a Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 20 Sep 2019 13:37:48 +0200 Subject: [PATCH 20/52] Ensure Dynamic utility subscribes to dependent function (#3980) --- doc/nbpublisher | 2 +- holoviews/core/accessors.py | 17 +++++++------- holoviews/core/util.py | 39 +++++++++++++++++++++++++++---- holoviews/streams.py | 9 +++---- holoviews/tests/core/testapply.py | 33 ++++++++++++++++++++++++++ holoviews/util/__init__.py | 24 +++++++------------ 6 files changed, 90 insertions(+), 34 deletions(-) diff --git a/doc/nbpublisher b/doc/nbpublisher index 90ed382834..0ffe6a0fde 160000 --- a/doc/nbpublisher +++ b/doc/nbpublisher @@ -1 +1 @@ -Subproject commit 90ed3828347afd8bb93cd3183733fedb26a214a4 +Subproject commit 0ffe6a0fde289cffe51efa3776565bfd75b5633d diff --git a/holoviews/core/accessors.py b/holoviews/core/accessors.py index 56d7bb8e7b..2e30020da3 100644 --- a/holoviews/core/accessors.py +++ b/holoviews/core/accessors.py @@ -4,6 +4,7 @@ from __future__ import absolute_import, unicode_literals from collections import OrderedDict +from types import FunctionType import param @@ -84,24 +85,22 @@ def function(object, **kwargs): params = {p: val for p, val in kwargs.items() if isinstance(val, param.Parameter) and isinstance(val.owner, param.Parameterized)} - param_methods = {p: val for p, val in kwargs.items() - if util.is_param_method(val, has_deps=True)} + + dependent_kws = any( + (isinstance(val, FunctionType) and hasattr(val, '_dinfo')) or + util.is_param_method(val, has_deps=True) for val in kwargs.values() + ) if dynamic is None: dynamic = (bool(streams) or isinstance(self._obj, DynamicMap) or util.is_param_method(function, has_deps=True) or - params or param_methods) + params or dependent_kws) if applies and dynamic: return Dynamic(self._obj, operation=function, streams=streams, kwargs=kwargs, link_inputs=link_inputs) elif applies: - inner_kwargs = dict(kwargs) - for k, v in kwargs.items(): - if 
util.is_param_method(v, has_deps=True): - inner_kwargs[k] = v() - elif k in params: - inner_kwargs[k] = getattr(v.owner, v.name) + inner_kwargs = util.resolve_dependent_kwargs(kwargs) if hasattr(function, 'dynamic'): inner_kwargs['dynamic'] = False return function(self._obj, **inner_kwargs) diff --git a/holoviews/core/util.py b/holoviews/core/util.py index 1418b7cdf1..f1ce35a38e 100644 --- a/holoviews/core/util.py +++ b/holoviews/core/util.py @@ -7,12 +7,14 @@ import string, fnmatch import unicodedata import datetime as dt + from collections import defaultdict -from functools import partial from contextlib import contextmanager from distutils.version import LooseVersion as _LooseVersion - +from functools import partial from threading import Thread, Event +from types import FunctionType + import numpy as np import param @@ -26,7 +28,7 @@ # Python3 compatibility if sys.version_info.major >= 3: import builtins as builtins # noqa (compatibility) - + basestring = str unicode = str long = int @@ -38,7 +40,7 @@ LooseVersion = _LooseVersion else: import __builtin__ as builtins # noqa (compatibility) - + basestring = basestring unicode = unicode from itertools import izip @@ -1482,6 +1484,35 @@ def is_param_method(obj, has_deps=False): return parameterized +def resolve_dependent_kwargs(kwargs): + """Resolves parameter dependencies in the supplied dictionary + + Resolves parameter values, Parameterized instance methods and + parameterized functions with dependencies in the supplied + dictionary. + + Args: + kwargs (dict): A dictionary of keyword arguments + + Returns: + A new dictionary with where any parameter dependencies have been + resolved. + """ + resolved = {} + for k, v in kwargs.items(): + if is_param_method(v, has_deps=True): + v = v() + elif isinstance(v, param.Parameter) and isinstance(v.owner, param.Parameterized): + v = getattr(v.owner, v.name) + elif isinstance(v, FunctionType) and hasattr(v, '_dinfo'): + deps = v._dinfo + args = (getattr(p.owner, p.name) for p in deps.get('dependencies', [])) + kwargs = {k: getattr(p.owner, p.name) for k, p in deps.get('kw', {}).items()} + v = v(*args, **kwargs) + resolved[k] = v + return resolved + + @contextmanager def disable_constant(parameterized): """ diff --git a/holoviews/streams.py b/holoviews/streams.py index cf379e9462..916abbfbf8 100644 --- a/holoviews/streams.py +++ b/holoviews/streams.py @@ -637,7 +637,7 @@ class Params(Stream): parameters = param.List([], constant=True, doc=""" Parameters on the parameterized to watch.""") - def __init__(self, parameterized=None, parameters=None, watch=True, **params): + def __init__(self, parameterized=None, parameters=None, watch=True, watch_only=False, **params): if util.param_version < '1.8.0' and watch: raise RuntimeError('Params stream requires param version >= 1.8.0, ' 'to support watching parameters.') @@ -657,6 +657,7 @@ def __init__(self, parameterized=None, parameters=None, watch=True, **params): rename.update({(o, k): v for o in owners}) params['rename'] = rename + self._watch_only = watch_only super(Params, self).__init__(parameterized=parameterized, parameters=parameters, **params) self._memoize_counter = 0 self._events = [] @@ -730,6 +731,8 @@ def update(self, **kwargs): @property def contents(self): + if self._watch_only: + return {} filtered = {(p.owner, p.name): getattr(p.owner, p.name) for p in self.parameters} return {self._rename.get((o, n), n): v for (o, n), v in filtered.items() if self._rename.get((o, n), True) is not None} @@ -752,11 +755,9 @@ def __init__(self, parameterized, 
parameters=None, watch=True, **params): parameterized = util.get_method_owner(parameterized) if not parameters: parameters = [p.pobj for p in parameterized.param.params_depended_on(method.__name__)] + params['watch_only'] = True super(ParamMethod, self).__init__(parameterized, parameters, watch, **params) - @property - def contents(self): - return {} diff --git a/holoviews/tests/core/testapply.py b/holoviews/tests/core/testapply.py index edfaaa3a17..0c83a633c1 100644 --- a/holoviews/tests/core/testapply.py +++ b/holoviews/tests/core/testapply.py @@ -109,6 +109,39 @@ def test_element_apply_param_method_with_dependencies(self): pinst.label = 'Another label' self.assertEqual(applied[()], self.element.relabel('Another label')) + def test_element_apply_function_with_dependencies(self): + pinst = ParamClass() + + @param.depends(pinst.param.label) + def get_label(label): + return label + '!' + + applied = self.element.apply('relabel', label=get_label) + + # Check stream + self.assertEqual(len(applied.streams), 1) + stream = applied.streams[0] + self.assertIsInstance(stream, Params) + self.assertEqual(stream.parameters, [pinst.param.label]) + + # Check results + self.assertEqual(applied[()], self.element.relabel('Test!')) + + # Ensure subscriber gets called + stream.add_subscriber(lambda **kwargs: applied[()]) + pinst.label = 'Another label' + self.assertEqual(applied.last, self.element.relabel('Another label!')) + + def test_element_apply_function_with_dependencies_non_dynamic(self): + pinst = ParamClass() + + @param.depends(pinst.param.label) + def get_label(label): + return label + '!' + + applied = self.element.apply('relabel', dynamic=False, label=get_label) + self.assertEqual(applied, self.element.relabel('Test!')) + def test_element_apply_dynamic_with_param_method(self): pinst = ParamClass() applied = self.element.apply(lambda x, label: x.relabel(label), label=pinst.dynamic_label) diff --git a/holoviews/util/__init__.py b/holoviews/util/__init__.py index a87ad9a034..e927f7a223 100644 --- a/holoviews/util/__init__.py +++ b/holoviews/util/__init__.py @@ -894,6 +894,12 @@ def _get_streams(self, map_obj, watch=True): for value in self.p.kwargs.values(): if util.is_param_method(value, has_deps=True): streams.append(value) + elif isinstance(value, FunctionType) and hasattr(value, '_dinfo'): + dependencies = list(value._dinfo.get('dependencies', [])) + dependencies += list(value._dinfo.get('kwargs', {}).values()) + params = [d for d in dependencies if isinstance(d, param.Parameter) + and isinstance(d.owner, param.Parameterized)] + streams.append(Params(parameters=params, watch_only=True)) valid, invalid = Stream._process_streams(streams) if invalid: @@ -911,20 +917,6 @@ def _process(self, element, key=None, kwargs={}): else: return self.p.operation(element, **kwargs) - def _eval_kwargs(self): - """Evaluates any parameterized methods in the kwargs""" - evaled_kwargs = {} - for k, v in self.p.kwargs.items(): - if util.is_param_method(v): - v = v() - elif isinstance(v, FunctionType) and hasattr(v, '_dinfo'): - deps = v._dinfo - args = (getattr(p.owner, p.name) for p in deps.get('dependencies', [])) - kwargs = {k: getattr(p.owner, p.name) for k, p in deps.get('kw', {}).items()} - v = v(*args, **kwargs) - evaled_kwargs[k] = v - return evaled_kwargs - def _dynamic_operation(self, map_obj): """ Generate function to dynamically apply the operation. 
@@ -932,12 +924,12 @@ def _dynamic_operation(self, map_obj): """ if not isinstance(map_obj, DynamicMap): def dynamic_operation(*key, **kwargs): - kwargs = dict(self._eval_kwargs(), **kwargs) + kwargs = dict(util.resolve_dependent_kwargs(self.p.kwargs), **kwargs) obj = map_obj[key] if isinstance(map_obj, HoloMap) else map_obj return self._process(obj, key, kwargs) else: def dynamic_operation(*key, **kwargs): - kwargs = dict(self._eval_kwargs(), **kwargs) + kwargs = dict(util.resolve_dependent_kwargs(self.p.kwargs), **kwargs) if map_obj._posarg_keys and not key: key = tuple(kwargs[k] for k in map_obj._posarg_keys) return self._process(map_obj[key], key, kwargs) From 9d07bafd787c012c3babc1c9fede72a66ffa4367 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Sun, 22 Sep 2019 21:33:30 +0200 Subject: [PATCH 21/52] Updated cross-selector example to use Panel (#3960) --- examples/gallery/apps/bokeh/crossfilter.py | 64 ++++++---------------- 1 file changed, 18 insertions(+), 46 deletions(-) diff --git a/examples/gallery/apps/bokeh/crossfilter.py b/examples/gallery/apps/bokeh/crossfilter.py index 8d71002992..82cd20c409 100644 --- a/examples/gallery/apps/bokeh/crossfilter.py +++ b/examples/gallery/apps/bokeh/crossfilter.py @@ -1,28 +1,20 @@ """ -An example demonstrating how to put together a crossfilter app based -on the Auto MPG dataset. Demonstrates how to dynamically generate -bokeh plots using the HoloViews API and replacing the bokeh plot -based on the current widget selections. +An example demonstrating how to put together a cross-selector app based +on the Auto MPG dataset. """ import holoviews as hv +import panel as pn +import panel.widgets as pnw -from bokeh.layouts import row, widgetbox -from bokeh.models import Select -from bokeh.plotting import curdoc from bokeh.sampledata.autompg import autompg df = autompg.copy() -SIZES = list(range(6, 22, 3)) ORIGINS = ['North America', 'Europe', 'Asia'] # data cleanup -df.cyl = [str(x) for x in df.cyl] df.origin = [ORIGINS[x-1] for x in df.origin] -df['year'] = [str(x) for x in df.yr] -del df['yr'] - df['mfr'] = [x.split()[0] for x in df.name] df.loc[df.mfr=='chevy', 'mfr'] = 'chevrolet' df.loc[df.mfr=='chevroelt', 'mfr'] = 'chevrolet' @@ -38,40 +30,20 @@ continuous = [x for x in columns if x not in discrete] quantileable = [x for x in continuous if len(df[x].unique()) > 20] -renderer = hv.renderer('bokeh') -options = hv.Store.options(backend='bokeh') -options.Points = hv.Options('plot', width=800, height=600, size_index=None,) -options.Points = hv.Options('style', cmap='rainbow', line_color='black') - -def create_figure(): - label = "%s vs %s" % (x.value.title(), y.value.title()) - kdims = [x.value, y.value] - - opts, style = {}, {} - opts['color_index'] = color.value if color.value != 'None' else None - if size.value != 'None': - opts['size_index'] = size.value - opts['scaling_factor'] = (1./df[size.value].max())*200 - points = hv.Points(df, kdims=kdims, label=label).opts(plot=opts, style=style) - return renderer.get_plot(points).state - -def update(attr, old, new): - layout.children[1] = create_figure() - -x = Select(title='X-Axis', value='mpg', options=quantileable) -x.on_change('value', update) - -y = Select(title='Y-Axis', value='hp', options=quantileable) -y.on_change('value', update) - -size = Select(title='Size', value='None', options=['None'] + quantileable) -size.on_change('value', update) +x = pnw.Select(name='X-Axis', value='mpg', options=quantileable) +y = pnw.Select(name='Y-Axis', value='hp', options=quantileable) +size = 
pnw.Select(name='Size', value='None', options=['None'] + quantileable) +color = pnw.Select(name='Color', value='None', options=['None'] + quantileable) -color = Select(title='Color', value='None', options=['None'] + quantileable) -color.on_change('value', update) +@pn.depends(x.param.value, y.param.value, color.param.value, size.param.value) +def create_figure(x, y, color, size): + opts = dict(cmap='rainbow', width=800, height=600, padding=0.1, line_color='black') + if color != 'None': + opts['color'] = color + if size != 'None': + opts['size'] = hv.dim(size).norm()*20 + return hv.Points(df, [x, y], label="%s vs %s" % (x.title(), y.title())).opts(**opts) -controls = widgetbox([x, y, color, size], width=200) -layout = row(controls, create_figure()) +widgets = pn.WidgetBox(x, y, color, size, width=200) -curdoc().add_root(layout) -curdoc().title = "Crossfilter" +pn.Row(widgets, create_figure).servable('Cross-selector') From 957f73718eb399aa3f4557951d62f76fd8e8363a Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 24 Sep 2019 13:10:18 +0200 Subject: [PATCH 22/52] Fix apply method on HoloMap (#3989) --- holoviews/core/accessors.py | 4 ++-- holoviews/tests/core/testapply.py | 9 ++++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/holoviews/core/accessors.py b/holoviews/core/accessors.py index 2e30020da3..70fe76d9dd 100644 --- a/holoviews/core/accessors.py +++ b/holoviews/core/accessors.py @@ -81,7 +81,7 @@ def function(object, **kwargs): method_name) return method(*args, **kwargs) - applies = isinstance(self._obj, (ViewableElement, HoloMap)) + applies = isinstance(self._obj, ViewableElement) params = {p: val for p, val in kwargs.items() if isinstance(val, param.Parameter) and isinstance(val.owner, param.Parameterized)} @@ -96,7 +96,7 @@ def function(object, **kwargs): util.is_param_method(function, has_deps=True) or params or dependent_kws) - if applies and dynamic: + if (applies or isinstance(self._obj, HoloMap)) and dynamic: return Dynamic(self._obj, operation=function, streams=streams, kwargs=kwargs, link_inputs=link_inputs) elif applies: diff --git a/holoviews/tests/core/testapply.py b/holoviews/tests/core/testapply.py index 0c83a633c1..91c10952b0 100644 --- a/holoviews/tests/core/testapply.py +++ b/holoviews/tests/core/testapply.py @@ -1,7 +1,8 @@ +import numpy as np import param from holoviews.core.spaces import DynamicMap, HoloMap -from holoviews.element import Curve +from holoviews.element import Image, Curve from holoviews.element.comparison import ComparisonTestCase from holoviews.streams import Params, ParamMethod @@ -158,6 +159,12 @@ def test_element_apply_dynamic_with_param_method(self): pinst.label = 'Another label' self.assertEqual(applied[()], self.element.relabel('Another label!')) + def test_holomap_apply_with_method(self): + hmap = HoloMap({i: Image(np.array([[i, 2], [3, 4]])) for i in range(3)}) + reduced = hmap.apply.reduce(x=np.min) + + expected = HoloMap({i: Curve([(-0.25, 3), (0.25, i)], 'y', 'z') for i in range(3)}) + self.assertEqual(reduced, expected) From aa8504fb2e03debff9e435098acfef39b30de30a Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 1 Oct 2019 22:44:01 +0200 Subject: [PATCH 23/52] Fixed hover bug when plotting on inverted axis (#4010) --- holoviews/plotting/bokeh/raster.py | 5 +++-- holoviews/tests/plotting/bokeh/testrasterplot.py | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/holoviews/plotting/bokeh/raster.py b/holoviews/plotting/bokeh/raster.py index 3c90573a25..fc6ff444c6 100644 --- 
a/holoviews/plotting/bokeh/raster.py +++ b/holoviews/plotting/bokeh/raster.py @@ -89,11 +89,11 @@ def get_data(self, element, ranges, style): if self.invert_axes: l, b, r, t = b, l, t, r + dh, dw = t-b, r-l if self.invert_xaxis: l, r = r, l if self.invert_yaxis: b, t = t, b - dh, dw = t-b, r-l data = dict(x=[l], y=[b], dw=[dw], dh=[dh]) for i, vdim in enumerate(element.vdims, 2): @@ -164,13 +164,14 @@ def get_data(self, element, ranges, style): if self.invert_axes: img = img.T l, b, r, t = b, l, t, r + + dh, dw = t-b, r-l if self.invert_xaxis: l, r = r, l img = img[:, ::-1] if self.invert_yaxis: img = img[::-1] b, t = t, b - dh, dw = t-b, r-l if 0 in img.shape: img = np.zeros((1, 1), dtype=np.uint32) diff --git a/holoviews/tests/plotting/bokeh/testrasterplot.py b/holoviews/tests/plotting/bokeh/testrasterplot.py index 90a58eb21a..b04a624ffc 100644 --- a/holoviews/tests/plotting/bokeh/testrasterplot.py +++ b/holoviews/tests/plotting/bokeh/testrasterplot.py @@ -30,7 +30,7 @@ def test_raster_invert_axes(self): self.assertEqual(source.data['x'][0], 0) self.assertEqual(source.data['y'][0], 3) self.assertEqual(source.data['dw'][0], 2) - self.assertEqual(source.data['dh'][0], -3) + self.assertEqual(source.data['dh'][0], 3) def test_image_invert_axes(self): arr = np.array([[0, 1, 2], [3, 4, 5]]) @@ -54,7 +54,7 @@ def test_image_invert_xaxis(self): self.assertEqual(cdata['x'], [0.5]) self.assertEqual(cdata['y'], [-0.5]) self.assertEqual(cdata['dh'], [1.0]) - self.assertEqual(cdata['dw'], [-1.0]) + self.assertEqual(cdata['dw'], [1.0]) self.assertEqual(cdata['image'][0], arr[::-1, ::-1]) def test_image_invert_yaxis(self): @@ -67,7 +67,7 @@ def test_image_invert_yaxis(self): cdata = plot.handles['source'].data self.assertEqual(cdata['x'], [-0.5]) self.assertEqual(cdata['y'], [0.5]) - self.assertEqual(cdata['dh'], [-1.0]) + self.assertEqual(cdata['dh'], [1.0]) self.assertEqual(cdata['dw'], [1.0]) self.assertEqual(cdata['image'][0], arr) @@ -81,7 +81,7 @@ def test_rgb_invert_xaxis(self): self.assertEqual(cdata['x'], [0.5]) self.assertEqual(cdata['y'], [-0.5]) self.assertEqual(cdata['dh'], [1.0]) - self.assertEqual(cdata['dw'], [-1.0]) + self.assertEqual(cdata['dw'], [1.0]) def test_rgb_invert_yaxis(self): rgb = RGB(np.random.rand(10, 10, 3)).opts(plot=dict(invert_yaxis=True)) @@ -92,5 +92,5 @@ def test_rgb_invert_yaxis(self): cdata = plot.handles['source'].data self.assertEqual(cdata['x'], [-0.5]) self.assertEqual(cdata['y'], [0.5]) - self.assertEqual(cdata['dh'], [-1.0]) + self.assertEqual(cdata['dh'], [1.0]) self.assertEqual(cdata['dw'], [1.0]) From 371ff61c24458460e63a23a32f17c9d58a54b4e2 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 1 Oct 2019 22:44:13 +0200 Subject: [PATCH 24/52] Ensure that DynamicMap.groupby links inputs (#4012) --- holoviews/core/spaces.py | 7 +++++-- holoviews/tests/core/testdynamic.py | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/holoviews/core/spaces.py b/holoviews/core/spaces.py index cb780aa945..9beb284c29 100644 --- a/holoviews/core/spaces.py +++ b/holoviews/core/spaces.py @@ -1675,8 +1675,11 @@ def inner_fn(outer_vals, *key, **dynkwargs): inner_vals = [(d.name, k) for d, k in inner_dims] return self.select(**dict(outer_vals+inner_vals)).last if inner_kdims or self.streams: - group = self.clone(callback=partial(inner_fn, outer_vals), - kdims=inner_kdims) + callback = Callable(partial(inner_fn, outer_vals), + inputs=[self]) + group = self.clone( + callback=callback, kdims=inner_kdims + ) else: group = inner_fn(outer_vals, ()) 
groups.append((outer, group)) diff --git a/holoviews/tests/core/testdynamic.py b/holoviews/tests/core/testdynamic.py index 495543a8df..8dfbfa68db 100644 --- a/holoviews/tests/core/testdynamic.py +++ b/holoviews/tests/core/testdynamic.py @@ -332,6 +332,9 @@ def plot_function(mydim, data): self.assertIsInstance(ndlayout[0], DynamicMap) data = np.array([(0, 0, 0), (1, 1, 1), (2, 2, 2)]) buff.send(data) + self.assertIs(ndlayout[0].callback.inputs[0], dmap) + self.assertIs(ndlayout[1].callback.inputs[0], dmap) + self.assertIs(ndlayout[2].callback.inputs[0], dmap) self.assertEqual(ndlayout[0][()], Scatter([(0, 0)])) self.assertEqual(ndlayout[1][()], Scatter([(1, 1)])) self.assertEqual(ndlayout[2][()], Scatter([(2, 2)])) From 9768a57a9d03ef6b935de2bb1d102dc9089f0c49 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 1 Oct 2019 23:13:16 +0200 Subject: [PATCH 25/52] Do not switch backend when loading backend via renderer (#4013) --- holoviews/util/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/holoviews/util/__init__.py b/holoviews/util/__init__.py index e927f7a223..8278805968 100644 --- a/holoviews/util/__init__.py +++ b/holoviews/util/__init__.py @@ -631,7 +631,11 @@ def renderer(name): """ try: if name not in Store.renderers: + if Store.current_backend: + prev_backend = Store.current_backend extension(name) + if prev_backend: + Store.set_current_backend(prev_backend) return Store.renderers[name] except ImportError: msg = ('Could not find a {name!r} renderer, available renderers are: {available}.') From f0ad95f8c765c1f04ca92768e25cc9ba96854b17 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 1 Oct 2019 23:22:41 +0200 Subject: [PATCH 26/52] Ensure Point/Scatter plot correctly categorizes data on inverted axes (#4014) --- holoviews/plotting/bokeh/chart.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/holoviews/plotting/bokeh/chart.py b/holoviews/plotting/bokeh/chart.py index 834ef3cf8e..3b9eb6e7c3 100644 --- a/holoviews/plotting/bokeh/chart.py +++ b/holoviews/plotting/bokeh/chart.py @@ -98,7 +98,7 @@ def get_data(self, element, ranges, style): xdim, ydim = dims[xidx], dims[yidx] data[xdim] = element.dimension_values(xidx) data[ydim] = element.dimension_values(yidx) - self._categorize_data(data, (xdim, ydim), element.dimensions()) + self._categorize_data(data, dims[:2], element.dimensions()) cdata, cmapping = self._get_color_data(element, ranges, style) data.update(cdata) From a2aab570f6e894a57d0cffb373194671b57593d5 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Wed, 2 Oct 2019 19:12:43 +0200 Subject: [PATCH 27/52] Remove topics notebooks (#3984) --- examples/topics/geometry/lsystems.ipynb | 535 --------------- examples/topics/geometry/square_limit.ipynb | 290 -------- examples/topics/simulation/boids.ipynb | 254 ------- .../topics/simulation/hipster_dynamics.ipynb | 345 ---------- examples/topics/simulation/sri_model.ipynb | 646 ------------------ 5 files changed, 2070 deletions(-) delete mode 100644 examples/topics/geometry/lsystems.ipynb delete mode 100644 examples/topics/geometry/square_limit.ipynb delete mode 100644 examples/topics/simulation/boids.ipynb delete mode 100755 examples/topics/simulation/hipster_dynamics.ipynb delete mode 100755 examples/topics/simulation/sri_model.ipynb diff --git a/examples/topics/geometry/lsystems.ipynb b/examples/topics/geometry/lsystems.ipynb deleted file mode 100644 index 7ed219e841..0000000000 --- a/examples/topics/geometry/lsystems.ipynb +++ /dev/null @@ -1,535 +0,0 @@ -{ - "cells": [ - { - 
"cell_type": "markdown", - "metadata": {}, - "source": [ - "# L-Systems\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A [Lindenmayer system](https://en.wikipedia.org/wiki/L-system) or L-system is a mathematical system that can be used to describe growth process such as the growth of plants. Formally, it is a symbol expansion system whereby [rewrite rules](https://en.wikipedia.org/wiki/Rewriting) are applies iteratively to generate a longer string of symbols starting from a simple initial state. In this notebook, we will see how various types of fractal, including plant-like ones can be generated with L-systems and visualized with HoloViews." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import holoviews as hv\n", - "import numpy as np\n", - "hv.extension('bokeh')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This notebook makes extensive use of the ``Path`` element and we will want to keep equal aspects and suppress the axes:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%opts Path {+framewise +axiswise} [xaxis=None, yaxis=None show_title=False] (color='black')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Some simple patterns" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this notebook, we will be drawing paths relative to an agent, in the spirit of [turtle graphics](https://en.wikipedia.org/wiki/Turtle_graphics). For this we define a simple agent class that has a ``path`` property to show us the path travelled from the point of initialization:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class SimpleAgent(object):\n", - " \n", - " def __init__(self, x=0,y=0, heading=0):\n", - " self.x, self.y = x,y\n", - " self.heading = heading\n", - " self.trace = [(self.x, self.y)]\n", - " \n", - " def forward(self, distance):\n", - " self.x += np.cos(2*np.pi * self.heading/360.0)\n", - " self.y += np.sin(2*np.pi * self.heading/360.0)\n", - " self.trace.append((self.x,self.y))\n", - " \n", - " def rotate(self, angle):\n", - " self.heading += angle\n", - " \n", - " def back(self, distance):\n", - " self.heading += 180\n", - " self.forward(distance)\n", - " self.heading += 180\n", - " \n", - " @property\n", - " def path(self):\n", - " return hv.Path([self.trace])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now test our ``SimpleAgent`` by drawing some spirographs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def pattern(angle= 5):\n", - " agent = SimpleAgent()\n", - " for i in range(360//angle):\n", - " for i in range(4):\n", - " agent.forward(1)\n", - " agent.rotate(90)\n", - " agent.rotate(angle)\n", - " return agent\n", - " \n", - "(pattern(20).path + pattern(10).path + pattern(5).path\n", - " + pattern(5).path * pattern(10).path * pattern(20).path).cols(2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "We can also draw some pretty rose patterns, adapted from [these equations](http://www.mathcats.com/gallery/fiverosedetails.html):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def roses(l,n,k):\n", - " agent = SimpleAgent()\n", - " n * 10\n", - " 
x = (2.0 * k -n) / (2.0 * n)\n", - " for i in range(360*n):\n", - " agent.forward(l)\n", - " agent.rotate(i + x)\n", - " return agent\n", - "\n", - "roses(5, 7, 3).path + roses(5, 12, 5).path" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Following rules" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now want to the capabilites of our agent with the ability to read instructions, telling it which path to follow. Let's define the meaning of the following symbols:\n", - "\n", - "**F**: Move forward by a pre-specified distance.
\n", - "**B**: Move backwards by a pre-specified distance.
\n", - "**+**: Rotate anti-clockwise by a pre-specified angle.
\n", - "**-**: Rotate clockwise by a pre-specified angle.
\n", - "\n", - "Here is an agent class that can read strings of such symbols to draw the corresponding pattern:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class Agent(SimpleAgent):\n", - " \"An upgraded agent that can follow some rules\"\n", - " \n", - " default_rules = {'F': lambda t,d,a: t.forward(d),\n", - " 'B': lambda t,d,a: t.back(d),\n", - " '+': lambda t,d,a: t.rotate(-a),\n", - " '-': lambda t,d,a: t.rotate(a)}\n", - " \n", - " def __init__(self, x=0,y=0, instructions=None, heading=0, \n", - " distance=5, angle=60, rules=default_rules):\n", - " super(Agent,self).__init__(x,y, heading)\n", - " self.distance = distance\n", - " self.angle = angle\n", - " self.rules = rules\n", - " if instructions: self.process(instructions, self.distance, self.angle)\n", - " \n", - " def process(self, instructions, distance, angle):\n", - " for i in instructions: \n", - " self.rules[i](self, distance, angle)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining L-Systems" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "L-systems are defined with a [rewrite system](https://en.wikipedia.org/wiki/Rewriting), making use of a set of [production rules](https://en.wikipedia.org/wiki/Production_(computer_science)). What this means is that L-systems can generate instructions for our agent to follow, and therefore generate paths.\n", - "\n", - "Now we define the ``expand_rules`` function which can process some expansion rules to repeatedly substitute an initial set of symbols with new symbols:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def expand_rules(initial, iterations, productions):\n", - " \"Expand an initial symbol with the given production rules\"\n", - " expansion = initial\n", - " for i in range(iterations):\n", - " intermediate = \"\"\n", - " for ch in expansion:\n", - " intermediate = intermediate + productions.get(ch,ch)\n", - " expansion = intermediate\n", - " return expansion" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Koch curve and snowflake" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To demonstrate ``expand_rules``, let's define two different rules:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "koch_curve = {'F':'F+F-F-F+F'} # Replace 'F' with 'F+F-F-F+F'\n", - "koch_snowflake = {'F':'F-F++F-F'} # Replace 'F' with 'F-F++F-F'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here are the first three steps using the first rule:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(3):\n", - " print('%d: %s' % (i, expand_rules('F', i, koch_curve)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that these are instructions our agent can follow!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%opts Path {+axiswise} (color=Cycle())\n", - "k1 = Agent(-200, 0, expand_rules('F', 4, koch_curve), angle=90).path\n", - "k2 = Agent(-200, 0, expand_rules('F', 4, koch_snowflake)).path\n", - "k1 + k2 + (k1 * k2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This shows two variants of the [Koch snowflake](https://en.wikipedia.org/wiki/Koch_snowflake) where ``koch_curve`` is a variant that uses right angles." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Sierpinski triangle" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following example introduces a mutual relationship between two symbols, 'A' and 'B', instead of just the single symbol 'F' used above:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sierpinski_triangle = {'A':'B-A-B', 'B':'A+B+A'}\n", - "for i in range(3):\n", - " print('%d: %s' % (i, expand_rules('A', i,sierpinski_triangle)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once again we can use these instructions to draw an interesting shape although we also need to define what these symbols mean to our agent:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%opts Path (color='green')\n", - "sierpinski_rules = {'A': lambda t,d,a: t.forward(d),\n", - " 'B': lambda t,d,a: t.forward(d),\n", - " '+': lambda t,d,a: t.rotate(-a),\n", - " '-': lambda t,d,a: t.rotate(a)}\n", - "\n", - "instructions = expand_rules('A', 9,sierpinski_triangle)\n", - "Agent(x=-200, y=0, rules=sierpinski_rules, instructions=instructions, angle=60).path" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We see that with our L-system expansion in terms of 'A' and 'B', we have defined the famous [Sierpinski_triangle](https://en.wikipedia.org/wiki/Sierpinski_triangle) fractal." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### The Dragon curve" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now for another famous fractal:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dragon_curve = {'X':'X+YF+', 'Y':'-FX-Y'}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now have two new symbols 'X' and 'Y' which we need to define in addition to 'F', '+' and '-' which we used before:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dragon_rules = dict(Agent.default_rules, X=lambda t,d,a: None, Y=lambda t,d,a: None)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that 'X' and 'Y' don't actual do anything directly! These symbols are important in the expansion process but have no meaning to the agent. 
This time, let's use a ``HoloMap`` to view the expansion:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%opts Path {+framewise}\n", - "\n", - "def pad_extents(path):\n", - " \"Add 5% padding around the path\"\n", - " minx, maxx = path.range('x')\n", - " miny, maxy = path.range('y')\n", - " xpadding = ((maxx-minx) * 0.1)/2\n", - " ypadding = ((maxy-miny) * 0.1)/2\n", - " path.extents = (minx-xpadding, miny-ypadding, maxx+xpadding, maxy+ypadding)\n", - " return path\n", - " \n", - "hmap = hv.HoloMap(kdims='Iteration')\n", - "for i in range(7,17):\n", - " path = Agent(-200, 0, expand_rules('FX', i, dragon_curve), rules=dragon_rules, angle=90).path\n", - " hmap[i] = pad_extents(path)\n", - "hmap" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This fractal is known as the [Dragon Curve](https://en.wikipedia.org/wiki/Dragon_curve)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Plant fractals" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have seen how to generate various fractals with L-systems, but we have not yet seen the plant-like fractals that L-systems are most famous for. This is because we can't draw a realistic plant with a single unbroken line: we need to be able to draw some part of the plant then jump back to an earlier state.\n", - "\n", - "This can be achieved by adding two new actions to our agent: ``push`` to record the current state of the agent and ``pop`` to pop back to the state of the last push:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class AgentWithState(Agent):\n", - " \"Stateful agent that can follow instructions\"\n", - " \n", - " def __init__(self, x,y, instructions, **kwargs):\n", - " super(AgentWithState, self).__init__(x=x,y=y, instructions=None, **kwargs)\n", - " self.traces = []\n", - " self.state = []\n", - " self.process(instructions, self.distance, self.angle)\n", - " \n", - " def push(self):\n", - " self.traces.append(self.trace[:])\n", - " self.state.append((self.heading, self.x, self.y))\n", - " \n", - " def pop(self):\n", - " self.traces.append(self.trace[:])\n", - " [self.heading, self.x, self.y] = self.state.pop()\n", - " self.trace = [(self.x, self.y)]\n", - " \n", - " @property\n", - " def path(self):\n", - " traces = self.traces + [self.trace]\n", - " return hv.Path(traces)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's look at the first three expansions of a new ruleset we will use to generate a plant-like fractal:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plant_fractal = {'X':'F-[[X]+X]+F[+FX]-X', 'F':'FF'}\n", - "for i in range(3):\n", - " print('%d: %s' % (i, expand_rules('X', i, plant_fractal)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The new symbols '[' and ']' correspond to the new push and pop state actions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plant_rules = dict(Agent.default_rules, X=lambda t,d,a: None, \n", - " **{'[': lambda t,d,a: t.push(), ']': lambda t,d,a: t.pop()})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now generate a nice plant-like fractal:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], 
- "source": [ - "%%opts Path {+framewise} (color='g' line_width=1)\n", - "hmap = hv.HoloMap(kdims='Iteration')\n", - "for i in range(7):\n", - " instructions = expand_rules('X', i, plant_fractal)\n", - " if i > 2:\n", - " hmap[i] = AgentWithState(-200, 0, instructions, heading=90, rules=plant_rules, angle=25).path\n", - "hmap" - ] - } - ], - "metadata": { - "language_info": { - "name": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/examples/topics/geometry/square_limit.ipynb b/examples/topics/geometry/square_limit.ipynb deleted file mode 100644 index 7c747abd51..0000000000 --- a/examples/topics/geometry/square_limit.ipynb +++ /dev/null @@ -1,290 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Square Limit\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The above image shows a famous woodcut by [M.C. Escher](https://en.wikipedia.org/wiki/M._C._Escher) called [Square Limit](https://www.wikiart.org/en/m-c-escher/square-limit) composed of tesselating fish tiles. In this notebook, we will recreate this pattern using the HoloViews ``Spline`` element.\n", - "\n", - "The construction used here is that of Peter Henderson's [Functional Geometry](https://eprints.soton.ac.uk/257577/1/funcgeo2.pdf) paper and this notebook was inspired by Massimo Santini's [programming-with-escher](https://mapio.github.io/programming-with-escher/) notebook, itself inspired by [Haskell](https://github.com/micahhahn/FunctionalGeometry) and [Julia](https://shashi.github.io/ijulia-notebooks/funcgeo/) implementations.\n", - "\n", - "We start by importing HoloViews and NumPy and loading the extension:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import holoviews as hv\n", - "import numpy as np\n", - "hv.extension('matplotlib')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This notebook makes extensive use of the ``Spline`` element and we will want to keep equal aspects and suppress the axes:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%opts Spline [xaxis=None yaxis=None aspect='equal' bgcolor='white'] (linewidth=0.8)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "'Square Limit' is composed from the following fish pattern, over which we show the unit square:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "spline=[(0.0,1.0),(0.08,0.98),(0.22,0.82),(0.29,0.72),(0.29,0.72),(0.3,0.64),(0.29,0.57),(0.3,0.5),\n", - "(0.3,0.5),(0.34,0.4),(0.43,0.32),(0.5,0.26),(0.5,0.26),(0.58,0.21),(0.66,0.22),(0.76,0.2),(0.76,0.2),\n", - "(0.82,0.12),(0.94,0.05),(1.0,0.0),(1.0,0.0),(0.9,0.03),(0.81,0.04),(0.76,0.05),(0.76,0.05),(0.69,0.04),\n", - "(0.62,0.04),(0.55,0.04),(0.55,0.04),(0.49,0.1),(0.4,0.17),(0.35,0.2),(0.35,0.2),(0.29,0.24),(0.19,0.28),\n", - "(0.14,0.31),(0.14,0.31),(0.09,0.35),(-0.03,0.43),(-0.05,0.72),(-0.05,0.72),(-0.04,0.82),(-0.02,0.95),(0.0,1.0),\n", - "(0.1,0.85),(0.14,0.82),(0.18,0.78),(0.18,0.75),(0.18,0.75),(0.16,0.74),(0.14,0.73),(0.12,0.73),(0.12,0.73),\n", - "(0.11,0.77),(0.11,0.81),(0.1,0.85),(0.05,0.82),(0.1,0.8),(0.08,0.74),(0.09,0.7),(0.09,0.7),(0.07,0.68),\n", - "(0.06,0.66),(0.04,0.67),(0.04,0.67),(0.04,0.73),(0.04,0.81),(0.05,0.82),(0.11,0.7),(0.16,0.56),(0.24,0.39),\n", - 
"(0.3,0.34),(0.3,0.34),(0.41,0.22),(0.62,0.16),(0.8,0.08),(0.23,0.8),(0.35,0.8),(0.44,0.78),(0.5,0.75),\n", - "(0.5,0.75),(0.5,0.67),(0.5,0.59),(0.5,0.51),(0.5,0.51),(0.46,0.47),(0.42,0.43),(0.38,0.39),(0.29,0.71),\n", - "(0.36,0.74),(0.43,0.73),(0.48,0.69),(0.34,0.61),(0.38,0.66),(0.44,0.64),(0.48,0.63),(0.34,0.51),(0.38,0.56),\n", - "(0.41,0.58),(0.48,0.57),(0.45,0.42),(0.46,0.4),(0.47,0.39),(0.48,0.39),(0.42,0.39),(0.43,0.36),(0.46,0.32),\n", - "(0.48,0.33),(0.25,0.26),(0.17,0.17),(0.08,0.09),(0.0,0.01),(0.0,0.01),(-0.08,0.09),(-0.17,0.18),(-0.25,0.26),\n", - "(-0.25,0.26),(-0.2,0.37),(-0.11,0.47),(-0.03,0.57),(-0.17,0.26),(-0.13,0.34),(-0.08,0.4),(-0.01,0.44),\n", - "(-0.12,0.21),(-0.07,0.29),(-0.02,0.34),(0.05,0.4),(-0.06,0.14),(-0.03,0.23),(0.03,0.28),(0.1,0.34),(-0.02,0.08),\n", - "(0.02,0.16),(0.09,0.23),(0.16,0.3)]\n", - "\n", - "unitsquare = hv.Bounds((0,0,1,1))\n", - "fish = hv.Spline((spline, [1,4,4,4]*34)) # Cubic splines\n", - "fish * unitsquare" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As you may expect, we will be applying a number of different geometric transforms to generate 'Square Limit'. To do this we will use ``Affine2D`` from ``matplotlib.transforms`` and ``matplotlib.path.Path`` (not to be confused with ``hv.Path``!)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from matplotlib.path import Path\n", - "from matplotlib.transforms import Affine2D\n", - "\n", - "# Define some Affine2D transforms\n", - "rotT = Affine2D().rotate_deg(90).translate(1, 0)\n", - "rot45T = Affine2D().rotate_deg(45).scale(1. / np.sqrt(2.), 1. / np.sqrt(2.)).translate(1 / 2., 1 / 2.)\n", - "flipT = Affine2D().scale(-1, 1).translate(1, 0)\n", - "\n", - "def combine(obj):\n", - " \"Collapses overlays of Splines to allow transforms of compositions\"\n", - " if not isinstance(obj, hv.Overlay): return obj\n", - " return hv.Spline((np.vstack([el.data[0] for el in obj.values()]),\n", - " np.hstack([el.data[1] for el in obj.values()])))\n", - " \n", - "def T(spline, transform):\n", - " \"Apply a transform to a spline or overlay of splines\"\n", - " spline = combine(spline) \n", - " result = Path(spline.data[0], codes=spline.data[1]).transformed(transform)\n", - " return hv.Spline((result.vertices, result.codes))\n", - "\n", - "# Some simple transform functions we will be using\n", - "def rot(el): return T(el,rotT)\n", - "def rot45(el): return T(el, rot45T)\n", - "def flip(el): return T(el, flipT)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we define three ``Affine2D`` transforms (``rotT``,``rot45T`` and ``flipT``), a function to collapse HoloViews ``Spline`` overlays (built with the ``*`` operator) in a single ``Spline`` element, a generic transform function ``T`` and the three convenience functions we will be using directly (``rot``, ``rot45`` and ``flip``). 
Respectively, these functions rotate the spline by $90^o$, rotate the spline by $45^o$ and flip the spline horizontally.\n", - "\n", - "Here is a simple example of a possible tesselation:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fish * rot(rot(fish))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next we need two functions, ``beside`` and ``above`` to place splines next to each other or one above the other, while compressing appropriately along the relevant axis:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def beside(spline1, spline2, n=1, m=1):\n", - " den = n + m\n", - " t1 = Affine2D().scale(n / den, 1)\n", - " t2 = Affine2D().scale(m / den, 1).translate(n / den, 0)\n", - " return combine(T(spline1, t1) * T(spline2, t2))\n", - "\n", - "def above(spline1, spline2, n=1, m=1):\n", - " den = n + m\n", - " t1 = Affine2D().scale(1, n / den).translate(0, m / den)\n", - " t2 = Affine2D().scale(1, m / den)\n", - " return combine(T(spline1, t1) * T(spline2, t2))\n", - "\n", - "beside(fish, fish)* unitsquare + above(fish,fish) * unitsquare" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "One import tile in 'Square Limit' is what we will call ``smallfish`` which is our fish rotate by $45^o$ then flipped:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "smallfish = flip(rot45(fish))\n", - "smallfish * unitsquare" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now build the two central tesselations that are necessary to build 'Square Limit' which we will call ``t`` and ``u`` respectively:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = fish * smallfish * rot(rot(rot(smallfish)))\n", - "u = smallfish * rot(smallfish) * rot(rot(smallfish)) * rot(rot(rot(smallfish)))\n", - "t *unitsquare + u * unitsquare" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are now ready to define the two recursive functions that build the sides and corners of 'Square Limit' respectively. These recursive functions make use of ``quartet`` which is used to compress four splines into a small 2x2 grid:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "blank = hv.Spline(([(np.nan, np.nan)],[1])) # An empty Spline object useful for recursion\n", - "\n", - "def quartet(p, q, r, s):\n", - " return above(beside(p, q), beside(r, s))\n", - "\n", - "def side(n):\n", - " if n == 0: \n", - " return hv.Spline(([(np.nan, np.nan)],[1]))\n", - " else: \n", - " return quartet(side(n-1), side(n-1), rot(t), t)\n", - " \n", - "def corner(n):\n", - " if n == 0:\n", - " return hv.Spline(([(np.nan, np.nan)],[1]))\n", - " else:\n", - " return quartet(corner(n-1), side(n-1), rot(side(n-1)), u)\n", - " \n", - "\n", - "corner(2) + side(2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now have a way of building the corners and sides of 'Square Limit'. 
To do so, we will need one last function that will let us put the four corners and four sides in place together with the central tile:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def nonet(p, q, r, s, t, u, v, w, x):\n", - " return above(beside(p, beside(q, r), 1, 2),\n", - " above(beside(s, beside(t, u), 1, 2),\n", - " beside(v, beside(w, x), 1, 2)), 1, 2)\n", - "\n", - "args = [fish]* 4 + [blank] + [fish] * 4\n", - "nonet(*args)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we see use ``nonet`` to place eight of our fish around the edge of the square with a ``blank`` in the middle. We can finally use ``nonet`` together with our recursive ``corner`` and ``side`` functions to recreate 'Square Limit':" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%output size=250\n", - "def squarelimit(n):\n", - " return nonet(corner(n), side(n), rot(rot(rot(corner(n)))),\n", - " rot(side(n)), u, rot(rot(rot(side(n)))), \n", - " rot(corner(n)), rot(rot(side(n))), rot(rot(corner(n))))\n", - "squarelimit(3)" - ] - } - ], - "metadata": { - "language_info": { - "name": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/topics/simulation/boids.ipynb b/examples/topics/simulation/boids.ipynb deleted file mode 100644 index 888896ae76..0000000000 --- a/examples/topics/simulation/boids.ipynb +++ /dev/null @@ -1,254 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Boids\n", - "\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The above photograph of Auklet birds in flight show the classic flocking pattern observed when large groups of birds or fish travel together. Flocking is often cited as an example of [swarm intelligence](https://en.wikipedia.org/wiki/Swarm_intelligence) and the Boids models created by [Craig Reynolds](http://www.red3d.com/cwr/boids/) (1986) is one of the most well-known computational model of such behavior. In this model, each bird is represented by an agent in the simulation (called a boid) which follows a set of local rules. By defining how each boid responds to its neighbors, large groups of boids exhibit complex, emergent behaviors.\n", - "\n", - "In this notebook, we will set up a boid simulation and visualize and interact with it using HoloViews. The code used here is a highly condensed version of the boids code in the 'From Python to Numpy' book by Nicolas Rougier that you can find [here](https://www.labri.fr/perso/nrougier/from-python-to-numpy/#boids). This is an excellent resource for learning how to build simulations with NumPy and for learning how exactly the boids code used in this notebook works.\n", - "\n", - "We start by importing HoloViews and NumPy and loading the extension:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import holoviews as hv\n", - "import numpy as np\n", - "\n", - "hv.extension('bokeh')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining the simulation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our boids are simply points with an associated velocity that live in a 2D toroidal universe where the edges of the world wrap around. 
Our world has a width and a height, and our boids have a velocity ``vel`` and a position ``pos``.\n", - "\n", - "The following class defines the initial state of our boids simulation where we have defined a simple random initial state, giving our boids and initial randomized position and velocity:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def radarray(N):\n", - " \"Draw N random samples between 0 and 2pi radians\"\n", - " return np.random.uniform(0, 2*np.pi, N)\n", - "\n", - "class BoidState(object):\n", - " def __init__(self, N=500, width=400, height=400):\n", - " self.width, self.height, self.iteration = width, height, 0\n", - " self.vel = np.vstack([np.cos(radarray(N)), np.sin(radarray(N))]).T\n", - " r = min(width, height)/2*np.random.uniform(0, 1, N)\n", - " self.pos = np.vstack([width/2 + np.cos(radarray(N))*r, \n", - " height/2 + np.sin(radarray(N))*r]).T" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To keep our simulation code as short as possible, we define two helper functions that we will be reusing shortly:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def count(mask, n): \n", - " return np.maximum(mask.sum(axis=1), 1).reshape(n, 1)\n", - "\n", - "def limit_acceleration(steer, n, maxacc=0.03):\n", - " norm = np.sqrt((steer*steer).sum(axis=1)).reshape(n, 1)\n", - " np.multiply(steer, maxacc/norm, out=steer, where=norm > maxacc)\n", - " return norm, steer" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can define a highly condensed ``flock`` method on the ``Boids`` class which runs a single step of our boids flocking simulation. This code applies the following three local rules to all of our boid agents:\n", - "\n", - "* separation: Each boid steers to avoid crowding in its neighborhood\n", - "* alignment: Each boid steers towards the average heading of its localized neighbors\n", - "* cohesion: Each boid steers toward the average position (center of mass) of its localized neighbors" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class Boids(BoidState):\n", - " \n", - " def flock(self, min_vel=0.5, max_vel=2.0):\n", - " n = len(self.pos)\n", - " dx = np.subtract.outer(self.pos[:,0], self.pos[:,0])\n", - " dy = np.subtract.outer(self.pos[:,1], self.pos[:,1])\n", - " dist = np.hypot(dx, dy)\n", - " mask_1, mask_2 = (dist > 0) * (dist < 25), (dist > 0) * (dist < 50)\n", - " target = np.dstack((dx, dy))\n", - " target = np.divide(target, dist.reshape(n,n,1)**2, out=target, where=dist.reshape(n,n,1) != 0)\n", - " steer = (target*mask_1.reshape(n, n, 1)).sum(axis=1) / count(mask_1, n)\n", - " norm = np.sqrt((steer*steer).sum(axis=1)).reshape(n, 1)\n", - " steer = max_vel*np.divide(steer, norm, out=steer, where=norm != 0) - self.vel\n", - " norm, separation = limit_acceleration(steer, n)\n", - " target = np.dot(mask_2, self.vel)/count(mask_2, n)\n", - " norm = np.sqrt((target*target).sum(axis=1)).reshape(n, 1)\n", - " target = max_vel * np.divide(target, norm, out=target, where=norm != 0)\n", - " steer = target - self.vel\n", - " norm, alignment = limit_acceleration(steer, n)\n", - " target = np.dot(mask_2, self.pos)/ count(mask_2, n)\n", - " desired = target - self.pos\n", - " norm = np.sqrt((desired*desired).sum(axis=1)).reshape(n, 1)\n", - " desired *= max_vel / norm\n", - " steer = desired - self.vel\n", - " 
norm, cohesion = limit_acceleration(steer, n)\n", - " self.vel += 1.5 * separation + alignment + cohesion\n", - " norm = np.sqrt((self.vel*self.vel).sum(axis=1)).reshape(n, 1)\n", - " np.multiply(self.vel, max_vel/norm, out=self.vel, where=norm > max_vel)\n", - " np.multiply(self.vel, min_vel/norm, out=self.vel, where=norm < min_vel)\n", - " self.pos += self.vel + (self.width, self.height)\n", - " self.pos %= (self.width, self.height)\n", - " self.iteration += 1" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Visualizing the boid simulation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our simulation consists of points (boids) in 2D space that have a heading. The natural HoloViews element to visualize this data is the ``VectorField``. We start by setting some plot and style options for ``VectorField`` elements in this notebook:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%opts VectorField [xaxis=None yaxis=None] (scale=0.08)\n", - "%opts VectorField [normalize_lengths=False rescale_lengths=False] " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's initialize the simulation with 500 boids:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "boids = Boids(500)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can write a simple function that takes our ``boids`` simulation and returns a ``VectorField``, labelling it with the simulation iteration number. We can now use this to visualize the randomized initial state:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def boids_vectorfield(boids, iteration=1):\n", - " angle = (np.arctan2(boids.vel[:, 1], boids.vel[:, 0]))\n", - " return hv.VectorField([boids.pos[:,0], boids.pos[:,1], \n", - " angle, np.ones(boids.pos[:,0].shape)], extents=(0,0,400,400), \n", - " label='Iteration: %s' % boids.iteration)\n", - "\n", - "boids_vectorfield(boids)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we have our ``boids_vectorfield`` function, we can easily define a ``flock`` function that steps the flock simulation and returns the resulting ``VectorField``. This can be used in a ``DynamicMap`` together with the streams system as described in the Responding to Events and Custom Interactivity user guides:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from holoviews.streams import Stream\n", - "\n", - "def flock():\n", - " boids.flock()\n", - " return boids_vectorfield(boids)\n", - "\n", - "dmap = hv.DynamicMap(flock, streams=[Stream.define('Next')()])\n", - "dmap" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Initially, the output above shows the result of the simulation at iteration zero. 
By updating the stream (which has no parameters), we can now drive our simulation forwards using the ``event`` method on our ``DynamicMap``:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dmap.periodic(0.01, timeout=60, block=True) # Run the simulation for 60 seconds" - ] - } - ], - "metadata": { - "language_info": { - "name": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/topics/simulation/hipster_dynamics.ipynb b/examples/topics/simulation/hipster_dynamics.ipynb deleted file mode 100755 index 277b7e68c2..0000000000 --- a/examples/topics/simulation/hipster_dynamics.ipynb +++ /dev/null @@ -1,345 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# The Hipster Effect: An IPython Interactive Exploration" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*This notebook originally appeared as a [post](http://jakevdp.github.io/blog/2014/11/11/the-hipster-effect-interactive/) on the blog [Pythonic Perambulations](http://jakevdp.github.io). The content is BSD licensed.* It has been adapted to use HoloViews by [Philipp Rudiger](http://philippjfr.com)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This week I started seeing references all over the internet to [this paper](http://arxiv.org/abs/1410.8001): *The Hipster Effect: When Anticonformists All Look The Same*. It essentially describes a simple mathematical model which models conformity and non-conformity among a mutually interacting population, and finds some interesting results: namely, **conformity among a population of self-conscious non-conformists is similar to a phase transition in a time-delayed thermodynamic system**. In other words, with enough hipsters around responding to delayed fashion trends, a plethora of facial hair and fixed gear bikes is a natural result.\n", - "\n", - "Also naturally, upon reading the paper I wanted to try to reproduce the work. The paper solves the problem analytically for a continuous system and shows the precise values of certain phase transitions within the long-term limit of the postulated system. Though such theoretical derivations are useful, I often find it more intuitive to simulate systems like this in a more approximate manner to gain hands-on understanding." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Mathematically Modeling Hipsters\n", - "\n", - "We'll start by defining the problem, and going through the notation suggested in the paper. We'll consider a group of $N$ people, and define the following quantities:\n", - "\n", - "- $\\epsilon_i$ : this value is either $+1$ or $-1$. $+1$ means person $i$ is a hipster, while $-1$ means they're a conformist.\n", - "- $s_i(t)$ : this is also either $+1$ or $-1$. This indicates person $i$'s choice of style at time $t$. For example, $+1$ might indicated a bushy beard, while $-1$ indicates clean-shaven.\n", - "- $J_{ij}$ : The influence matrix. This is a value greater than zero which indicates how much person $j$ influences person $i$.\n", - "- $\\tau_{ij}$ : The delay matrix. This is an integer telling us the length of delay for the style of person $j$ to affect the style of person $i$." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The idea of the model is this: on any given day, person $i$ looks at the world around him or her, and sees some previous day's version of everyone else. This information is $s_j(t - \\tau_{ij})$.\n", - "\n", - "The amount that person $j$ influences person $i$ is given by the influence matrix, $J_{ij}$, and after putting all the information together, we see that person $i$'s mean impression of the world's style is\n", - "\n", - "$$\n", - "m_i(t) = \\frac{1}{N} \\sum_j J_{ij} \\cdot s_j(t - \\tau_{ij})\n", - "$$\n", - "\n", - "Given the problem setup, we can quickly check whether this impression matches their own current style:\n", - "\n", - "- if $m_i(t) \\cdot s_i(t) > 0$, then person $i$ matches those around them\n", - "- if $m_i(t) \\cdot s_i(t) < 0$, then person $i$ looks different than those around them\n", - "\n", - "A hipster who notices that their style matches that of the world around them will risk giving up all their hipster cred if they don't change quickly; a conformist will have the opposite reaction. Because $\\epsilon_i$ = $+1$ for a hipster and $-1$ for a conformist, we can encode this observation in a single value which tells us what which way the person will lean that day:\n", - "\n", - "$$\n", - "x_i(t) = -\\epsilon_i m_i(t) s_i(t)\n", - "$$\n", - "\n", - "Simple! If $x_i(t) > 0$, then person $i$ will more likely switch their style that day, and if $x_i(t) < 0$, person $i$ will more likely maintain the same style as the previous day. So we have a formula for how to update each person's style based on their preferences, their influences, and the world around them." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "But the world is a noisy place. Each person might have other things going on that day, so instead of using this value directly, we can turn it in to a probabilistic statement. Consider the function\n", - "\n", - "$$\n", - "\\phi(x;\\beta) = \\frac{1 + \\tanh(\\beta \\cdot x)}{2}\n", - "$$\n", - "\n", - "We can plot this function quickly:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import holoviews as hv\n", - "hv.extension('bokeh', 'matplotlib', width=90)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%output backend='matplotlib'\n", - "%%opts NdOverlay [aspect=1.5 figure_size=200 legend_position='top_left']\n", - "x = np.linspace(-1, 1, 1000)\n", - "curves = hv.NdOverlay(key_dimensions=['$\\\\beta$'])\n", - "for beta in [0.1, 0.5, 1, 5]:\n", - " curves[beta] = hv.Curve(zip(x, 0.5 * (1 + np.tanh(beta * x))),\n", - " '$x$', '$\\\\phi(x;\\\\beta)$')\n", - "curves" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This gives us a nice way to move from our preference $x_i$ to a probability of switching styles. Here $\\beta$ is inversely related to noise. For large $\\beta$, the noise is small and we basically map $x > 0$ to a 100% probability of switching, and $x<0$ to a 0% probability of switching. As $\\beta$ gets smaller, the probabilities get less and less distinct." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The Code\n", - "\n", - "Let's see this model in action. 
We'll start by defining a class which implements everything we've gone through above:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class HipsterStep(object):\n", - " \"\"\"Class to implement hipster evolution\n", - " \n", - " Parameters\n", - " ----------\n", - " initial_style : length-N array\n", - " values > 0 indicate one style, while values <= 0 indicate the other.\n", - " is_hipster : length-N array\n", - " True or False, indicating whether each person is a hipster\n", - " influence_matrix : N x N array\n", - " Array of non-negative values. influence_matrix[i, j] indicates\n", - " how much influence person j has on person i\n", - " delay_matrix : N x N array\n", - " Array of positive integers. delay_matrix[i, j] indicates the\n", - " number of days delay between person j's influence on person i.\n", - " \"\"\"\n", - " def __init__(self, initial_style, is_hipster,\n", - " influence_matrix, delay_matrix,\n", - " beta=1, rseed=None):\n", - " self.initial_style = initial_style\n", - " self.is_hipster = is_hipster\n", - " self.influence_matrix = influence_matrix\n", - " self.delay_matrix = delay_matrix\n", - " \n", - " self.rng = np.random.RandomState(rseed)\n", - " self.beta = beta\n", - " \n", - " # make s array consisting of -1 and 1\n", - " self.s = -1 + 2 * (np.atleast_2d(initial_style) > 0)\n", - " N = self.s.shape[1]\n", - " \n", - " # make eps array consisting of -1 and 1\n", - " self.eps = -1 + 2 * (np.asarray(is_hipster) > 0)\n", - " \n", - " # create influence_matrix and delay_matrix\n", - " self.J = np.asarray(influence_matrix, dtype=float)\n", - " self.tau = np.asarray(delay_matrix, dtype=int)\n", - " \n", - " # validate all the inputs\n", - " assert self.s.ndim == 2\n", - " assert self.s.shape[1] == N\n", - " assert self.eps.shape == (N,)\n", - " assert self.J.shape == (N, N)\n", - " assert np.all(self.J >= 0)\n", - " assert np.all(self.tau > 0)\n", - "\n", - " @staticmethod\n", - " def phi(x, beta):\n", - " return 0.5 * (1 + np.tanh(beta * x))\n", - " \n", - " def step_once(self):\n", - " N = self.s.shape[1]\n", - " \n", - " # iref[i, j] gives the index for the j^th individual's\n", - " # time-delayed influence on the i^th individual\n", - " iref = np.maximum(0, self.s.shape[0] - self.tau)\n", - " \n", - " # sref[i, j] gives the previous state of the j^th individual\n", - " # which affects the current state of the i^th individual\n", - " sref = self.s[iref, np.arange(N)]\n", - "\n", - " # m[i] is the mean of weighted influences of other individuals\n", - " m = (self.J * sref).sum(1) / self.J.sum(1)\n", - " \n", - " # From m, we use the sigmoid function to compute a transition probability\n", - " transition_prob = self.phi(-self.eps * m * self.s[-1], beta=self.beta)\n", - " \n", - " # Now choose steps stochastically based on this probability\n", - " new_s = np.where(transition_prob > self.rng.rand(N), -1, 1) * self.s[-1]\n", - " \n", - " # Add this to the results, and return\n", - " self.s = np.vstack([self.s, new_s])\n", - " return self.s\n", - " \n", - " def step(self, N):\n", - " for i in range(N):\n", - " self.step_once()\n", - " return self.s\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we'll create a function which will return an instance of the HipsterStep class with the appropriate settings:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_sim(Npeople=500, hipster_frac=0.8, 
initial_state_frac=0.5, delay=20, log10_beta=0.5, rseed=42):\n", - "\n", - " rng = np.random.RandomState(rseed)\n", - "\n", - " initial_state = (rng.rand(1, Npeople) > initial_state_frac)\n", - " is_hipster = (rng.rand(Npeople) > hipster_frac)\n", - "\n", - " influence_matrix = abs(rng.randn(Npeople, Npeople))\n", - " influence_matrix.flat[::Npeople + 1] = 0\n", - "\n", - " delay_matrix = 1 + rng.poisson(delay, size=(Npeople, Npeople))\n", - "\n", - " return HipsterStep(initial_state, is_hipster, influence_matrix, delay_matrix=delay_matrix,\n", - " beta=10 ** log10_beta, rseed=rseed)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Exploring this data" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we've defined the simulation, we can start exploring this data. I'll quickly demonstrate how to advance simulation time and get the results.\n", - "\n", - "First we initialize the model with a certain fraction of hipsters:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sim = get_sim(hipster_frac=0.8)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To run the simulation a number of steps we execute ``sim.step(Nsteps)`` giving us a matrix of identities for each invidual at each timestep." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = sim.step(200)\n", - "result" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can simply go right ahead and visualize this data using an Image Element type, defining the dimensions and bounds of the space." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%opts Image [width=600]\n", - "hv.Image(result.T, ['Time', 'individual'], 'State', bounds=(0, 0, 100, 500))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that you know how to run the simulation and access the data have a go at exploring the effects of different parameters on the population dynamics or apply some custom analyses to this data. Here are two quick examples of what you can do:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%opts Curve [width=350] (line_color='black') Image [width=350]\n", - "hipster_frac = hv.HoloMap(kdims='Hipster Fraction')\n", - "hipster_curves = hipster_frac.clone(shared_data=False)\n", - "for i in np.linspace(0.1, 1, 10):\n", - " sim = get_sim(hipster_frac=i)\n", - " img = hv.Image(sim.step(200).T.astype('int8'), ['Time', 'individual'], 'Bearded',\n", - " bounds=(0, 0, 500, 500), group='Population Dynamics')\n", - " hipster_frac[i] = img\n", - " agg = img.aggregate('Time', function=np.mean, spreadfn=np.std)\n", - " hipster_curves[i] = hv.ErrorBars(agg) * hv.Curve(agg)\n", - "(hipster_frac + hipster_curves)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Your turn" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "What intuitions can you develop about this system? How do the different parameters affect it?" 
- ] - } - ], - "metadata": { - "language_info": { - "name": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/examples/topics/simulation/sri_model.ipynb b/examples/topics/simulation/sri_model.ipynb deleted file mode 100755 index d84c35af11..0000000000 --- a/examples/topics/simulation/sri_model.ipynb +++ /dev/null @@ -1,646 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For a recent talk in my department I talked a little bit about agent based modeling and in the process I came across the simple but quite interesting SIR model in epidemiology. The inspiration for this post was Simon Dobson's post on [Epidemic spreading processes](http://www.simondobson.org/complex-networks-complex-processes/epidemic-spreading.html), which will provide a much more detailed scientific background and take you through some of the code step by step. However as a brief introduction\n", - "\n", - "I've made some minor tweaks to the model by adding vaccinated and dead states. I've also unified the function based approach into a single Parameterized class, which takes care of initializing, running and visualizing the network.\n", - "\n", - "In this blog post I'll primarily look at how we can quickly create complex visualization about this model using HoloViews. In the process I'll look at some predictions this model can make about herd immunity but won't be giving it any rigorous scientific treatment." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The Code" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here's the code for the model relying only on numpy, networkx, holoviews and matplotlib in the background." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import collections\n", - "import itertools\n", - "import math\n", - "\n", - "import numpy as np\n", - "np.seterr(divide='ignore')\n", - "import numpy.random as rnd\n", - "import networkx as nx\n", - "\n", - "import param\n", - "import holoviews as hv\n", - "\n", - "\n", - "class SRI_Model(param.Parameterized):\n", - " \"\"\"\n", - " Implementation of the SRI epidemiology model\n", - " using NetworkX and HoloViews for visualization.\n", - " This code has been adapted from Simon Dobson's\n", - " code here:\n", - " \n", - " http://www.simondobson.org/complex-networks-complex-processes/epidemic-spreading.html\n", - " \n", - " In addition to his basic parameters I've added\n", - " additional states to the model, a node may be\n", - " in one of the following states:\n", - " \n", - " * Susceptible: Can catch the disease from a connected node.\n", - " * Vaccinated: Immune to infection.\n", - " * Infected: Has the disease and may pass it on to any connected node.\n", - " * Recovered: Immune to infection.\n", - " * Dead: Edges are removed from graph.\n", - " \"\"\"\n", - "\n", - " network = param.ClassSelector(class_=nx.Graph, default=None, doc=\"\"\"\n", - " A custom NetworkX graph, instead of the default Erdos-Renyi graph.\"\"\")\n", - " \n", - " visualize = param.Boolean(default=True, doc=\"\"\"\n", - " Whether to compute layout of network for visualization.\"\"\")\n", - " \n", - " N = param.Integer(default=1000, doc=\"\"\"\n", - " Number of nodes to simulate.\"\"\")\n", - " \n", - " mean_connections = param.Number(default=10, doc=\"\"\"\n", - " Mean number of connections to make to other nodes.\"\"\")\n", - " \n", - " pSick = 
param.Number(default=0.01, doc=\"\"\"\n", - " Probability of a node to be initialized in sick state.\"\"\", bounds=(0, 1))\n", - "\n", - " pVaccinated = param.Number(default=0.1, bounds=(0, 1), doc=\"\"\"\n", - " Probability of a node to be initialized in vaccinated state.\"\"\")\n", - " \n", - " pInfect = param.Number(default=0.3, doc=\"\"\"\n", - " Probability of infection on each time step.\"\"\", bounds=(0, 1))\n", - " \n", - " pRecover = param.Number(default=0.05, doc=\"\"\"\n", - " Probability of recovering if infected on each timestep.\"\"\", bounds=(0, 1))\n", - " \n", - " pDeath = param.Number(default=0.1, doc=\"\"\"\n", - " Probability of death if infected on each timestep.\"\"\", bounds=(0, 1))\n", - " \n", - " \n", - " SPREADING_SUSCEPTIBLE = 'S'\n", - " SPREADING_VACCINATED = 'V'\n", - " SPREADING_INFECTED = 'I'\n", - " SPREADING_RECOVERED = 'R'\n", - " DEAD = 'D'\n", - "\n", - " def __init__(self, **params):\n", - " super(SRI_Model, self).__init__(**params)\n", - " if not self.network:\n", - " self.g = nx.erdos_renyi_graph(self.N, float(self.mean_connections)/self.N)\n", - " else:\n", - " self.g = self.network\n", - " self.vaccinated, self.infected = self.spreading_init()\n", - " self.model = self.spreading_make_sir_model()\n", - " self.color_mapping = [self.SPREADING_SUSCEPTIBLE,\n", - " self.SPREADING_VACCINATED,\n", - " self.SPREADING_INFECTED,\n", - " self.SPREADING_RECOVERED, self.DEAD]\n", - " if self.visualize:\n", - " k = 2/(math.sqrt(self.g.order()))\n", - " self.pos = hv.Graph.from_networkx(self.g, nx.spring_layout, iterations=50, k=k)\n", - "\n", - " def spreading_init(self):\n", - " \"\"\"Initialise the network with vaccinated, susceptible and infected states.\"\"\"\n", - " vaccinated, infected = 0, []\n", - " for i in self.g.node.keys():\n", - " self.g.node[i]['transmissions'] = 0\n", - " if(rnd.random() <= self.pVaccinated): \n", - " self.g.node[i]['state'] = self.SPREADING_VACCINATED\n", - " vaccinated += 1\n", - " elif(rnd.random() <= self.pSick):\n", - " self.g.node[i]['state'] = self.SPREADING_INFECTED\n", - " infected.append(i)\n", - " else:\n", - " self.g.node[i]['state'] = self.SPREADING_SUSCEPTIBLE\n", - " return vaccinated, infected\n", - "\n", - " def spreading_make_sir_model(self):\n", - " \"\"\"Return an SIR model function for given infection and recovery probabilities.\"\"\"\n", - " # model (local rule) function\n", - " def model( g, i ):\n", - " if g.node[i]['state'] == self.SPREADING_INFECTED:\n", - " # infect susceptible neighbours with probability pInfect\n", - " for m in g.neighbors(i):\n", - " if g.node[m]['state'] == self.SPREADING_SUSCEPTIBLE:\n", - " if rnd.random() <= self.pInfect:\n", - " g.node[m]['state'] = self.SPREADING_INFECTED\n", - " self.infected.append(m)\n", - " g.node[i]['transmissions'] += 1\n", - "\n", - " # recover with probability pRecover\n", - " if rnd.random() <= self.pRecover:\n", - " g.node[i]['state'] = self.SPREADING_RECOVERED\n", - " elif rnd.random() <= self.pDeath:\n", - " edges = [edge for edge in self.g.edges() if i in edge] \n", - " g.node[i]['state'] = self.DEAD\n", - " g.remove_edges_from(edges)\n", - "\n", - " return model\n", - "\n", - " def step(self):\n", - " \"\"\"Run a single step of the model over the graph.\"\"\"\n", - " for i in self.g.node.keys():\n", - " self.model(self.g, i)\n", - "\n", - " def run(self, steps):\n", - " \"\"\"\n", - " Run the network for the specified number of time steps\n", - " \"\"\"\n", - " for i in range(steps):\n", - " self.step()\n", - "\n", - " def stats(self):\n", - " 
\"\"\"\n", - " Return an ItemTable with statistics on the network data.\n", - " \"\"\"\n", - " state_labels = hv.OrderedDict([('S', 'Susceptible'), ('V', 'Vaccinated'), ('I', 'Infected'),\n", - " ('R', 'Recovered'), ('D', 'Dead')])\n", - " counts = collections.Counter()\n", - " transmissions = []\n", - " for n in self.g.nodes():\n", - " state = state_labels[self.g.node[n]['state']]\n", - " counts[state] += 1\n", - " if n in self.infected:\n", - " transmissions.append(self.g.node[n]['transmissions'])\n", - " data = hv.OrderedDict([(l, counts[l])\n", - " for l in state_labels.values()])\n", - " \n", - " infected = len(set(self.infected))\n", - " unvaccinated = float(self.N-self.vaccinated)\n", - " \n", - " data['$R_0$'] = np.mean(transmissions) if transmissions else 0\n", - " data['Death rate DR'] = np.divide(float(data['Dead']),self.N)\n", - " data['Infection rate IR'] = np.divide(float(infected), self.N)\n", - " if unvaccinated:\n", - " unvaccinated_dr = data['Dead']/unvaccinated\n", - " unvaccinated_ir = infected/unvaccinated\n", - " else:\n", - " unvaccinated_dr = 0\n", - " unvaccinated_ir = 0\n", - " data['Unvaccinated DR'] = unvaccinated_dr\n", - " data['Unvaccinated IR'] = unvaccinated_ir\n", - " return hv.ItemTable(data)\n", - "\n", - " def animate(self, steps):\n", - " \"\"\"\n", - " Run the network for the specified number of steps accumulating animations\n", - " of the network nodes and edges changing states and curves tracking the\n", - " spread of the disease.\n", - " \"\"\"\n", - " if not self.visualize:\n", - " raise Exception(\"Enable visualize option to get compute network visulizations.\")\n", - "\n", - " # Declare HoloMap for network animation and counts array\n", - " network_hmap = hv.HoloMap(kdims='Time')\n", - " sird = np.zeros((steps, 5))\n", - " \n", - " # Declare labels\n", - " state_labels = ['Susceptible', 'Vaccinated', 'Infected', 'Recovered', 'Dead']\n", - "\n", - " # Text annotation\n", - " nlabel = hv.Text(0.9, 0.05, 'N=%d' % self.N)\n", - "\n", - " for i in range(steps):\n", - " # Get path, point, states and count data\n", - " states = [self.g.node[n]['state'] for n in self.g.nodes()]\n", - " state_ints = [self.color_mapping.index(v) for v in states]\n", - " state_array = np.array(state_ints, ndmin=2).T\n", - " (sird[i, :], _) = np.histogram(state_array, bins=list(range(6)))\n", - "\n", - " # Create network path and node Elements\n", - " nodes = self.pos.nodes.clone(datatype=['dictionary'])\n", - " nodes = nodes.add_dimension('State', 0, states, True)\n", - " graph = self.pos.clone((self.pos.data.copy(), nodes))\n", - " # Create overlay and accumulate in network HoloMap\n", - " network_hmap[i] = (graph * nlabel).relabel(group='Network', label='SRI')\n", - " self.step()\n", - "\n", - " # Create Overlay of Curves\n", - " #extents = (-1, -1, steps, np.max(sird)+2)\n", - " curves = hv.NdOverlay({label: hv.Curve(zip(range(steps), sird[:, i]),\n", - " 'Time', 'Count')\n", - " for i, label in enumerate(state_labels)},\n", - " kdims=[hv.Dimension('State', values=state_labels)])\n", - " \n", - " # Animate VLine on top of Curves\n", - " distribution = hv.HoloMap({i: (curves * hv.VLine(i)).relabel(group='Counts', label='SRI')\n", - " for i in range(steps)}, kdims='Time')\n", - " \n", - " return network_hmap + distribution" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The style" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "HoloViews allows use to define various style options in advance on the Store.options 
object."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "hv.extension('bokeh', 'matplotlib')\n",
-    "\n",
-    "# Set colors and style options for the Element types\n",
-    "from holoviews import Store, Options\n",
-    "opts = Store.options()\n",
-    "\n",
-    "colormap = {k: v for k, v in zip('SVIRD', hv.Cycle().values)}\n",
-    "\n",
-    "opts.Graph = Options('plot', color_index='State')\n",
-    "opts.Graph = Options('style', cmap=colormap, node_size=6, edge_line_width=1)\n",
-    "opts.Histogram = Options('plot', show_grid=False)\n",
-    "opts.Overlay = Options('plot', show_frame=False)\n",
-    "opts.HeatMap = Options('plot', xrotation=90)\n",
-    "opts.ItemTable = Options('plot', width=900, height=50)\n",
-    "\n",
-    "opts.Overlay.Network = Options('plot', xaxis=None, yaxis=None)\n",
-    "opts.Overlay.Counts = Options('plot', show_grid=True)\n",
-    "\n",
-    "opts.VLine = {'style': Options(color='black', line_width=1),\n",
-    "              'plot': Options(show_grid=True)}"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Herd Immunity"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Experiment 1: Evaluating the effects of a highly infectious and deadly disease in a small population with varying levels of vaccination"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Having defined both the model and its styling, we can now run some real experiments. In particular we can investigate the effect of vaccination on our model.\n",
-    "\n",
-    "We'll initialize our model with only 50 individuals, who will on average make 10 connections to other individuals. Then we will infect a small fraction of the population ($p=0.15$) so we can track how the disease spreads through the population. To really drive the point home we'll use a very infectious and deadly disease."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "experiment1_params = dict(pInfect=0.08, pRecover=0.08, pSick=0.15,\n",
-    "                          N=50, mean_connections=10, pDeath=0.1)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Low vaccination population (10%)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Here we'll investigate the spread of the disease in a population with a 10% vaccination rate:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "sri_model = SRI_Model(pVaccinated=0.1, **experiment1_params)\n",
-    "sri_model.animate(21).redim.range(x=(-1.2, 1.2), y=(-1.2, 1.2))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In figure **A** we can observe how the disease quickly spreads across almost the entire unvaccinated population. Additionally we can track the number of individuals in a particular state in **B**. As the disease spreads unimpeded, most individuals either die or recover and therefore gain immunity. Individuals that die are obviously no longer part of the network, so their connections to other individuals get deleted; this way we can see the network thin out as the disease wreaks havoc among the population.\n",
-    "\n",
-    "Next we can view a breakdown of the final state of the simulation including infection and death rates:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%opts ItemTable [width=900 height=50]\n",
-    "sri_model.stats()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As you can see, both the infection and death rates are very high in this population. The disease reached a large percentage of all individuals, causing death in a large fraction of them. Among the unvaccinated population they are of course even higher, with over 90% infected and over 40% dead. The disease spread through our network completely unimpeded. Now let's see what happens if a large fraction of the population is vaccinated."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## High vaccination population (65%)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "If we increase the initial probability of being vaccinated to $p=0.65$, we'll be able to observe how this affects the spread of the disease through the network:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "sri_model = SRI_Model(pVaccinated=0.65, **experiment1_params)\n",
-    "sri_model.animate(21)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Even though we can still see the disease spreading among non-vaccinated individuals, we can also observe how the vaccinated individuals stop the spread. If an infected individual is connected to a majority of vaccinated individuals, the probability of the disease spreading is strongly reduced. Unlike in the low-vaccination population, the disease stops its spread not because too many individuals have died off; rather, it quickly runs out of steam, such that a majority of the initial, susceptible but healthy population remains completely unaffected.\n",
-    "\n",
-    "This is what's known as herd immunity and it's very important. This is because a small percentage of any population cannot be vaccinated, usually because they are immuno-compromised. However, when a larger percentage of people decide that they do not want to get vaccinated (for various and invariably stupid reasons), they place the rest of the population in danger, particularly those that cannot get vaccinated for health reasons.\n",
-    "\n",
-    "Let's look at what higher vaccination rates did to our experimental population:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "sri_model.stats()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The precipitous drop in the whole population's infection and death rates is easily explained by the fact that a smaller fraction of the population was susceptible to the disease in the first place; however, as herd immunity would predict, a smaller fraction of the unvaccinated population contracted and died of the disease as well. I hope this toy example once again emphasizes how important vaccination and herd immunity are."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Large networks"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Before we take a more systematic look at herd immunity, we'll increase the population size to 1000 individuals and have a look at what our virulent disease does to this population; if nothing else it'll produce a pretty plot."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%output holomap='scrubber' size=150\n",
-    "sri_model_lv = SRI_Model(pVaccinated=0.1, **dict(experiment1_params, N=1000))\n",
-    "sri_layout = sri_model_lv.animate(31)\n",
-    "sri_layout.Network.SRI[::2]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "sri_model_hv = SRI_Model(pVaccinated=0.65, visualize=False, **dict(experiment1_params, N=1000))\n",
-    "sri_model_hv.run(100)\n",
-    "(sri_model_lv.stats().relabel('Low Vaccination Population') +\n",
-    " sri_model_hv.stats().relabel('High Vaccination Population')).cols(1)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As we can see, the effects we observed in our smaller simulations above still hold. Unvaccinated individuals are much safer in the high-vaccination population than they are in the low-vaccination population."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Experiment 2: Systematically exploring the effect of vaccination rates and connectivity on infection and death rates in a large population"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now let's conduct a more systematic experiment by varying the vaccination rate and number of connections between individuals. In Experiment 1 we saw that vaccination rates could drastically reduce infection and death rates even among the unvaccinated population. Here we'll use a much less deadly disease, as what we're primarily interested in is how the disease spreads through populations with more or fewer connections and different vaccination rates. We'll also use a larger population (N=1000) to get a more representative sample."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "experiment2_params = dict(N=1000, pInfect=0.05, pRecover=0.05,\n",
-    "                          pSick=0.05, pDeath=0.001, visualize=False)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we explore the parameter space: we'll run the model for vaccination rates from 0% to 100% in 5% increments and for increasing numbers of connections. To speed the whole thing up we've disabled computing the network layout with the ``visualize`` parameter and will only be collecting the final simulation statistics. Finally, we can simply deconstruct our data into a pandas data frame."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "exp2_dims = ['Connections', 'pVaccinated']\n", - "hmap = hv.HoloMap(kdims=exp2_dims)\n", - "vacc_rates = np.linspace(0, 1, 21)\n", - "mean_conns = [2**i for i in range(7)]\n", - "for v, c in itertools.product(vacc_rates, mean_conns):\n", - " sri_model = SRI_Model(mean_connections=c, pVaccinated=v, **experiment2_params)\n", - " sri_model.run(100)\n", - " hmap[c, v] = sri_model.stats()\n", - "df = hmap.dframe()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Before we start visualizing this data let's have a look at it:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df[::20]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Regressions between vaccination, infection and death rates" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Using the HoloViews pandas and seaborn extensions we can now perform regressions on the vaccination rates against infection and death rates. However since we also varied the mean number of connections between individuals in the network we want to consider these variables independently. By assigning the number of connections to a HoloMap we can view each plot independently with a widget.\n", - "\n", - "Let's define the quantities we want to visualize and switch to matplotlib for these last few plots:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "quantities = ['Unvaccinated IR', 'Infection rate IR', 'Death rate DR', '$R_0$']\n", - "state_labels = ['Susceptible', 'Vaccinated', 'Infected', 'Recovered', 'Dead']\n", - "\n", - "%output backend='matplotlib'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%opts Layout [fig_size=200] \n", - "%%opts TriSurface (cmap='Reds_r' linewidth=0.1)\n", - "ds = hv.Dataset(df)\n", - "(ds.to.trisurface(['pVaccinated', 'Connections'], '$R_0$', [], group='$R_0$') +\n", - " ds.to.trisurface(['pVaccinated', 'Connections'], 'Infection rate IR', [], group='Infection Rate'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "By varying the number of connections we can observe second order effects that would usually be invisible to us. After playing around with it for a little we can draw the following conclusions:\n", - "\n", - "* Greater number of connections in the network lead to drastically higher infection and death rates.\n", - "* Infection rates scale linearly with death rates for very low and very high number of connections.\n", - "* For intermediate levels of network connectivity the relationship between vaccination and infection rates more closely resembles exponential decay, i.e. achieving a basic level of vaccination in a population has a greater payoff than boosting vaccination rates in populations where they are already high.\n", - "* The more highly connected a population the higher the vaccination rates have to be to effectively protect the population.\n", - "\n", - "These results emphasize how important it is to maintain high vaccination rates in the highly connected societies we live in today. 
Even more importantly they show how important it is to continue vaccination programs in developing countries where they'll have the greatest impact.\n", - "\n", - "We can also present the data in a different way examining all the data at once in a HeatMap." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%opts HeatMap [show_values=False aspect=1.5 xrotation=90] Histogram {+axiswise} Layout [fig_size=150]\n", - "group_colors = zip(quantities, ['Blues', 'Reds', 'Greens', 'Purples'])\n", - "hv.Layout([hv.Table(df).to.heatmap(['pVaccinated', 'Connections'],\n", - " q, [], group=q).opts(style=dict(cmap=c)).hist()\n", - " for q, c in group_colors]).cols(2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This view highlights the gradient along which vaccination is highly effective and then becoming less effective as the saturation of the colors increases." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Summary" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Remember \"It's only a model\", and a fairly simple one at that, but it provides some very clear predictions, which we've also observed in the real world. Getting the most out of models like this or even far more complex simulations requires tools that will allow us to make sense of interactions between many variables. I hope this post has gone some way towards persuading you that HoloViews is that tool, if so have a look at the [HoloViews website](www.holoviews.org) for more information." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Your turn" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The ``SRI_Model`` class provided above is deliberately very customizable. If you want to play with the model further try varying some other parameters and explore the effects on the model or supply your own network via the ``network`` parameter. There are a variety of tools to extract NetworkX Graph structures, so you could put together a model of your own social network. Hope you enjoyed it and have fun!" 
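As a quick illustration of that closing suggestion (a minimal sketch, assuming the `SRI_Model` class above and `networkx` are importable; the small-world graph and the parameter values are stand-ins, not part of the original notebook):

```python
# Sketch: run the model on a user-supplied graph instead of the default Erdos-Renyi network.
# Assumes the SRI_Model class defined above is in scope; the graph below is illustrative only.
import networkx as nx

social_graph = nx.watts_strogatz_graph(n=500, k=10, p=0.1)
custom_model = SRI_Model(network=social_graph, N=500, pVaccinated=0.4, visualize=False)
custom_model.run(100)
custom_model.stats()
```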
- ] - } - ], - "metadata": { - "language_info": { - "name": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} From 49a450b5478445ab913845d311d4d943c06621a1 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Wed, 2 Oct 2019 19:49:27 +0200 Subject: [PATCH 28/52] Made shared_axes option supported generally and toggle axiswise when disabled (#3410) --- holoviews/plotting/bokeh/element.py | 3 +-- holoviews/plotting/mpl/plot.py | 4 ++++ holoviews/plotting/plot.py | 1 + holoviews/plotting/plotly/plot.py | 8 ++++++++ holoviews/tests/plotting/bokeh/testlayoutplot.py | 13 ++++++++++++- .../tests/plotting/matplotlib/testlayoutplot.py | 8 ++++++++ 6 files changed, 34 insertions(+), 3 deletions(-) diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index 4adb66bc82..eb8fddcb07 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -352,8 +352,7 @@ def _axes_props(self, plots, subplots, element, ranges): plot_ranges = {} # Try finding shared ranges in other plots in the same Layout - norm_opts = self.lookup_options(el, 'norm').options - if plots and self.shared_axes and not norm_opts.get('axiswise', False): + if plots and self.shared_axes: plot_ranges = self._merge_ranges(plots, xspecs, yspecs) # Get the Element that determines the range and get_extents diff --git a/holoviews/plotting/mpl/plot.py b/holoviews/plotting/mpl/plot.py index 442075e06b..3db2dc23c1 100644 --- a/holoviews/plotting/mpl/plot.py +++ b/holoviews/plotting/mpl/plot.py @@ -257,6 +257,10 @@ class CompositePlot(GenericCompositePlot, MPLPlot): subplots to form a Layout. """ + shared_axes = param.Boolean(default=True, doc=""" + Whether axes ranges should be shared across the layout, if + disabled switches axiswise normalization option on globally.""") + def _link_dimensioned_streams(self): """ Should perform any linking required to update titles when dimensioned diff --git a/holoviews/plotting/plot.py b/holoviews/plotting/plot.py index d19dd700af..5d0b0c3c3a 100644 --- a/holoviews/plotting/plot.py +++ b/holoviews/plotting/plot.py @@ -374,6 +374,7 @@ def compute_ranges(self, obj, key, ranges): # been supplied from a composite plot return_fn = lambda x: x if isinstance(x, Element) else None for group, (axiswise, framewise) in norm_opts.items(): + axiswise = not getattr(self, 'shared_axes', not axiswise) elements = [] # Skip if ranges are cached or already computed by a # higher-level container object. 
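For context on the option this commit generalizes, a rough usage sketch (assuming HoloViews with the bokeh backend loaded; this mirrors the new tests further down rather than quoting the diff):

```python
import holoviews as hv
hv.extension('bokeh')

# With shared_axes=False each subplot keeps its own ranges, which implicitly
# switches on axiswise normalization for the whole layout.
layout = (hv.Curve([1, 2, 3]) + hv.Curve([10, 20, 30])).opts(shared_axes=False)
hv.render(layout)  # the two y-ranges stay roughly (1, 3) and (10, 30)
```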
diff --git a/holoviews/plotting/plotly/plot.py b/holoviews/plotting/plotly/plot.py index 5357367331..9c08250e52 100644 --- a/holoviews/plotting/plotly/plot.py +++ b/holoviews/plotting/plotly/plot.py @@ -54,6 +54,10 @@ class LayoutPlot(PlotlyPlot, GenericLayoutPlot): vspacing = param.Number(default=0.15, bounds=(0, 1)) + shared_axes = param.Boolean(default=True, doc=""" + Whether axes ranges should be shared across the layout, if + disabled switches axiswise normalization option on globally.""") + def __init__(self, layout, **params): super(LayoutPlot, self).__init__(layout, **params) self.layout, self.subplots, self.paths = self._init_layout(layout) @@ -295,6 +299,10 @@ class GridPlot(PlotlyPlot, GenericCompositePlot): vspacing = param.Number(default=0.05, bounds=(0, 1)) + shared_axes = param.Boolean(default=True, doc=""" + Whether axes ranges should be shared across the layout, if + disabled switches axiswise normalization option on globally.""") + def __init__(self, layout, ranges=None, layout_num=1, **params): if not isinstance(layout, GridSpace): raise Exception("GridPlot only accepts GridSpace.") diff --git a/holoviews/tests/plotting/bokeh/testlayoutplot.py b/holoviews/tests/plotting/bokeh/testlayoutplot.py index d7d1116349..6d01fb499e 100644 --- a/holoviews/tests/plotting/bokeh/testlayoutplot.py +++ b/holoviews/tests/plotting/bokeh/testlayoutplot.py @@ -296,4 +296,15 @@ def cb(aname): stream.event(aname=T) self.assertIn('aname: ' + T, p.handles['title'].text, p.handles['title'].text) p.cleanup() - self.assertEqual(stream._subscribers, []) \ No newline at end of file + self.assertEqual(stream._subscribers, []) + + def test_layout_shared_axes_disabled(self): + from holoviews.plotting.bokeh import CurvePlot + layout = (Curve([1, 2, 3]) + Curve([10, 20, 30])).opts(shared_axes=False) + plot = bokeh_renderer.get_plot(layout) + cp1, cp2 = plot.subplots[(0, 0)].subplots['main'], plot.subplots[(0, 1)].subplots['main'] + self.assertFalse(cp1.handles['y_range'] is cp2.handles['y_range']) + self.assertEqual(cp1.handles['y_range'].start, 1) + self.assertEqual(cp1.handles['y_range'].end, 3) + self.assertEqual(cp2.handles['y_range'].start, 10) + self.assertEqual(cp2.handles['y_range'].end, 30) diff --git a/holoviews/tests/plotting/matplotlib/testlayoutplot.py b/holoviews/tests/plotting/matplotlib/testlayoutplot.py index 08132c09d6..ebcae62f13 100644 --- a/holoviews/tests/plotting/matplotlib/testlayoutplot.py +++ b/holoviews/tests/plotting/matplotlib/testlayoutplot.py @@ -47,3 +47,11 @@ def test_layout_dimensioned_stream_title_update(self): self.assertIn('test: 1', plot.handles['title'].get_text()) plot.cleanup() self.assertEqual(stream._subscribers, []) + + def test_layout_shared_axes_disabled(self): + from holoviews.plotting.mpl import CurvePlot + layout = (Curve([1, 2, 3]) + Curve([10, 20, 30])).opts(shared_axes=False) + plot = mpl_renderer.get_plot(layout) + cp1, cp2 = plot.traverse(lambda x: x, [CurvePlot]) + self.assertTrue(cp1.handles['axis'].get_ylim(), (1, 3)) + self.assertTrue(cp2.handles['axis'].get_ylim(), (10, 30)) From c50bdc27ed84348467e5b1c596685904dff9a294 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 3 Oct 2019 01:46:50 +0200 Subject: [PATCH 29/52] Warn about mpl ArrowPlot fontsize style option (#4018) --- holoviews/plotting/mpl/annotation.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/holoviews/plotting/mpl/annotation.py b/holoviews/plotting/mpl/annotation.py index 6fe08be6fd..b06e135ead 100644 --- a/holoviews/plotting/mpl/annotation.py +++ 
b/holoviews/plotting/mpl/annotation.py @@ -185,6 +185,9 @@ def draw_annotation(self, axis, data, opts): xytext = (0, points if direction=='v' else -points) elif direction in ['>', '<']: xytext = (points if direction=='<' else -points, 0) + if 'fontsize' in textopts: + self.param.warning('Arrow textsize style option is deprecated, ' + 'use textsize option instead.') if 'textsize' in textopts: textopts['fontsize'] = textopts.pop('textsize') return [axis.annotate(text, xy=(x, y), textcoords='offset points', From 522afa0c1d3658c7b22f888a1bb701073755ad6e Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 3 Oct 2019 19:09:43 +0200 Subject: [PATCH 30/52] Datashade fixes (#4021) --- holoviews/operation/datashader.py | 32 +++++++++++++++++------------ holoviews/plotting/bokeh/element.py | 7 +++++-- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/holoviews/operation/datashader.py b/holoviews/operation/datashader.py index d68cd9e42e..22cad94009 100644 --- a/holoviews/operation/datashader.py +++ b/holoviews/operation/datashader.py @@ -25,8 +25,11 @@ from ..core.sheetcoords import BoundingBox from ..core.util import ( LooseVersion, basestring, cftime_types, cftime_to_timestamp, - datetime_types, dt_to_int, get_param_values) -from ..element import (Image, Path, Curve, RGB, Graph, TriMesh, QuadMesh, Contours) + datetime_types, dt_to_int, get_param_values, max_range) + datetime_types, dt_to_int, get_param_values, max_range) +from ..element import (Image, Path, Curve, RGB, Graph, TriMesh, + QuadMesh, Contours, Spikes, Area, Spread, + Scatter, Points) from ..streams import RangeXY, PlotSize ds_version = LooseVersion(ds.__version__) @@ -604,12 +607,12 @@ def _process(self, element, key=None): if isinstance(y0, datetime_types): y0, y1 = dt_to_int(y0, 'ns'), dt_to_int(y1, 'ns') exspan, eyspan = (x1-x0), (y1-y0) - if np.isfinite(exspan) and exspan > 0: - width = min([int((xspan/exspan) * len(coords[0])), width]) + if np.isfinite(exspan) and exspan > 0 and xspan > 0: + width = max([min([int((xspan/exspan) * len(coords[0])), width]), 1]) else: width = 0 - if np.isfinite(eyspan) and eyspan > 0: - height = min([int((yspan/eyspan) * len(coords[1])), height]) + if np.isfinite(eyspan) and eyspan > 0 and yspan > 0: + height = max([min([int((yspan/eyspan) * len(coords[1])), height]), 1]) else: height = 0 xunit = float(xspan)/width if width else 0 @@ -628,8 +631,10 @@ def _process(self, element, key=None): params = dict(bounds=bbox) if width == 0 or height == 0: - if width == 0: params['xdensity'] = 1 - if height == 0: params['ydensity'] = 1 + if width == 0: + params['xdensity'] = 1 + if height == 0: + params['ydensity'] = 1 return element.clone((xs, ys, np.zeros((height, width))), **params) cvs = ds.Canvas(plot_width=width, plot_height=height, @@ -838,13 +843,14 @@ class rasterize(AggregationOperation): (TriMesh, trimesh_rasterize), (QuadMesh, quadmesh_rasterize), (lambda x: (isinstance(x, NdOverlay) and - issubclass(x.type, Dataset) - and not issubclass(x.type, Image)), + issubclass(x.type, (Scatter, Points, Curve, Path))), aggregate), (Contours, contours_rasterize), - (lambda x: (isinstance(x, Dataset) and - (not isinstance(x, Image))), - aggregate)] + (Graph, aggregate), + (Scatter, aggregate), + (Points, aggregate), + (Curve, aggregate), + (Path, aggregate)] def _process(self, element, key=None): for predicate, transform in self._transforms: diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index eb8fddcb07..3bdeedb2f7 100644 --- 
a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -497,12 +497,15 @@ def _plot_properties(self, key, element): # Sync the plot size on dynamic plots to support accurate # scaling of dimension ranges plot_size = [s for s in self.streams if isinstance(s, PlotSize)] + callbacks = [c for c in self.callbacks if isinstance(c, PlotSizeCallback)] if plot_size: stream = plot_size[0] - stream.add_subscriber(self._update_size) + elif callbacks: + stream = callbacks[0].streams[0] else: - stream = PlotSize(subscribers=[self._update_size]) + stream = PlotSize() self.callbacks.append(PlotSizeCallback(self, [stream], None)) + stream.add_subscriber(self._update_size) plot_props = { 'align': self.align, From 0f1560d4472e185460eb5e74b2820e2f7627b0a3 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Thu, 3 Oct 2019 19:29:19 +0200 Subject: [PATCH 31/52] Fix .to.distribution (#4023) --- holoviews/element/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/holoviews/element/__init__.py b/holoviews/element/__init__.py index a4f51d6a00..411101e78a 100644 --- a/holoviews/element/__init__.py +++ b/holoviews/element/__init__.py @@ -43,6 +43,8 @@ def distribution(self, dim=None, groupby=[], **kwargs): 'if no value dimensions are defined ') if groupby: reindexed = self._element.reindex(groupby, [dim]) + kwargs['kdims'] = dim + kwargs['vdims'] = None return reindexed.groupby(groupby, HoloMap, Distribution, **kwargs) else: element = self._element From b0bf2a8c597e6ebb8ebff800986529955eb3c09f Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 4 Oct 2019 14:50:46 +0200 Subject: [PATCH 32/52] Fix issues in .apply (#4025) --- holoviews/core/accessors.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/holoviews/core/accessors.py b/holoviews/core/accessors.py index 70fe76d9dd..36717838a1 100644 --- a/holoviews/core/accessors.py +++ b/holoviews/core/accessors.py @@ -65,6 +65,8 @@ def __call__(self, function, streams=[], link_inputs=True, dynamic=None, **kwarg 'and setting dynamic=False is only ' 'possible if key dimensions define ' 'a discrete parameter space.') + if not len(samples): + return self._obj[samples] return HoloMap(self._obj[samples]).apply( function, streams, link_inputs, dynamic, **kwargs) @@ -92,11 +94,13 @@ def function(object, **kwargs): ) if dynamic is None: - dynamic = (bool(streams) or isinstance(self._obj, DynamicMap) or - util.is_param_method(function, has_deps=True) or - params or dependent_kws) + is_dynamic = (bool(streams) or isinstance(self._obj, DynamicMap) or + util.is_param_method(function, has_deps=True) or + params or dependent_kws) + else: + is_dynamic = dynamic - if (applies or isinstance(self._obj, HoloMap)) and dynamic: + if (applies or isinstance(self._obj, HoloMap)) and is_dynamic: return Dynamic(self._obj, operation=function, streams=streams, kwargs=kwargs, link_inputs=link_inputs) elif applies: From d77c7db8f4fd2440f239374104236a983963512a Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 4 Oct 2019 18:04:20 +0200 Subject: [PATCH 33/52] d8b463a3a9287296d35fbc53565fcfb7ccc2cfdf --- holoviews/operation/datashader.py | 233 +++++++++++++++--------------- 1 file changed, 120 insertions(+), 113 deletions(-) diff --git a/holoviews/operation/datashader.py b/holoviews/operation/datashader.py index 22cad94009..b3eaf99758 100644 --- a/holoviews/operation/datashader.py +++ b/holoviews/operation/datashader.py @@ -21,7 +21,7 @@ from ..core import (Operation, Element, Dimension, NdOverlay, CompositeOverlay, 
Dataset, Overlay) -from ..core.data import PandasInterface, XArrayInterface +from ..core.data import PandasInterface, XArrayInterface, DaskInterface from ..core.sheetcoords import BoundingBox from ..core.util import ( LooseVersion, basestring, cftime_types, cftime_to_timestamp, @@ -117,16 +117,34 @@ def instance(self_or_cls,**params): def _get_sampling(self, element, x, y): target = self.p.target + if not isinstance(x, list) and x is not None: + x = [x] + if not isinstance(y, list) and y is not None: + y = [y] + if target: - x_range, y_range = target.range(x), target.range(y) + x0, y0, x1, y1 = target.bounds.lbrt() + x_range, y_range = (x0, x1), (y0, y1) height, width = target.dimension_values(2, flat=False).shape else: - if x is None or y is None: + if x is None: x_range = self.p.x_range or (-0.5, 0.5) y_range = self.p.y_range or (-0.5, 0.5) else: - if self.p.expand or not self.p.x_range: - x_range = self.p.x_range or element.range(x) + x0, x1 = self.p.x_range + ex0, ex1 = max_range([element.range(xd) for xd in x]) + x_range = (np.min([np.max([x0, ex0]), ex1]), + np.max([np.min([x1, ex1]), ex0])) + + if (y is None and ndim == 2): + y_range = self.p.y_range or default or (-0.5, 0.5) + elif self.p.expand or not self.p.y_range: + y_range = self.p.y_range or (max_range([element.range(yd) for yd in y]) + if default is None else default) + else: + y0, y1 = self.p.y_range + if default is None: + ey0, ey1 = max_range([element.range(yd) for yd in y]) else: x0, x1 = self.p.x_range ex0, ex1 = element.range(x) @@ -149,7 +167,7 @@ def _get_sampling(self, element, x, y): xtype = 'datetime' elif not np.isfinite(xstart) and not np.isfinite(xend): xstart, xend = 0, 0 - if element.get_dimension_type(x) in datetime_types: + if x and element.get_dimension_type(x[0]) in datetime_types: xtype = 'datetime' x_range = (xstart, xend) @@ -159,7 +177,7 @@ def _get_sampling(self, element, x, y): ytype = 'datetime' elif not np.isfinite(ystart) and not np.isfinite(yend): ystart, yend = 0, 0 - if element.get_dimension_type(y) in datetime_types: + if y and element.get_dimension_type(y[0]) in datetime_types: ytype = 'datetime' y_range = (ystart, yend) @@ -343,28 +361,105 @@ def get_agg_data(cls, obj, category=None): return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph - def _aggregate_ndoverlay(self, element, agg_fn): - """ - Optimized aggregation for NdOverlay objects by aggregating each - Element in an NdOverlay individually avoiding having to concatenate - items in the NdOverlay. Works by summing sum and count aggregates and - applying appropriate masking for NaN values. Mean aggregation - is also supported by dividing sum and count aggregates. count_cat - aggregates are grouped by the categorical dimension and a separate - aggregate for each category is generated. 
- """ - # Compute overall bounds - x, y = element.last.dimensions()[0:2] - info = self._get_sampling(element, x, y) - (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info + def _process(self, element, key=None): + agg_fn = self._get_aggregator(element) + category = agg_fn.column if isinstance(agg_fn, ds.count_cat) else None + + if overlay_aggregate.applies(element, agg_fn): + params = dict( + {p: v for p, v in self.get_param_values() if p != 'name'}, + dynamic=False, **{p: v for p, v in self.p.items() + if p not in ('name', 'dynamic')}) + return overlay_aggregate(element, **params) + + if element._plot_id in self._precomputed: + x, y, data, glyph = self._precomputed[element._plot_id] + else: + x, y, data, glyph = self.get_agg_data(element, category) + + if self.p.precompute: + self._precomputed[element._plot_id] = x, y, data, glyph + (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = self._get_sampling(element, x, y) + ((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype) + + params = self._get_agg_params(element, x, y, agg_fn, (x0, y0, x1, y1)) + + if x is None or y is None or width == 0 or height == 0: + return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params) + elif not getattr(data, 'interface', None) is DaskInterface and not len(data): + empty_val = 0 if isinstance(agg_fn, ds.count) else np.NaN + xarray = xr.DataArray(np.full((height, width), empty_val), + dims=[y.name, x.name], coords={x.name: xs, y.name: ys}) + return self.p.element_type(xarray, **params) + + cvs = ds.Canvas(plot_width=width, plot_height=height, + x_range=x_range, y_range=y_range) + + dfdata = PandasInterface.as_dframe(data) + agg = getattr(cvs, glyph)(dfdata, x.name, y.name, agg_fn) + if 'x_axis' in agg.coords and 'y_axis' in agg.coords: + agg = agg.rename({'x_axis': x, 'y_axis': y}) if xtype == 'datetime': - x_range = tuple((np.array(x_range)/1e3).astype('datetime64[us]')) + agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]') if ytype == 'datetime': - y_range = tuple((np.array(y_range)/1e3).astype('datetime64[us]')) + agg[y.name] = (agg[y.name]/1e3).astype('datetime64[us]') + + if agg.ndim == 2: + # Replacing x and y coordinates to avoid numerical precision issues + eldata = agg if ds_version > '0.5.0' else (xs, ys, agg.data) + return self.p.element_type(eldata, **params) + else: + layers = {} + for c in agg.coords[agg_fn.column].data: + cagg = agg.sel(**{agg_fn.column: c}) + eldata = cagg if ds_version > '0.5.0' else (xs, ys, cagg.data) + layers[c] = self.p.element_type(eldata, **params) + return NdOverlay(layers, kdims=[data.get_dimension(agg_fn.column)]) + + + +class overlay_aggregate(aggregate): + """ + Optimized aggregation for NdOverlay objects by aggregating each + Element in an NdOverlay individually avoiding having to concatenate + items in the NdOverlay. Works by summing sum and count aggregates and + applying appropriate masking for NaN values. Mean aggregation + is also supported by dividing sum and count aggregates. count_cat + aggregates are grouped by the categorical dimension and a separate + aggregate for each category is generated. 
+ """ + + @classmethod + def applies(cls, element, agg_fn): + return (isinstance(element, NdOverlay) and + ((isinstance(agg_fn, (ds.count, ds.sum, ds.mean)) and + (agg_fn.column is None or agg_fn.column not in element.kdims)) or + (isinstance(agg_fn, ds.count_cat) and agg_fn.column in element.kdims))) + + + def _process(self, element, key=None): + agg_fn = self._get_aggregator(element) + + if not self.applies(element, agg_fn): + raise ValueError('overlay_aggregate only handles aggregation ' + 'of NdOverlay types with count, sum or mean ' + 'reduction.') + + # Compute overall bounds + dims = element.last.dimensions()[0:2] + ndims = len(dims) + if ndims == 1: + x, y = dims[0], None + else: + x, y = dims + + info = self._get_sampling(element, x, y, ndims) + (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info + ((x0, x1), (y0, y1)), _ = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype) agg_params = dict({k: v for k, v in dict(self.get_param_values(), **self.p).items() if k in aggregate.param}, - x_range=x_range, y_range=y_range) - bbox = BoundingBox(points=[(x_range[0], y_range[0]), (x_range[1], y_range[1])]) + x_range=(x0, x1), y_range=(y0, y1)) + bbox = (x0, y0, x1, y1) # Optimize categorical counts by aggregating them individually if isinstance(agg_fn, ds.count_cat): @@ -426,94 +521,6 @@ def _aggregate_ndoverlay(self, element, agg_fn): return agg.clone(bounds=bbox) - def _process(self, element, key=None): - agg_fn = self._get_aggregator(element) - category = agg_fn.column if isinstance(agg_fn, ds.count_cat) else None - - if (isinstance(element, NdOverlay) and - ((isinstance(agg_fn, (ds.count, ds.sum, ds.mean)) and - (agg_fn.column is None or agg_fn.column not in element.kdims)) or - (isinstance(agg_fn, ds.count_cat) and agg_fn.column in element.kdims))): - return self._aggregate_ndoverlay(element, agg_fn) - - if element._plot_id in self._precomputed: - x, y, data, glyph = self._precomputed[element._plot_id] - else: - x, y, data, glyph = self.get_agg_data(element, category) - - if self.p.precompute: - self._precomputed[element._plot_id] = x, y, data, glyph - (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = self._get_sampling(element, x, y) - - (x0, x1), (y0, y1) = x_range, y_range - if xtype == 'datetime': - x0, x1 = (np.array([x0, x1])/1e3).astype('datetime64[us]') - xs = (xs/1e3).astype('datetime64[us]') - if ytype == 'datetime': - y0, y1 = (np.array([y0, y1])/1e3).astype('datetime64[us]') - ys = (ys/1e3).astype('datetime64[us]') - bounds = (x0, y0, x1, y1) - params = dict(get_param_values(element), kdims=[x, y], - datatype=['xarray'], bounds=bounds) - - column = agg_fn.column if agg_fn else None - if column: - dims = [d for d in element.dimensions('ranges') if d == column] - if not dims: - raise ValueError("Aggregation column %s not found on %s element. " - "Ensure the aggregator references an existing " - "dimension." 
% (column,element)) - name = '%s Count' % column if isinstance(agg_fn, ds.count_cat) else column - vdims = [dims[0](name)] - else: - vdims = Dimension('Count') - params['vdims'] = vdims - - if x is None or y is None or width == 0 or height == 0: - x = x.name if x else 'x' - y = y.name if x else 'y' - xarray = xr.DataArray(np.full((height, width), np.NaN), - dims=[y, x], coords={x: xs, y: ys}) - if width == 0: - params['xdensity'] = 1 - if height == 0: - params['ydensity'] = 1 - el = self.p.element_type(xarray, **params) - if isinstance(agg_fn, ds.count_cat): - vals = element.dimension_values(agg_fn.column, expanded=False) - dim = element.get_dimension(agg_fn.column) - return NdOverlay({v: el for v in vals}, dim) - return el - elif not len(data): - xarray = xr.DataArray(np.full((height, width), np.NaN), - dims=[y.name, x.name], coords={x.name: xs, y.name: ys}) - return self.p.element_type(xarray, **params) - - cvs = ds.Canvas(plot_width=width, plot_height=height, - x_range=x_range, y_range=y_range) - - dfdata = PandasInterface.as_dframe(data) - agg = getattr(cvs, glyph)(dfdata, x.name, y.name, agg_fn) - if 'x_axis' in agg.coords and 'y_axis' in agg.coords: - agg = agg.rename({'x_axis': x, 'y_axis': y}) - if xtype == 'datetime': - agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]') - if ytype == 'datetime': - agg[y.name] = (agg[y.name]/1e3).astype('datetime64[us]') - - if agg.ndim == 2: - # Replacing x and y coordinates to avoid numerical precision issues - eldata = agg if ds_version > '0.5.0' else (xs, ys, agg.data) - return self.p.element_type(eldata, **params) - else: - layers = {} - for c in agg.coords[column].data: - cagg = agg.sel(**{column: c}) - eldata = cagg if ds_version > '0.5.0' else (xs, ys, cagg.data) - layers[c] = self.p.element_type(eldata, **dict(params, vdims=vdims)) - return NdOverlay(layers, kdims=[data.get_dimension(column)]) - - class regrid(AggregationOperation): """ From a12ebc3a20250ce899ae3aaa973fd6f64625a0df Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 4 Oct 2019 18:58:12 +0200 Subject: [PATCH 34/52] Fixes for user guides (#4027) --- examples/user_guide/15-Large_Data.ipynb | 8 +++++--- examples/user_guide/17-Dashboards.ipynb | 10 +++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/examples/user_guide/15-Large_Data.ipynb b/examples/user_guide/15-Large_Data.ipynb index 2569ec7612..4f3f6ae09f 100644 --- a/examples/user_guide/15-Large_Data.ipynb +++ b/examples/user_guide/15-Large_Data.ipynb @@ -21,10 +21,12 @@ "metadata": {}, "outputs": [], "source": [ + "import datashader as ds\n", "import numpy as np\n", "import holoviews as hv\n", + "\n", "from holoviews import opts\n", - "import datashader as ds\n", + "\n", "from holoviews.operation.datashader import datashade, shade, dynspread, rasterize\n", "from holoviews.operation import decimate\n", "\n", @@ -473,7 +475,7 @@ "rasterizable = [hv.RGB(np.dstack([r,g,b])), hv.HSV(np.dstack([g,b,r]))]\n", "\n", "hv.Layout([dynspread(datashade(e.relabel(e.__class__.name))) for e in shadeable] + \n", - " [ rasterize(e.relabel(e.__class__.name)) for e in rasterizable]).cols(6)" + " [ rasterize(e.relabel(e.__class__.name)) for e in rasterizable]).opts(shared_axes=False).cols(6)" ] }, { @@ -570,5 +572,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/examples/user_guide/17-Dashboards.ipynb b/examples/user_guide/17-Dashboards.ipynb index f2fdcd7229..3c18049e5b 100644 --- a/examples/user_guide/17-Dashboards.ipynb +++ b/examples/user_guide/17-Dashboards.ipynb @@ 
-38,7 +38,7 @@ "def load_symbol(symbol, variable='adj_close', **kwargs):\n", " df = pd.DataFrame(getattr(stocks, symbol))\n", " df['date'] = df.date.astype('datetime64[ns]')\n", - " return hv.Curve(df, ('date', 'Date'), variable)\n", + " return hv.Curve(df, ('date', 'Date'), variable).opts(framewise=True)\n", "\n", "stock_symbols = ['AAPL', 'IBM', 'FB', 'GOOG', 'MSFT']\n", "dmap = hv.DynamicMap(load_symbol, kdims='Symbol').redim.values(Symbol=stock_symbols)\n", @@ -100,7 +100,7 @@ "\n", "stock_dmap = hv.DynamicMap(explorer.load_symbol)\n", "\n", - "pn.Row(explorer.param, stock_dmap)" + "pn.Row(pn.panel(explorer.param, parameters=['symbol', 'variable']), stock_dmap)" ] }, { @@ -153,9 +153,9 @@ "\n", "dmap = hv.DynamicMap(load_symbol_cb)\n", "\n", - "smoothed = rolling(stock_dmap, rolling_window=rolling_window.param.value)\n", + "smoothed = rolling(dmap, rolling_window=rolling_window.param.value)\n", "\n", - "pn.Row(pn.WidgetBox('## Stock Explorer', symbol, variable, window), smoothed.opts(width=500))" + "pn.Row(pn.WidgetBox('## Stock Explorer', symbol, variable, rolling_window), smoothed.opts(width=500, framewise=True))" ] }, { @@ -226,5 +226,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } From a980ed52d5c80c2680fc2aae76f722d3c401be66 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 4 Oct 2019 19:10:10 +0200 Subject: [PATCH 35/52] Add support for fast QuadMesh rasterization (#4020) --- holoviews/core/data/__init__.py | 12 +++++- holoviews/core/data/interface.py | 19 +++++---- holoviews/operation/datashader.py | 43 ++++++++++++++++++++- holoviews/tests/operation/testdatashader.py | 4 +- 4 files changed, 67 insertions(+), 11 deletions(-) diff --git a/holoviews/core/data/__init__.py b/holoviews/core/data/__init__.py index 879a3a17b2..008cf53922 100644 --- a/holoviews/core/data/__init__.py +++ b/holoviews/core/data/__init__.py @@ -836,19 +836,29 @@ def to(self): return self._conversion_interface(self) - def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides): + def clone(self, data=None, shared_data=True, new_type=None, link=True, + *args, **overrides): """Clones the object, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to + link (bool, optional): Whether clone should be linked + Determines whether Streams and Links attached to + original object will be inherited. *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned object """ + if data is None and shared_data: + # Allows datatype conversions + data = self + if link: + overrides['plot_id'] = self._plot_id + if 'datatype' not in overrides: datatypes = [self.interface.datatype] + self.datatype overrides['datatype'] = list(util.unique_iterator(datatypes)) diff --git a/holoviews/core/data/interface.py b/holoviews/core/data/interface.py index ff5cd25cd5..63f19e7516 100644 --- a/holoviews/core/data/interface.py +++ b/holoviews/core/data/interface.py @@ -7,7 +7,7 @@ from .. 
import util from ..element import Element -from ..ndmapping import OrderedDict, NdMapping +from ..ndmapping import NdMapping def get_array_types(): @@ -198,14 +198,19 @@ def initialize(cls, eltype, data, kdims, vdims, datatype=None): if not datatype: datatype = eltype.datatype - if data.interface.datatype in datatype and data.interface.datatype in eltype.datatype: + interface = data.interface + if interface.datatype in datatype and interface.datatype in eltype.datatype: data = data.data - elif data.interface.gridded and any(cls.interfaces[dt].gridded for dt in datatype): - gridded = OrderedDict([(kd.name, data.dimension_values(kd.name, expanded=False)) - for kd in data.kdims]) + elif interface.gridded and any(cls.interfaces[dt].gridded for dt in datatype): + new_data = [] + for kd in data.kdims: + irregular = interface.irregular(data, kd) + coords = data.dimension_values(kd.name, expanded=irregular, + flat=not irregular) + new_data.append(coords) for vd in data.vdims: - gridded[vd.name] = data.dimension_values(vd, flat=False) - data = tuple(gridded.values()) + new_data.append(interface.values(data, vd, flat=False, compute=False)) + data = tuple(new_data) else: data = tuple(data.columns().values()) elif isinstance(data, Element): diff --git a/holoviews/operation/datashader.py b/holoviews/operation/datashader.py index b3eaf99758..7c89f1e376 100644 --- a/holoviews/operation/datashader.py +++ b/holoviews/operation/datashader.py @@ -810,7 +810,48 @@ class quadmesh_rasterize(trimesh_rasterize): """ def _precompute(self, element, agg): - return super(quadmesh_rasterize, self)._precompute(element.trimesh(), agg) + if ds_version <= '0.7.0': + return super(quadmesh_rasterize, self)._precompute(element.trimesh(), agg) + + def _process(self, element, key=None): + if ds_version <= '0.7.0': + return super(quadmesh_rasterize, self)._process(element, key) + + if element.interface.datatype != 'xarray': + element = element.clone(datatype=['xarray']) + data = element.data + + x, y = element.kdims + agg_fn = self._get_aggregator(element) + info = self._get_sampling(element, x, y) + (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info + if xtype == 'datetime': + data[x.name] = data[x.name].astype('datetime64[us]').astype('int64') + if ytype == 'datetime': + data[y.name] = data[y.name].astype('datetime64[us]').astype('int64') + + # Compute bounds (converting datetimes) + ((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform( + x_range, y_range, xs, ys, xtype, ytype + ) + params = dict(get_param_values(element), datatype=['xarray'], + bounds=(x0, y0, x1, y1)) + + if width == 0 or height == 0: + return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params) + + cvs = ds.Canvas(plot_width=width, plot_height=height, + x_range=x_range, y_range=y_range) + + vdim = getattr(agg_fn, 'column', element.vdims[0].name) + agg = cvs.quadmesh(data[vdim], x.name, y.name, agg_fn) + xdim, ydim = list(agg.dims)[:2][::-1] + if xtype == "datetime": + agg[xdim] = (agg[xdim]/1e3).astype('datetime64[us]') + if ytype == "datetime": + agg[ydim] = (agg[ydim]/1e3).astype('datetime64[us]') + + return Image(agg, **params) diff --git a/holoviews/tests/operation/testdatashader.py b/holoviews/tests/operation/testdatashader.py index 2f0c90bb00..9f3b06b0d4 100644 --- a/holoviews/tests/operation/testdatashader.py +++ b/holoviews/tests/operation/testdatashader.py @@ -408,14 +408,14 @@ def test_rasterize_trimesh_string_aggregator(self): def test_rasterize_quadmesh(self): qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 
1], [2, 3]]))) img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z')) - image = Image(np.array([[2., 3., np.NaN], [0, 1, np.NaN], [np.NaN, np.NaN, np.NaN]]), + image = Image(np.array([[2, 3, 3], [2, 3, 3], [0, 1, 1]]), bounds=(-.5, -.5, 1.5, 1.5)) self.assertEqual(img, image) def test_rasterize_quadmesh_string_aggregator(self): qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 1], [2, 3]]))) img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator='mean') - image = Image(np.array([[2., 3., np.NaN], [0, 1, np.NaN], [np.NaN, np.NaN, np.NaN]]), + image = Image(np.array([[2, 3, 3], [2, 3, 3], [0, 1, 1]]), bounds=(-.5, -.5, 1.5, 1.5)) self.assertEqual(img, image) From 05ba7373db883cb103f9102c4a3472b657281577 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Fri, 4 Oct 2019 19:49:56 +0200 Subject: [PATCH 36/52] Fixed flake --- holoviews/tests/plotting/bokeh/testlayoutplot.py | 1 - 1 file changed, 1 deletion(-) diff --git a/holoviews/tests/plotting/bokeh/testlayoutplot.py b/holoviews/tests/plotting/bokeh/testlayoutplot.py index 6d01fb499e..28bcebe11a 100644 --- a/holoviews/tests/plotting/bokeh/testlayoutplot.py +++ b/holoviews/tests/plotting/bokeh/testlayoutplot.py @@ -299,7 +299,6 @@ def cb(aname): self.assertEqual(stream._subscribers, []) def test_layout_shared_axes_disabled(self): - from holoviews.plotting.bokeh import CurvePlot layout = (Curve([1, 2, 3]) + Curve([10, 20, 30])).opts(shared_axes=False) plot = bokeh_renderer.get_plot(layout) cp1, cp2 = plot.subplots[(0, 0)].subplots['main'], plot.subplots[(0, 1)].subplots['main'] From 8fa325f26c2639312a48f2fe044a343f81613085 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Sat, 5 Oct 2019 00:39:10 +0200 Subject: [PATCH 37/52] Allow passing widget as dependency (#4028) --- holoviews/core/util.py | 4 ++++ holoviews/util/__init__.py | 10 ++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/holoviews/core/util.py b/holoviews/core/util.py index f1ce35a38e..6b54907536 100644 --- a/holoviews/core/util.py +++ b/holoviews/core/util.py @@ -1500,6 +1500,10 @@ def resolve_dependent_kwargs(kwargs): """ resolved = {} for k, v in kwargs.items(): + if 'panel' in sys.modules: + from panel.widgets.base import Widget + if isinstance(v, Widget): + v = v.param.value if is_param_method(v, has_deps=True): v = v() elif isinstance(v, param.Parameter) and isinstance(v.owner, param.Parameterized): diff --git a/holoviews/util/__init__.py b/holoviews/util/__init__.py index 8278805968..d5c1f8d0b9 100644 --- a/holoviews/util/__init__.py +++ b/holoviews/util/__init__.py @@ -878,8 +878,14 @@ def _get_streams(self, map_obj, watch=True): stream.update(**{reverse.get(k, k): v for k, v in updates.items()}) streams.append(stream) - params = {k: v for k, v in self.p.kwargs.items() if isinstance(v, param.Parameter) - and isinstance(v.owner, param.Parameterized)} + params = {} + for k, v in self.p.kwargs.items(): + if 'panel' in sys.modules: + from panel.widgets.base import Widget + if isinstance(v, Widget): + v = v.param.value + if isinstance(v, param.Parameter) and isinstance(v.owner, param.Parameterized): + params[k] = v streams += Params.from_params(params) # Inherit dimensioned streams From 7faf5acb63ac411cbbacfb5123ec3ffafc728850 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Sat, 5 Oct 2019 13:40:34 +0200 Subject: [PATCH 38/52] Fixed issues with matplotlib AxesImage legend (#4031) --- holoviews/plotting/mpl/element.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/holoviews/plotting/mpl/element.py 
b/holoviews/plotting/mpl/element.py index fd81cb9705..fb65f1de47 100644 --- a/holoviews/plotting/mpl/element.py +++ b/holoviews/plotting/mpl/element.py @@ -10,6 +10,7 @@ from matplotlib import ticker from matplotlib.dates import date2num +from matplotlib.image import AxesImage from ...core import util from ...core import (OrderedDict, NdOverlay, DynamicMap, Dataset, @@ -1054,6 +1055,9 @@ def _adjust_legend(self, overlay, axis): for handle, label in zip(all_handles, all_labels): # Ensure that artists with multiple handles are supported if isinstance(handle, list): handle = tuple(handle) + handle = tuple(h for h in handle if not isinstance(h, AxesImage)) + if not handle: + continue if handle and (handle not in data) and label and label not in used_labels: data[handle] = label used_labels.append(label) From 6185407c875b8adf93d28c7e2a25a6890c87bec7 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Mon, 7 Oct 2019 01:43:41 +0200 Subject: [PATCH 39/52] Handle unqualified Options objects (#4032) --- holoviews/core/accessors.py | 11 +++++++++-- holoviews/core/options.py | 4 +++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/holoviews/core/accessors.py b/holoviews/core/accessors.py index 36717838a1..0f48eee61c 100644 --- a/holoviews/core/accessors.py +++ b/holoviews/core/accessors.py @@ -452,7 +452,14 @@ def _dynamicmap_opts(self, *args, **kwargs): def _base_opts(self, *args, **kwargs): - apply_groups, options, new_kwargs = util.deprecated_opts_signature(args, kwargs) + from .options import Options + + new_args = [] + for arg in args: + if isinstance(arg, Options) and arg.key is None: + arg = arg(key=type(self._obj).__name__) + new_args.append(arg) + apply_groups, options, new_kwargs = util.deprecated_opts_signature(new_args, kwargs) # By default do not clone in .opts method clone = kwargs.get('clone', None) @@ -463,4 +470,4 @@ def _base_opts(self, *args, **kwargs): return opts.apply_groups(self._obj, **dict(kwargs, **new_kwargs)) kwargs['clone'] = False if clone is None else clone - return self._obj.options(*args, **kwargs) + return self._obj.options(*new_args, **kwargs) diff --git a/holoviews/core/options.py b/holoviews/core/options.py index 4d2fce1be2..ca704a196f 100644 --- a/holoviews/core/options.py +++ b/holoviews/core/options.py @@ -490,9 +490,11 @@ def __call__(self, allowed_keywords=None, **kwargs): """ Create a new Options object that inherits the parent options. 
""" + if 'key' not in kwargs: + kwargs['key'] = self.key allowed_keywords=self.allowed_keywords if allowed_keywords in [None,[]] else allowed_keywords inherited_style = dict(allowed_keywords=allowed_keywords, **kwargs) - return self.__class__(key=self.key, **dict(self.kwargs, **inherited_style)) + return self.__class__(**dict(self.kwargs, **inherited_style)) def keys(self): From af843275ee2bc43e9c85ecc89beccbde5a597472 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Mon, 7 Oct 2019 18:15:56 +0200 Subject: [PATCH 40/52] Fix Image element bounds check (#4035) --- holoviews/element/raster.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/holoviews/element/raster.py b/holoviews/element/raster.py index a8e6def795..09a69a1742 100644 --- a/holoviews/element/raster.py +++ b/holoviews/element/raster.py @@ -312,8 +312,9 @@ def __init__(self, data, kdims=None, vdims=None, bounds=None, extents=None, bounds = BoundingBox(points=((l, b), (r, t))) data_bounds = None - if self.interface is ImageInterface and not isinstance(data, np.ndarray): + if self.interface is ImageInterface and not isinstance(data, (np.ndarray, Image)): data_bounds = self.bounds.lbrt() + l, b, r, t = bounds.lbrt() xdensity = xdensity if xdensity else util.compute_density(l, r, dim1, self._time_unit) ydensity = ydensity if ydensity else util.compute_density(b, t, dim2, self._time_unit) From 9ca256ee378d5db20516a4189e96fdb58c9b332d Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Mon, 7 Oct 2019 23:04:50 +0200 Subject: [PATCH 41/52] Fixed compatibility with new bokeh 1.4 legend options (#4036) --- holoviews/plotting/bokeh/annotation.py | 2 +- holoviews/plotting/bokeh/chart.py | 11 ++++++----- holoviews/plotting/bokeh/element.py | 20 +++++++++++++------- holoviews/plotting/bokeh/graphs.py | 3 ++- holoviews/plotting/bokeh/path.py | 3 ++- holoviews/plotting/bokeh/stats.py | 8 +++++--- 6 files changed, 29 insertions(+), 18 deletions(-) diff --git a/holoviews/plotting/bokeh/annotation.py b/holoviews/plotting/bokeh/annotation.py index c307c01322..715d56e169 100644 --- a/holoviews/plotting/bokeh/annotation.py +++ b/holoviews/plotting/bokeh/annotation.py @@ -251,7 +251,7 @@ def _init_glyph(self, plot, mapping, properties, key): """ Returns a Bokeh glyph object. """ - properties.pop('legend', None) + properties = {k: v for k, v in properties.items() if 'legend' not in k} if key == 'arrow': properties.pop('source') arrow_end = mapping.pop('arrow_end') diff --git a/holoviews/plotting/bokeh/chart.py b/holoviews/plotting/bokeh/chart.py index 3b9eb6e7c3..095178c2d2 100644 --- a/holoviews/plotting/bokeh/chart.py +++ b/holoviews/plotting/bokeh/chart.py @@ -20,7 +20,7 @@ from .element import ElementPlot, ColorbarPlot, LegendPlot from .styles import (expand_batched_style, line_properties, fill_properties, mpl_to_bokeh, rgb2hex) -from .util import categorize_array +from .util import bokeh_version, categorize_array class PointPlot(LegendPlot, ColorbarPlot): @@ -532,7 +532,7 @@ def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object. 
""" - properties.pop('legend', None) + properties = {k: v for k, v in properties.items() if 'legend' not in k} for prop in ['color', 'alpha']: if prop not in properties: continue @@ -897,13 +897,14 @@ def _add_color_data(self, ds, ranges, style, cdim, data, mapping, factors, color # Enable legend if colormapper is categorical cmapper = cmapping['color']['transform'] + legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend' if ('color' in cmapping and self.show_legend and isinstance(cmapper, CategoricalColorMapper)): - mapping['legend'] = cdim.name + mapping[legend_prop] = cdim.name if not (self.stacked or self.stack_index) and ds.ndims > 1: - cmapping.pop('legend', None) - mapping.pop('legend', None) + cmapping.pop(legend_prop, None) + mapping.pop(legend_prop, None) # Merge data and mappings mapping.update(cmapping) diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index 3bdeedb2f7..633cab66c3 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -36,7 +36,7 @@ legend_dimensions, line_properties, mpl_to_bokeh, property_prefixes, rgba_tuple, text_properties, validate) from .util import ( - TOOL_TYPES, date_to_integer, decode_bytes, get_tab_title, + TOOL_TYPES, bokeh_version, date_to_integer, decode_bytes, get_tab_title, glyph_order, py2js_tickformatter, recursive_model_update, theme_attr_json, cds_column_replace, hold_policy, match_dim_specs, compute_layout_properties, wrap_formatter) @@ -1021,7 +1021,7 @@ def _apply_transforms(self, element, data, ranges, style, group=None): data[k] = val # If color is not valid colorspec add colormapper - numeric = isinstance(val, util.arraylike_types) and val.dtype.kind in 'uifMm' + numeric = isinstance(val, util.arraylike_types) and val.dtype.kind in 'uifMmb' if ('color' in k and isinstance(val, util.arraylike_types) and (numeric or not validate('color', val))): kwargs = {} @@ -1031,12 +1031,14 @@ def _apply_transforms(self, element, data, ranges, style, group=None): factors = ranges[range_key]['factors'] else: factors = util.unique_array(val) + if isinstance(val, util.arraylike_types) and val.dtype.kind == 'b': + factors = factors.astype(str) kwargs['factors'] = factors cmapper = self._get_colormapper(v, element, ranges, dict(style), name=k+'_color_mapper', group=group, **kwargs) categorical = isinstance(cmapper, CategoricalColorMapper) - if categorical and val.dtype.kind in 'ifMu': + if categorical and val.dtype.kind in 'ifMub': if v.dimension in element: formatter = element.get_dimension(v.dimension).pprint_value else: @@ -1046,7 +1048,8 @@ def _apply_transforms(self, element, data, ranges, style, group=None): else: field = k if categorical and getattr(self, 'show_legend', False): - new_style['legend'] = field + legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend' + new_style[legend_prop] = field key = {'field': field, 'transform': cmapper} new_style[k] = key @@ -1096,7 +1099,8 @@ def _glyph_properties(self, plot, element, source, ranges, style, group=None): else: legend = element.label if legend and self.overlaid: - properties['legend'] = value(legend) + legend_prop = 'legend_label' if bokeh_version >= '1.3.5' else 'legend' + properties[legend_prop] = value(legend) return properties @@ -1123,7 +1127,8 @@ def _update_glyph(self, renderer, properties, mapping, glyph, source, data): allowed_properties = glyph.properties() properties = mpl_to_bokeh(properties) merged = dict(properties, **mapping) - legend = merged.pop('legend', None) + 
legend_prop = 'legend_label' if bokeh_version >= '1.3.5' else 'legend' + legend = merged.pop(legend_prop, None) columns = list(source.data.keys()) glyph_updates = [] for glyph_type in ('', 'selection_', 'nonselection_', 'hover_', 'muted_'): @@ -1752,7 +1757,8 @@ def _get_color_data(self, element, ranges, style, name='color', factors=None, co data[field] = cdata if factors is not None and self.show_legend: - mapping['legend'] = {'field': field} + legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend' + mapping[legend_prop] = {'field': field} mapping[name] = {'field': field, 'transform': mapper} return data, mapping diff --git a/holoviews/plotting/bokeh/graphs.py b/holoviews/plotting/bokeh/graphs.py index 89df1004e9..add2c7390f 100644 --- a/holoviews/plotting/bokeh/graphs.py +++ b/holoviews/plotting/bokeh/graphs.py @@ -300,7 +300,8 @@ def _get_graph_properties(self, plot, element, data, mapping, ranges, style): continue properties[k] = p mappings.update(mapping.pop(key, {})) - properties = {p: v for p, v in properties.items() if p not in ('legend', 'source')} + properties = {p: v for p, v in properties.items() + if p != 'source' and 'legend' not in p} properties.update(mappings) # Initialize graph layout diff --git a/holoviews/plotting/bokeh/path.py b/holoviews/plotting/bokeh/path.py index d3a33722ac..c81772f4d4 100644 --- a/holoviews/plotting/bokeh/path.py +++ b/holoviews/plotting/bokeh/path.py @@ -244,7 +244,8 @@ def get_data(self, element, ranges, style): cmapper = self._get_colormapper(cdim, element, ranges, style, factors) mapping[self._color_style] = {'field': dim_name, 'transform': cmapper} if self.show_legend: - mapping['legend'] = dim_name + legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend' + mapping[legend_prop] = dim_name return data, mapping, style def _init_glyph(self, plot, mapping, properties): diff --git a/holoviews/plotting/bokeh/stats.py b/holoviews/plotting/bokeh/stats.py index 2a00673a73..61818a99f1 100644 --- a/holoviews/plotting/bokeh/stats.py +++ b/holoviews/plotting/bokeh/stats.py @@ -17,7 +17,7 @@ from .element import CompositeElementPlot, ColorbarPlot, LegendPlot from .path import PolygonPlot from .styles import fill_properties, line_properties -from .util import decode_bytes +from .util import bokeh_version, decode_bytes class DistributionPlot(AreaPlot): @@ -95,7 +95,8 @@ def _get_axis_dims(self, element): def _glyph_properties(self, plot, element, source, ranges, style, group=None): properties = dict(style, source=source) if self.show_legend and not element.kdims and self.overlaid: - properties['legend'] = element.label + legend_prop = 'legend_label' if bokeh_version >= '1.3.5' else 'legend' + properties[legend_prop] = element.label return properties def _apply_transforms(self, element, data, ranges, style, group=None): @@ -281,7 +282,8 @@ def get_data(self, element, ranges, style): factors = list(unique_iterator(factors)) if self.show_legend: - vbar_map['legend'] = cdim.name + legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend' + vbar_map[legend_prop] = cdim.name return data, mapping, style From 2867d4ae3ff2c2ce0dbfe8db428315235157cb7c Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 11:39:15 +0200 Subject: [PATCH 42/52] Raise error if data does not contain sufficient dimensions (#4040) --- holoviews/core/data/pandas.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/holoviews/core/data/pandas.py b/holoviews/core/data/pandas.py index ed25cf143d..78c95a4d55 100644 
--- a/holoviews/core/data/pandas.py +++ b/holoviews/core/data/pandas.py @@ -121,7 +121,14 @@ def init(cls, eltype, data, kdims, vdims): if isinstance(data, tuple): data = [np.array(d) if not isinstance(d, np.ndarray) else d for d in data] - if not cls.expanded(data): + min_dims = (kdim_param.bounds[0] or 0) + (vdim_param.bounds[0] or 0) + if any(d.ndim > 1 for d in data): + raise ValueError('PandasInterface cannot interpret multi-dimensional arrays.') + elif len(data) < min_dims: + raise DataError('Data contains fewer columns than the %s element expects. Expected ' + 'at least %d columns but found only %d columns.' % + (eltype.__name__, min_dims, len(data))) + elif not cls.expanded(data): raise ValueError('PandasInterface expects data to be of uniform shape.') data = pd.DataFrame(dict(zip(columns, data)), columns=columns) elif ((isinstance(data, dict) and any(c not in data for c in columns)) or From 6e8ad4d46697e436f1ae5539669a49a62383e645 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 12:44:06 +0200 Subject: [PATCH 43/52] Ensure that Stream triggering state is handled correctly on bokeh server (#4041) --- holoviews/plotting/bokeh/plot.py | 15 ++++++++++++++- holoviews/plotting/plot.py | 1 + 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/holoviews/plotting/bokeh/plot.py b/holoviews/plotting/bokeh/plot.py index 8dd547e0bc..266cb60a88 100644 --- a/holoviews/plotting/bokeh/plot.py +++ b/holoviews/plotting/bokeh/plot.py @@ -178,10 +178,23 @@ def _construct_callbacks(self): def refresh(self, **kwargs): if self.renderer.mode == 'server' and curdoc() is not self.document: + self._triggering += [s for p in self.traverse(lambda x: x, [Plot]) + for s in p.streams if s._triggering] # If we do not have the Document lock, schedule refresh as callback self.document.add_next_tick_callback(self.refresh) - else: + return + + for s in self._triggering: + s._triggering = True + try: super(BokehPlot, self).refresh(**kwargs) + except Exception as e: + raise e + finally: + # Reset triggering state + for s in self._triggering: + s._triggering = False + self._triggering = [] def push(self): """ diff --git a/holoviews/plotting/plot.py b/holoviews/plotting/plot.py index 5d0b0c3c3a..a1c170d029 100644 --- a/holoviews/plotting/plot.py +++ b/holoviews/plotting/plot.py @@ -245,6 +245,7 @@ def __init__(self, keys=None, dimensions=None, layout_dimensions=None, self.comm = comm self._force = False self._updated = False # Whether the plot should be marked as updated + self._triggering = [] params = {k: v for k, v in params.items() if k in self.params()} super(DimensionedPlot, self).__init__(**params) From 982ee7ceb07aeb0e04ca5f9c6c5d5ad00afa4ed5 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 13:44:45 +0200 Subject: [PATCH 44/52] Ensure that deprecated mpl rcparam options are not applied (#4042) --- holoviews/plotting/mpl/renderer.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/holoviews/plotting/mpl/renderer.py b/holoviews/plotting/mpl/renderer.py index 10e864564b..d4b598b945 100644 --- a/holoviews/plotting/mpl/renderer.py +++ b/holoviews/plotting/mpl/renderer.py @@ -280,7 +280,13 @@ def _compute_bbox(self, fig, kw): @classmethod @contextmanager def state(cls): - deprecated = ['text.latex.unicode', 'examples.directory'] + deprecated = [ + 'text.latex.unicode', + 'examples.directory', + 'savefig.frameon', # deprecated in MPL 3.1, to be removed in 3.3 + 'verbose.level', # deprecated in MPL 3.1, to be removed in 3.3 + 'verbose.fileo', # 
deprecated in MPL 3.1, to be removed in 3.3 + ] old_rcparams = {k: mpl.rcParams[k] for k in mpl.rcParams.keys() if mpl_version < '3.0' or k not in deprecated} From 2d73815e46de072524a06ac34d02fb464a21bc6d Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 13:58:38 +0200 Subject: [PATCH 45/52] Added CHANGELOG --- CHANGELOG.md | 79 ++++++++++++++++++++++++++++++++++++++++++++++ doc/releases.rst | 82 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 161 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 272405f4eb..a13816a78c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,82 @@ +Version 1.12.6 +============== + +This is a minor release containing a large number of bug fixes thanks +to the contributions from @joelostblom, @ahuang11, @chbrandt, +@randomstuff, @jbednar and @philippjfr. It also contains a number of +enhancements. This is the last planned release in the 1.12.x series. + +Enhancements: + +- Ensured that shared_axes option on layout plots is respected across backends + ([#3410](https://github.com/pyviz/holoviews/issues/3410)) +- Allow plotting partially irregular (curvilinear) mesh + ([#3952](https://github.com/pyviz/holoviews/issues/3952)) +- Add support for dependent functions in dynamic operations + ([#3975](https://github.com/pyviz/holoviews/issues/3975), + [#3980](https://github.com/pyviz/holoviews/issues/3980)) +- Add support for fast QuadMesh rasterization with datashader >= 0.8 + ([#4020](https://github.com/pyviz/holoviews/issues/4020)) +- Allow passing Panel widgets as operation parameter + ([#4028](https://github.com/pyviz/holoviews/issues/4028)) + +Bug fixes: + +- Fixed issue rounding datetimes in Curve step interpolation + ([#3958](https://github.com/pyviz/holoviews/issues/3958)) +- Fix resampling of categorical colorcet colormaps + ([#3977](https://github.com/pyviz/holoviews/issues/3977)) +- Ensure that changing the Stream source deletes the old source + ([#3978](https://github.com/pyviz/holoviews/issues/3978)) +- Ensure missing hover tool does not break plot + ([#3981](https://github.com/pyviz/holoviews/issues/3981)) +- Ensure .apply work correctly on HoloMaps + ([#3989](https://github.com/pyviz/holoviews/issues/3989), + [#4025](https://github.com/pyviz/holoviews/issues/4025)) +- Ensure Grid axes are always aligned in bokeh + ([#3916](https://github.com/pyviz/holoviews/issues/3916)) +- Fix hover tool on Image and Raster plots with inverted axis + ([#4010](https://github.com/pyviz/holoviews/issues/4010)) +- Ensure that DynamicMaps are still linked to streams after groupby + ([#4012](https://github.com/pyviz/holoviews/issues/4012)) +- Using hv.renderer no longer switches backends + ([#4013](https://github.com/pyviz/holoviews/issues/4013)) +- Ensure that Points/Scatter categorizes data correctly when axes are inverted + ([#4014](https://github.com/pyviz/holoviews/issues/4014)) +- Fixed error creating legend for matplotlib Image artists + ([#4031](https://github.com/pyviz/holoviews/issues/4031)) +- Ensure that unqualified Options objects are supported + ([#4032](https://github.com/pyviz/holoviews/issues/4032)) +- Fix bounds check when constructing Image with ImageInterface + ([#4035](https://github.com/pyviz/holoviews/issues/4035)) +- Ensure elements cannot be constructed with wrong number of columns + ([#4040](https://github.com/pyviz/holoviews/issues/4040)) +- Ensure streaming data works on bokeh server + ([#4041](https://github.com/pyviz/holoviews/issues/4041)) + +Compatibility: + +- Ensure HoloViews is fully 
compatible with xarray 0.13.0 + ([#3973](https://github.com/pyviz/holoviews/issues/3973)) +- Ensure that deprecated matplotlib 3.1 rcparams do not warn + ([#4042](https://github.com/pyviz/holoviews/issues/4042)) +- Ensure compatibility with new legend options in bokeh 1.4.0 + ([#4036](https://github.com/pyviz/holoviews/issues/4036)) + +Version 1.12.5 +============== + +This is a very minor bug fix release ensuring compatibility with recent +releases of dask. + +Compatibility: + +- Ensure that HoloViews can be imported when dask is installed but + dask.dataframe is not. + ([#3900](https://github.com/pyviz/holoviews/issues/3900)) +- Fix for rendering Scatter3D with matplotlib 3.1 + ([#3898](https://github.com/pyviz/holoviews/issues/3898)) + Version 1.12.4 ============== diff --git a/doc/releases.rst b/doc/releases.rst index c1d7f1bef6..04017aa806 100644 --- a/doc/releases.rst +++ b/doc/releases.rst @@ -15,6 +15,88 @@ Version 1.12 ~~~~~~~~~~~~ + +Version 1.12.6 +************** + +This is a minor release containing a large number of bug fixes thanks +to the contributions from @joelostblom, @ahuang11, @chbrandt, +@randomstuff, @jbednar and @philippjfr. It also contains a number of +enhancements. This is the last planned release in the 1.12.x series. + +Enhancements: + +* Ensured that shared_axes option on layout plots is respected across backends + (`#3410 `_) +* Allow plotting partially irregular (curvilinear) mesh + (`#3952 `_) +* Add support for dependent functions in dynamic operations + (`#3975 `_, + `#3980 `_) +* Add support for fast QuadMesh rasterization with datashader >= 0.8 + (`#4020 `_) +* Allow passing Panel widgets as operation parameter + (`#4028 `_) + +Bug fixes: + +* Fixed issue rounding datetimes in Curve step interpolation + (`#3958 `_) +* Fix resampling of categorical colorcet colormaps + (`#3977 `_) +* Ensure that changing the Stream source deletes the old source + (`#3978 `_) +* Ensure missing hover tool does not break plot + (`#3981 `_) +* Ensure .apply work correctly on HoloMaps + (`#3989 `_, + `#4025 `_) +* Ensure Grid axes are always aligned in bokeh + (`#3916 `_) +* Fix hover tool on Image and Raster plots with inverted axis + (`#4010 `_) +* Ensure that DynamicMaps are still linked to streams after groupby + (`#4012 `_) +* Using hv.renderer no longer switches backends + (`#4013 `_) +* Ensure that Points/Scatter categorizes data correctly when axes are inverted + (`#4014 `_) +* Fixed error creating legend for matplotlib Image artists + (`#4031 `_) +* Ensure that unqualified Options objects are supported + (`#4032 `_) +* Fix bounds check when constructing Image with ImageInterface + (`#4035 `_) +* Ensure elements cannot be constructed with wrong number of columns + (`#4040 `_) +* Ensure streaming data works on bokeh server + (`#4041 `_) + +Compatibility: + +* Ensure HoloViews is fully compatible with xarray 0.13.0 + (`#3973 `_) +* Ensure that deprecated matplotlib 3.1 rcparams do not warn + (`#4042 `_) +* Ensure compatibility with new legend options in bokeh 1.4.0 + (`#4036 `_) + + +Version 1.12.5 +************** + + +This is a very minor bug fix release ensuring compatibility with recent +releases of dask. + +Compatibility: + +* Ensure that HoloViews can be imported when dask is installed but + dask.dataframe is not. 
+ (`#3900 `_) +* Fix for rendering Scatter3D with matplotlib 3.1 + (`#3898 `_) + Version 1.12.4 ************** From ff96be40f6e9619c4e8db5d5065deb34578cb252 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 14:09:43 +0200 Subject: [PATCH 46/52] Fix flake --- holoviews/plotting/bokeh/plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/holoviews/plotting/bokeh/plot.py b/holoviews/plotting/bokeh/plot.py index 266cb60a88..4f0ef08ffb 100644 --- a/holoviews/plotting/bokeh/plot.py +++ b/holoviews/plotting/bokeh/plot.py @@ -178,7 +178,7 @@ def _construct_callbacks(self): def refresh(self, **kwargs): if self.renderer.mode == 'server' and curdoc() is not self.document: - self._triggering += [s for p in self.traverse(lambda x: x, [Plot]) + self._triggering += [s for p in self.traverse(lambda x: x, [DimensionedPlot]) for s in p.streams if s._triggering] # If we do not have the Document lock, schedule refresh as callback self.document.add_next_tick_callback(self.refresh) From bd0f05cb1ccbc370bcde5fb908e8eabca438824c Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 14:31:07 +0200 Subject: [PATCH 47/52] Fixed flake --- holoviews/tests/teststreams.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/holoviews/tests/teststreams.py b/holoviews/tests/teststreams.py index cce76114aa..957bf85b55 100644 --- a/holoviews/tests/teststreams.py +++ b/holoviews/tests/teststreams.py @@ -7,7 +7,7 @@ import param from holoviews.core.spaces import DynamicMap from holoviews.core.util import LooseVersion, pd -from holoviews.element import Points +from holoviews.element import Points, Curve from holoviews.element.comparison import ComparisonTestCase from holoviews.streams import * # noqa (Test all available streams) from holoviews.util import Dynamic From 9721cf502d8d994da3e2b29eb115ff14afd78081 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 18:04:06 +0200 Subject: [PATCH 48/52] Fixed bug in new bokeh legend option handling (#4043) --- holoviews/plotting/bokeh/element.py | 2 +- .../tests/plotting/bokeh/testoverlayplot.py | 28 +++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index 633cab66c3..a164a158b7 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -1100,7 +1100,7 @@ def _glyph_properties(self, plot, element, source, ranges, style, group=None): legend = element.label if legend and self.overlaid: legend_prop = 'legend_label' if bokeh_version >= '1.3.5' else 'legend' - properties[legend_prop] = value(legend) + properties[legend_prop] = legend return properties diff --git a/holoviews/tests/plotting/bokeh/testoverlayplot.py b/holoviews/tests/plotting/bokeh/testoverlayplot.py index 9cf6868399..ef38bab5f8 100644 --- a/holoviews/tests/plotting/bokeh/testoverlayplot.py +++ b/holoviews/tests/plotting/bokeh/testoverlayplot.py @@ -209,7 +209,7 @@ class TestLegends(TestBokehPlot): def test_overlay_legend(self): overlay = Curve(range(10), label='A') * Curve(range(10), label='B') plot = bokeh_renderer.get_plot(overlay) - legend_labels = [l.label['value'] for l in plot.state.legend[0].items] + legend_labels = [l.label for l in plot.state.legend[0].items] self.assertEqual(legend_labels, ['A', 'B']) def test_dynamic_subplot_remapping(self): @@ -220,7 +220,7 @@ def cb(X): plot = bokeh_renderer.get_plot(dmap) plot.update((3,)) legend_labels = [item.label for item in 
plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': '1'}, {'value': '2'}]) + self.assertEqual(legend_labels, ['1', '2']) colors = Cycle().values for i, (subplot, color) in enumerate(zip(plot.subplots.values(), colors[3:])): self.assertEqual(subplot.handles['glyph'].line_color, color) @@ -232,25 +232,25 @@ def test_holomap_legend_updates(self): for i in range(3)}) plot = bokeh_renderer.get_plot(hmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'C'}, {'value': 'B'}]) + self.assertEqual(legend_labels, ['C', 'B']) plot.update((1,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'B'}, {'value': 'D'}]) + self.assertEqual(legend_labels, ['B', 'D']) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'B'}, {'value': 'E'}]) + self.assertEqual(legend_labels, ['B', 'E']) def test_holomap_legend_updates_varying_lengths(self): hmap = HoloMap({i: Overlay([Curve([1, 2, j], label=chr(65+j)) for j in range(i)]) for i in range(1, 4)}) plot = bokeh_renderer.get_plot(hmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'A'}]) + self.assertEqual(legend_labels, ['A']) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}]) + self.assertEqual(legend_labels, ['A', 'B']) plot.update((3,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}, {'value': 'C'}]) + self.assertEqual(legend_labels, ['A', 'B', 'C']) def test_dynamicmap_legend_updates(self): hmap = HoloMap({i: Curve([1, 2, 3], label=chr(65+i+2)) * Curve([1, 2, 3], label='B') @@ -258,26 +258,26 @@ def test_dynamicmap_legend_updates(self): dmap = Dynamic(hmap) plot = bokeh_renderer.get_plot(dmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'C'}, {'value': 'B'}]) + self.assertEqual(legend_labels, ['C', 'B']) plot.update((1,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'D'}, {'value': 'B'}]) + self.assertEqual(legend_labels, ['D', 'B']) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'E'}, {'value': 'B'}]) + self.assertEqual(legend_labels, ['E', 'B']) def test_dynamicmap_legend_updates_add_dynamic_plots(self): hmap = HoloMap({i: Overlay([Curve([1, 2, j], label=chr(65+j)) for j in range(i)]) for i in range(1, 4)}) dmap = Dynamic(hmap) plot = bokeh_renderer.get_plot(dmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'A'}]) + self.assertEqual(legend_labels, ['A']) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}]) + self.assertEqual(legend_labels, ['A', 'B']) plot.update((3,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}, {'value': 'C'}]) + self.assertEqual(legend_labels, ['A', 'B', 'C']) def test_dynamicmap_ndoverlay_shrink_number_of_items(self): selected = Stream.define('selected', items=3)() From 
9e0c6cae24a5aa82a411e2c42abf9d0fbf44a31b Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 19:13:27 +0200 Subject: [PATCH 49/52] Various fixes --- holoviews/operation/datashader.py | 1 - holoviews/plotting/mpl/element.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/holoviews/operation/datashader.py b/holoviews/operation/datashader.py index 7c89f1e376..8c35ab5332 100644 --- a/holoviews/operation/datashader.py +++ b/holoviews/operation/datashader.py @@ -26,7 +26,6 @@ from ..core.util import ( LooseVersion, basestring, cftime_types, cftime_to_timestamp, datetime_types, dt_to_int, get_param_values, max_range) - datetime_types, dt_to_int, get_param_values, max_range) from ..element import (Image, Path, Curve, RGB, Graph, TriMesh, QuadMesh, Contours, Spikes, Area, Spread, Scatter, Points) diff --git a/holoviews/plotting/mpl/element.py b/holoviews/plotting/mpl/element.py index fb65f1de47..9ef4623023 100644 --- a/holoviews/plotting/mpl/element.py +++ b/holoviews/plotting/mpl/element.py @@ -269,7 +269,7 @@ def _set_axis_formatter(self, axis, dim, formatter): Set axis formatter based on dimension formatter. """ if isinstance(dim, list): dim = dim[0] - if formatter is not None: + if formatter is not None or dim is None: pass elif dim.value_format: formatter = dim.value_format From 794d75e89dd08146d0624b0d4fd72edaecffed5c Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 19:39:49 +0200 Subject: [PATCH 50/52] Various fixes for datashader --- holoviews/operation/datashader.py | 76 +++++++++++++++++++++++-------- 1 file changed, 57 insertions(+), 19 deletions(-) diff --git a/holoviews/operation/datashader.py b/holoviews/operation/datashader.py index 8c35ab5332..1a2c0c544e 100644 --- a/holoviews/operation/datashader.py +++ b/holoviews/operation/datashader.py @@ -27,8 +27,7 @@ LooseVersion, basestring, cftime_types, cftime_to_timestamp, datetime_types, dt_to_int, get_param_values, max_range) from ..element import (Image, Path, Curve, RGB, Graph, TriMesh, - QuadMesh, Contours, Spikes, Area, Spread, - Scatter, Points) + QuadMesh, Contours, Scatter, Points) from ..streams import RangeXY, PlotSize ds_version = LooseVersion(ds.__version__) @@ -114,7 +113,8 @@ def instance(self_or_cls,**params): inst._precomputed = {} return inst - def _get_sampling(self, element, x, y): + + def _get_sampling(self, element, x, y, ndim=2, default=None): target = self.p.target if not isinstance(x, list) and x is not None: x = [x] @@ -128,7 +128,8 @@ def _get_sampling(self, element, x, y): else: if x is None: x_range = self.p.x_range or (-0.5, 0.5) - y_range = self.p.y_range or (-0.5, 0.5) + elif self.p.expand or not self.p.x_range: + x_range = self.p.x_range or max_range([element.range(xd) for xd in x]) else: x0, x1 = self.p.x_range ex0, ex1 = max_range([element.range(xd) for xd in x]) @@ -145,18 +146,9 @@ def _get_sampling(self, element, x, y): if default is None: ey0, ey1 = max_range([element.range(yd) for yd in y]) else: - x0, x1 = self.p.x_range - ex0, ex1 = element.range(x) - x_range = (np.min([np.max([x0, ex0]), ex1]), - np.max([np.min([x1, ex1]), ex0])) - - if self.p.expand or not self.p.y_range: - y_range = self.p.y_range or element.range(y) - else: - y0, y1 = self.p.y_range - ey0, ey1 = element.range(y) - y_range = (np.min([np.max([y0, ey0]), ey1]), - np.max([np.min([y1, ey1]), ey0])) + ey0, ey1 = default + y_range = (np.min([np.max([y0, ey0]), ey1]), + np.max([np.min([y1, ey1]), ey0])) width, height = self.p.width, self.p.height (xstart, xend), (ystart, yend) = 
x_range, y_range @@ -168,7 +160,6 @@ def _get_sampling(self, element, x, y): xstart, xend = 0, 0 if x and element.get_dimension_type(x[0]) in datetime_types: xtype = 'datetime' - x_range = (xstart, xend) ytype = 'numeric' if isinstance(ystart, datetime_types) or isinstance(yend, datetime_types): @@ -178,7 +169,6 @@ def _get_sampling(self, element, x, y): ystart, yend = 0, 0 if y and element.get_dimension_type(y[0]) in datetime_types: ytype = 'datetime' - y_range = (ystart, yend) # Compute highest allowed sampling density xspan = xend - xstart @@ -198,7 +188,18 @@ def _get_sampling(self, element, x, y): xs, ys = (np.linspace(xstart+xunit/2., xend-xunit/2., width), np.linspace(ystart+yunit/2., yend-yunit/2., height)) - return (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) + return ((xstart, xend), (ystart, yend)), (xs, ys), (width, height), (xtype, ytype) + + + def _dt_transform(self, x_range, y_range, xs, ys, xtype, ytype): + (xstart, xend), (ystart, yend) = x_range, y_range + if xtype == 'datetime': + xstart, xend = (np.array([xstart, xend])/1e3).astype('datetime64[us]') + xs = (xs/1e3).astype('datetime64[us]') + if ytype == 'datetime': + ystart, yend = (np.array([ystart, yend])/1e3).astype('datetime64[us]') + ys = (ys/1e3).astype('datetime64[us]') + return ((xstart, xend), (ystart, yend)), (xs, ys) @@ -260,6 +261,43 @@ def _get_aggregator(self, element, add_field=True): return agg + def _empty_agg(self, element, x, y, width, height, xs, ys, agg_fn, **params): + x = x.name if x else 'x' + y = y.name if x else 'y' + xarray = xr.DataArray(np.full((height, width), np.NaN), + dims=[y, x], coords={x: xs, y: ys}) + if width == 0: + params['xdensity'] = 1 + if height == 0: + params['ydensity'] = 1 + el = self.p.element_type(xarray, **params) + if isinstance(agg_fn, ds.count_cat): + vals = element.dimension_values(agg_fn.column, expanded=False) + dim = element.get_dimension(agg_fn.column) + return NdOverlay({v: el for v in vals}, dim) + return el + + + def _get_agg_params(self, element, x, y, agg_fn, bounds): + params = dict(get_param_values(element), kdims=[x, y], + datatype=['xarray'], bounds=bounds) + + column = agg_fn.column if agg_fn else None + if column: + dims = [d for d in element.dimensions('ranges') if d == column] + if not dims: + raise ValueError("Aggregation column %s not found on %s element. " + "Ensure the aggregator references an existing " + "dimension." 
% (column,element)) + name = '%s Count' % column if isinstance(agg_fn, ds.count_cat) else column + vdims = [dims[0].clone(name)] + else: + vdims = Dimension('Count') + params['vdims'] = vdims + return params + + + class aggregate(AggregationOperation): """ aggregate implements 2D binning for any valid HoloViews Element From ff5ee8671ce401dd011ef173e12393a5c8551566 Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Tue, 8 Oct 2019 22:43:39 +0200 Subject: [PATCH 51/52] Ensure bokeh legend fields are handled correctly (#4048) --- holoviews/plotting/bokeh/element.py | 20 ++++++++++--- .../tests/plotting/bokeh/testoverlayplot.py | 29 +++++++++---------- .../tests/plotting/bokeh/testpathplot.py | 3 +- 3 files changed, 32 insertions(+), 20 deletions(-) diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index a164a158b7..d227b51fbb 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -1127,8 +1127,11 @@ def _update_glyph(self, renderer, properties, mapping, glyph, source, data): allowed_properties = glyph.properties() properties = mpl_to_bokeh(properties) merged = dict(properties, **mapping) - legend_prop = 'legend_label' if bokeh_version >= '1.3.5' else 'legend' - legend = merged.pop(legend_prop, None) + legend_props = ('legend_field', 'legend_label') if bokeh_version >= '1.3.5' else ('legend',) + for lp in legend_props: + legend = merged.pop(lp, None) + if legend is not None: + break columns = list(source.data.keys()) glyph_updates = [] for glyph_type in ('', 'selection_', 'nonselection_', 'hover_', 'muted_'): @@ -1170,7 +1173,16 @@ def _update_glyph(self, renderer, properties, mapping, glyph, source, data): for leg in self.state.legend: for item in leg.items: if renderer in item.renderers: - item.label = legend + if isinstance(legend, dict): + label = legend + elif lp != 'legend': + prop = 'value' if 'label' in lp else 'field' + label = {prop: legend} + elif isinstance(item.label, dict): + label = {list(item.label)[0]: legend} + else: + label = {'value': legend} + item.label = label for glyph, update in glyph_updates: glyph.update(**update) @@ -1758,7 +1770,7 @@ def _get_color_data(self, element, ranges, style, name='color', factors=None, co data[field] = cdata if factors is not None and self.show_legend: legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend' - mapping[legend_prop] = {'field': field} + mapping[legend_prop] = field mapping[name] = {'field': field, 'transform': mapper} return data, mapping diff --git a/holoviews/tests/plotting/bokeh/testoverlayplot.py b/holoviews/tests/plotting/bokeh/testoverlayplot.py index ef38bab5f8..b7e58ac698 100644 --- a/holoviews/tests/plotting/bokeh/testoverlayplot.py +++ b/holoviews/tests/plotting/bokeh/testoverlayplot.py @@ -209,7 +209,7 @@ class TestLegends(TestBokehPlot): def test_overlay_legend(self): overlay = Curve(range(10), label='A') * Curve(range(10), label='B') plot = bokeh_renderer.get_plot(overlay) - legend_labels = [l.label for l in plot.state.legend[0].items] + legend_labels = [l.label['value'] for l in plot.state.legend[0].items] self.assertEqual(legend_labels, ['A', 'B']) def test_dynamic_subplot_remapping(self): @@ -220,7 +220,7 @@ def cb(X): plot = bokeh_renderer.get_plot(dmap) plot.update((3,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['1', '2']) + self.assertEqual(legend_labels, [{'value': '1'}, {'value': '2'}]) colors = Cycle().values for i, (subplot, color) in 
enumerate(zip(plot.subplots.values(), colors[3:])): self.assertEqual(subplot.handles['glyph'].line_color, color) @@ -232,25 +232,25 @@ def test_holomap_legend_updates(self): for i in range(3)}) plot = bokeh_renderer.get_plot(hmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['C', 'B']) + self.assertEqual(legend_labels, [{'value': 'C'}, {'value': 'B'}]) plot.update((1,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['B', 'D']) + self.assertEqual(legend_labels, [{'value': 'B'}, {'value': 'D'}]) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['B', 'E']) + self.assertEqual(legend_labels, [{'value': 'B'}, {'value': 'E'}]) def test_holomap_legend_updates_varying_lengths(self): hmap = HoloMap({i: Overlay([Curve([1, 2, j], label=chr(65+j)) for j in range(i)]) for i in range(1, 4)}) plot = bokeh_renderer.get_plot(hmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['A']) + self.assertEqual(legend_labels, [{'value': 'A'}]) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['A', 'B']) + self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}]) plot.update((3,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['A', 'B', 'C']) + self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}, {'value': 'C'}]) def test_dynamicmap_legend_updates(self): hmap = HoloMap({i: Curve([1, 2, 3], label=chr(65+i+2)) * Curve([1, 2, 3], label='B') @@ -258,26 +258,26 @@ def test_dynamicmap_legend_updates(self): dmap = Dynamic(hmap) plot = bokeh_renderer.get_plot(dmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['C', 'B']) + self.assertEqual(legend_labels, [{'value': 'C'}, {'value': 'B'}]) plot.update((1,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['D', 'B']) + self.assertEqual(legend_labels, [{'value': 'D'}, {'value': 'B'}]) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['E', 'B']) + self.assertEqual(legend_labels, [{'value': 'E'}, {'value': 'B'}]) def test_dynamicmap_legend_updates_add_dynamic_plots(self): hmap = HoloMap({i: Overlay([Curve([1, 2, j], label=chr(65+j)) for j in range(i)]) for i in range(1, 4)}) dmap = Dynamic(hmap) plot = bokeh_renderer.get_plot(dmap) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['A']) + self.assertEqual(legend_labels, [{'value': 'A'}]) plot.update((2,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['A', 'B']) + self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}]) plot.update((3,)) legend_labels = [item.label for item in plot.state.legend[0].items] - self.assertEqual(legend_labels, ['A', 'B', 'C']) + self.assertEqual(legend_labels, [{'value': 'A'}, {'value': 'B'}, {'value': 'C'}]) def test_dynamicmap_ndoverlay_shrink_number_of_items(self): selected = Stream.define('selected', items=3)() @@ -287,4 +287,3 @@ def callback(items): plot = bokeh_renderer.get_plot(dmap) selected.event(items=2) self.assertEqual(len([r for r in plot.state.renderers if r.visible]), 2) - diff --git 
a/holoviews/tests/plotting/bokeh/testpathplot.py b/holoviews/tests/plotting/bokeh/testpathplot.py index 2f6e76c29e..7b2866c3ef 100644 --- a/holoviews/tests/plotting/bokeh/testpathplot.py +++ b/holoviews/tests/plotting/bokeh/testpathplot.py @@ -181,7 +181,8 @@ def test_path_continuously_varying_color_legend(self): path = Path(data, vdims="cat").opts(color="cat", cmap=dict(zip(levels, colors)), line_width=4, show_legend=True) plot = bokeh_renderer.get_plot(path) item = plot.state.legend[0].items[0] - self.assertEqual(item.label, 'color_str__') + legend = {'field': 'color_str__'} + self.assertEqual(item.label, legend) self.assertEqual(item.renderers, [plot.handles['glyph_renderer']]) From dfbeb6f57a105b35ba3c4425534d17717ee8ec5f Mon Sep 17 00:00:00 2001 From: Philipp Rudiger Date: Wed, 9 Oct 2019 00:49:58 +0200 Subject: [PATCH 52/52] Correctly detect depends fn dependencies (#4049) --- holoviews/streams.py | 2 +- holoviews/tests/teststreams.py | 14 ++++++++------ holoviews/util/__init__.py | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/holoviews/streams.py b/holoviews/streams.py index 916abbfbf8..99ec01e8e0 100644 --- a/holoviews/streams.py +++ b/holoviews/streams.py @@ -717,7 +717,7 @@ def _on_trigger(self): @property def hashkey(self): hashkey = {(p.owner, p.name): getattr(p.owner, p.name) for p in self.parameters} - hashkey = {self._rename.get((o, n), n): v for (o, n), v in hashkey.items() + hashkey = {' '.join([o.name, self._rename.get((o, n), n)]): v for (o, n), v in hashkey.items() if self._rename.get((o, n), True) is not None} hashkey['_memoize_key'] = self._memoize_counter return hashkey diff --git a/holoviews/tests/teststreams.py b/holoviews/tests/teststreams.py index 957bf85b55..ceabe77176 100644 --- a/holoviews/tests/teststreams.py +++ b/holoviews/tests/teststreams.py @@ -296,7 +296,7 @@ def test_param_stream_action(self): def subscriber(**kwargs): values.append(kwargs) self.assertEqual(set(stream.hashkey), - {'action', '_memoize_key'}) + {'%s action' % inner.name, '_memoize_key'}) stream.add_subscriber(subscriber) inner.action(inner) @@ -310,8 +310,9 @@ def test_param_stream_memoization(self): values = [] def subscriber(**kwargs): values.append(kwargs) - self.assertEqual(set(stream.hashkey), - {'action', 'x', '_memoize_key'}) + self.assertEqual( + set(stream.hashkey), + {'%s action' % inner.name, '%s x' % inner.name, '_memoize_key'}) stream.add_subscriber(subscriber) inner.action(inner) @@ -484,7 +485,7 @@ def test_dynamicmap_param_method_action_param(self): def subscriber(**kwargs): values.append(kwargs) self.assertEqual(set(stream.hashkey), - {'action', '_memoize_key'}) + {'%s action' % inner.name, '_memoize_key'}) stream.add_subscriber(subscriber) inner.action(inner) @@ -502,8 +503,9 @@ def test_dynamicmap_param_action_number_method_memoizes(self): values = [] def subscriber(**kwargs): values.append(kwargs) - self.assertEqual(set(stream.hashkey), - {'action', 'x', '_memoize_key'}) + self.assertEqual( + set(stream.hashkey), + {'%s action' % inner.name, '%s x' % inner.name, '_memoize_key'}) stream.add_subscriber(subscriber) stream.add_subscriber(lambda **kwargs: dmap[()]) diff --git a/holoviews/util/__init__.py b/holoviews/util/__init__.py index d5c1f8d0b9..e254436a2c 100644 --- a/holoviews/util/__init__.py +++ b/holoviews/util/__init__.py @@ -906,7 +906,7 @@ def _get_streams(self, map_obj, watch=True): streams.append(value) elif isinstance(value, FunctionType) and hasattr(value, '_dinfo'): dependencies = list(value._dinfo.get('dependencies', [])) - 
dependencies += list(value._dinfo.get('kwargs', {}).values()) + dependencies += list(value._dinfo.get('kw', {}).values()) params = [d for d in dependencies if isinstance(d, param.Parameter) and isinstance(d.owner, param.Parameterized)] streams.append(Params(parameters=params, watch_only=True))
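
For reference, a minimal, untested sketch of the usage pattern enabled by the widget-as-dependency support (#4028) and the dependency-detection fix (#4049) above: a Panel widget passed as a keyword argument to `.apply()` is resolved to its value parameter and turned into a stream, so the resulting DynamicMap re-renders when the widget changes. The `shift` helper, the slider name and its `offset` keyword are illustrative only and not part of the patches.

    import holoviews as hv
    import panel as pn

    hv.extension('bokeh')

    curve = hv.Curve(range(10))
    offset = pn.widgets.FloatSlider(name='offset', start=0, end=5, value=0)

    def shift(element, offset=0):
        # Shift the curve's y values by the current widget value
        return element.clone((element['x'], element['y'] + offset))

    # Passing the widget itself as a keyword argument; its value parameter is
    # watched, so the returned DynamicMap updates whenever the slider moves.
    dmap = curve.apply(shift, offset=offset)
    pn.Column(offset, dmap)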