Adds WeightedL1Norm to Functions (#1618)
* added WeightedL1Norm with unit test
* docstring and documentation changes
* fix soft_shrinkage for complex data
Signed-off-by: Edoardo Pasca <edo.paskino@gmail.com>
Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Co-authored-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
paskino authored Dec 15, 2023
1 parent 287026c commit a754e68
Showing 6 changed files with 356 additions and 61 deletions.
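The headline change is a new `weight` argument on `L1Norm`, which dispatches internally to a weighted implementation while keeping the public interface unchanged. Below is a minimal usage sketch (not part of the diff, and assuming an installed CIL build that contains this commit plus the usual `cil.framework.ImageGeometry` API); the geometry and the numeric values are purely illustrative.

```python
import numpy as np
from cil.framework import ImageGeometry
from cil.optimisation.functions import L1Norm

# small illustrative image and a constant, strictly positive weight array
ig = ImageGeometry(voxel_num_x=4, voxel_num_y=4)
x = ig.allocate('random')
w = 2.0 * np.ones(x.shape, dtype=np.float32)

f_plain = L1Norm()               # F(x) = ||x||_1
f_weighted = L1Norm(weight=w)    # F(x) = ||x . w||_1 = sum_i w_i |x_i|

print(f_plain(x), f_weighted(x))   # with w = 2 everywhere, the second value is twice the first

# proximal map: soft-shrinkage with threshold tau (tau*w in the weighted case)
x_shrunk = f_weighted.proximal(x, tau=0.1)
```

The choice between the unweighted and weighted code paths is made once in `L1Norm.__init__`, so `__call__`, `convex_conjugate` and `proximal` all forward to the selected implementation.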
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -1,4 +1,5 @@
* x.x.x
- Added a weight argument to the L1Norm function
- Allow reduction methods on the DataContainer class to accept an axis argument as a string matching the values in dimension_labels
- Added the functions `set_norms` and `get_norms` to the `BlockOperator` class
- Internal variable name change in BlockOperator to aid understanding
2 changes: 2 additions & 0 deletions NOTICE.txt
@@ -31,6 +31,7 @@ Institutions Key:
8 - UES Inc.
9 - Swansea University
10 - University of Warwick
11 - University of Helsinki

CIL Developers in date order:
Edoardo Pasca (2017 – present) - 1
@@ -55,6 +56,7 @@ Sam Tygier (2022) - 1
Andrew Sharits (2022) - 8
Kyle Pidgeon (2023) - 1
Letizia Protopapa (2023) - 1
Tommi Heikkilä (2023) - 11

CIL Advisory Board:
Llion Evans - 9
239 changes: 184 additions & 55 deletions Wrappers/Python/cil/optimisation/functions/L1Norm.py
@@ -20,27 +20,169 @@
from cil.optimisation.functions import Function
from cil.framework import BlockDataContainer
import numpy as np


def soft_shrinkage(x, tau, out=None):

r"""Returns the value of the soft-shrinkage operator at x.
Parameters
-----------
x : DataContainer
where to evaluate the soft-shrinkage operator.
tau : float, numpy ndarray, DataContainer
out : DataContainer, default None
where to store the result. If None, a new DataContainer is created.
Returns
--------
the value of the soft-shrinkage operator at x: DataContainer.
"""

    should_return = False
    if out is None:
        if x.dtype in [np.csingle, np.cdouble, np.clongdouble]:
            # complex data: create a complex container whose real part holds |x|
            out = x * 0
            outarr = out.as_array()
            outarr.real = x.abs().as_array()
            out.fill(outarr)
        else:
            out = x.abs()
        should_return = True
    else:
        if x.dtype in [np.csingle, np.cdouble, np.clongdouble]:
            outarr = out.as_array()
            outarr.real = x.abs().as_array()
            outarr.imag = 0
            out.fill(outarr)
        else:
            x.abs(out = out)
    out -= tau
    out.maximum(0, out = out)
    if x.dtype in [np.csingle, np.cdouble, np.clongdouble]:
        # complex data: shrink the magnitude and restore the phase
        out *= np.exp(1j*np.angle(x.as_array()))
    else:
        out *= x.sign()

    if should_return:
        return out

class L1Norm(Function):
r"""L1Norm function
Consider the following cases:
a) .. math:: F(x) = ||x||_{1}
b) .. math:: F(x) = ||x - b||_{1}
In the weighted case, :math:`w` is an array of positive weights.
a) .. math:: F(x) = ||x||_{L^1(w)}
b) .. math:: F(x) = ||x - b||_{L^1(w)}
with :math:`||x||_{L^1(w)} = || x \cdot w||_1 = \sum_{i=1}^{n} |x_i| w_i`.
Parameters
-----------
weight: DataContainer, numpy ndarray, default None
Array of positive weights. If :code:`None`, returns the unweighted L1 Norm.
b: DataContainer, default None
Translation of the function.
"""
def __init__(self, b=None, weight=None):
super(L1Norm, self).__init__(L=None)
if weight is None:
self.function = _L1Norm(b=b)
else:
self.function = _WeightedL1Norm(b=b, weight=weight)

def __call__(self, x):
r"""Returns the value of the L1Norm function at x.
.. math:: f(x) = ||x - b||_{L^1(w)}
"""
return self.function(x)

def convex_conjugate(self, x):
r"""Returns the value of the convex conjugate of the L1 Norm function at x.
This is the indicator of the unit :math:`L^{\infty}` norm:
a) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(x^{*})
b) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(x^{*}) + \langle x^{*},b\rangle
.. math:: \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(x^{*})
= \begin{cases}
0, \mbox{if } \|x^{*}\|_{\infty}\leq1\\
\infty, \mbox{otherwise}
\end{cases}
In the weighted case the convex conjugate is the indicator of the unit
:math:`L^{\infty}` norm.
See:
https://math.stackexchange.com/questions/1533217/convex-conjugate-of-l1-norm-function-with-weight
a) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{L^\infty(w^{-1})}\leq 1\}}(x^{*})
b) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{L^\infty(w^{-1})}\leq 1\}}(x^{*}) + \langle x^{*},b\rangle
with :math:`\|x\|_{L^\infty(w^{-1})} = \max_{i} \frac{|x_i|}{w_i}`.
Parameters
-----------
x : DataContainer
where to evaluate the convex conjugate of the L1 Norm function.
Returns
--------
the value of the convex conjugate of the L1 Norm function at x: float.
"""
return self.function.convex_conjugate(x)

def proximal(self, x, tau, out=None):
r"""Returns the value of the proximal operator of the L1 Norm function at x with scaling parameter `tau`.
Consider the following cases:
a) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}(x)
b) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}(x) + b
where,
.. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}(x) = \mathrm{sgn}(x) * \max\{ |x| - \tau, 0 \}
The weighted case follows from Example 6.23 in Chapter 6 of "First-Order Methods in Optimization"
by Amir Beck, SIAM 2017, https://archive.siam.org/books/mo25/mo25_ch6.pdf
a) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}_{\tau w}(x)
b) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}_{\tau w}(x) + b
Parameters
-----------
x: DataContainer
tau: float, ndarray, DataContainer
out: DataContainer, default None
If not None, the result will be stored in this object.
Returns
--------
The value of the proximal operator of the L1 norm function at x: DataContainer.
"""
return self.function.proximal(x, tau, out=out)


class _L1Norm(Function):

r"""L1Norm function
@@ -60,70 +202,26 @@ def __init__(self, **kwargs):
:param b: translation of the function
:type b: :code:`DataContainer`, optional
'''
super().__init__()
self.b = kwargs.get('b',None)

def __call__(self, x):

r"""Returns the value of the L1Norm function at x.
Consider the following cases:
a) .. math:: F(x) = ||x||_{1}
b) .. math:: F(x) = ||x - b||_{1}
"""

y = x
if self.b is not None:
y = x - self.b
return y.abs().sum()

    def convex_conjugate(self,x):
        r"""Returns the value of the convex conjugate of the L1Norm function at x.
        The convex conjugate of the L1 norm is the indicator function of the unit
        :math:`L^{\infty}` norm ball.
        Consider the following cases:
        a) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(x^{*})
        b) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(x^{*}) + \langle x^{*},b\rangle
        .. math:: \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(x^{*})
            = \begin{cases}
            0, \mbox{if } \|x^{*}\|_{\infty}\leq1\\
            \infty, \mbox{otherwise}
            \end{cases}
        """
        tmp = x.abs().max() - 1
        if tmp<=1e-5:
            if self.b is not None:
                return self.b.dot(x)
            else:
                return 0.
        return np.inf


def proximal(self, x, tau, out=None):

r"""Returns the value of the proximal operator of the L1Norm function at x.
Consider the following cases:
a) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}(x)
b) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}(x) + b
where,
.. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}(x) = \mathrm{sgn}(x) * \max\{ |x| - \tau, 0 \}
"""


if out is None:
if self.b is not None:
return self.b + soft_shrinkage(x - self.b, tau)
@@ -138,6 +236,40 @@ def proximal(self, x, tau, out=None):
soft_shrinkage(x, tau, out = out)


class _WeightedL1Norm(Function):

def __init__(self, weight, b=None):
super().__init__()
self.weight = weight
self.b = b

if np.min(weight) <= 0:
raise ValueError("Weights should be strictly positive!")

def __call__(self, x):
y = x*self.weight

if self.b is not None:
y -= self.b

return y.abs().sum()

def convex_conjugate(self,x):
tmp = (x.abs()/self.weight).max() - 1

if tmp<=1e-5:
if self.b is not None:
return self.b.dot(x)
else:
return 0.
return np.inf

def proximal(self, x, tau, out=None):
# weighted soft-shrinkage: scale tau by the weights, reuse the unweighted
# proximal operator, then restore tau
tau *= self.weight
ret = _L1Norm.proximal(self, x, tau, out=out)
tau /= self.weight
return ret

class MixedL11Norm(Function):

r"""MixedL11Norm function
@@ -186,6 +318,3 @@ def proximal(self, x, tau, out = None):
raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x)))

return soft_shrinkage(x, tau, out = out)
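The complex-data branch added to `soft_shrinkage` implements the standard complex soft-thresholding rule: shrink the magnitude by `tau` and keep the phase, i.e. `exp(1j*angle(x)) * max(|x| - tau, 0)`. The following plain-NumPy sketch of that rule is for illustration only and is independent of CIL's `DataContainer` machinery.

```python
import numpy as np

def soft_shrink_complex(x, tau):
    """Soft-shrinkage for complex arrays: shrink |x| by tau, keep the phase of x."""
    magnitude = np.maximum(np.abs(x) - tau, 0)
    return magnitude * np.exp(1j * np.angle(x))

x = np.array([3 + 4j, 0.1 - 0.1j, -2.0 + 0j])
y = soft_shrink_complex(x, tau=1.0)
print(y)                          # approx [2.4+3.2j, 0j, -1+0j]
print(np.angle(x), np.angle(y))   # phases agree wherever the magnitude survives the threshold
```

This matches what the `np.exp(1j*np.angle(...))` line in the diff does for complex data; for real data the function falls back to the usual `sign(x) * max(|x| - tau, 0)`.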



2 changes: 1 addition & 1 deletion Wrappers/Python/cil/optimisation/functions/__init__.py
@@ -24,7 +24,7 @@
from .Function import ConstantFunction
from .Function import ZeroFunction
from .Function import TranslateFunction
from .L1Norm import L1Norm, MixedL11Norm
from .L1Norm import L1Norm, MixedL11Norm, soft_shrinkage
from .L2NormSquared import L2NormSquared
from .L2NormSquared import WeightedL2NormSquared
from .LeastSquares import LeastSquares
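The `__init__.py` change re-exports `soft_shrinkage` from `cil.optimisation.functions`, so it can be used directly on CIL data objects. A quick check of the new import path, assuming `cil.framework.VectorData` wraps a small complex NumPy vector as in released CIL:

```python
import numpy as np
from cil.framework import VectorData
from cil.optimisation.functions import soft_shrinkage

x = VectorData(np.array([3 + 4j, 0.2 - 0.1j], dtype=np.complex64))
y = soft_shrinkage(x, tau=1.0)
print(y.as_array())   # approx [2.4+3.2j, 0j]: magnitudes shrunk by 1, phases preserved
```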