Commit
Merge branch 'master' of git@github.com:gschramm/pymirc.git
gschramm committed Mar 27, 2020
2 parents 7619552 + 74d3892 commit f5cade0
Showing 3 changed files with 138 additions and 11 deletions.
28 changes: 18 additions & 10 deletions examples/keras_mnist_segmentation/slow_generator.py
@@ -1,5 +1,7 @@
import math
import numpy as np
import os
import threading

from tensorflow import keras
from unet import unet
@@ -22,7 +24,11 @@ def __getitem__(self, idx):
batch_x = self.x[start:end, ...]
batch_y = self.y[start:end, ...]

print(f'going to sleep for {sleep_time}s')
# dummy calculation to produce cpu load
for dd in range(10000):
dummy = np.random.random(100000)**2

print(f' creating batch {idx}, pid {os.getpid()}, tid {threading.get_ident()}')
sleep(self.sleep_time)

return batch_x, batch_y
@@ -32,25 +38,27 @@ def __getitem__(self, idx):
if __name__ == '__main__':
shape = (800,32,32,1)
val_shape = (80,32,32,1)
batch_size = 8
sleep_time = 1
batch_size = 80
sleep_time = 0.1

np.random.seed(1)

x = np.random.random(shape)
y = (np.random.random(shape) > 0.5).astype(float)

x_val = np.random.random(val_shape)
y_val = (np.random.random(val_shape) > 0.5).astype(float)

gen = SLOWSequence(x, y, batch_size, sleep_time = sleep_time)

model = unet(input_shape = x.shape[1:], nfeat = 2, batch_normalization = True)
model = unet(input_shape = x.shape[1:], nfeat = 32, batch_normalization = True)

model.compile(optimizer = keras.optimizers.Adam(learning_rate = 1e-3),
loss = keras.losses.BinaryCrossentropy())

# use_multiprocessing in model.fit() only works correctly in tf 2.1
# in tf 2.0 it is always executed in the main process
# in tf 2.0, use fit_generator() (which is deprecated)
history = model.fit(gen,
epochs = 2,
validation_data = (x_val, y_val),
shuffle = False)
epochs = 4,
shuffle = False,
use_multiprocessing = True,
workers = 8,
max_queue_size = 8)
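
The comment in the example above points out that use_multiprocessing in model.fit() is only honored from tf 2.1 on. A minimal sketch (not part of this commit, and the version check is an assumption) of how those flags could be guarded on the installed TensorFlow version, reusing the model and gen defined in the example:

import tensorflow as tf

# enable worker processes only when fit() actually honors them (tf >= 2.1)
use_mp = tuple(int(v) for v in tf.__version__.split('.')[:2]) >= (2, 1)

history = model.fit(gen,
                    epochs = 4,
                    shuffle = False,
                    use_multiprocessing = use_mp,
                    workers = 8 if use_mp else 1,
                    max_queue_size = 8)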
24 changes: 23 additions & 1 deletion pymirc/metrics/tf_losses.py
@@ -4,7 +4,7 @@
else:
from tensorflow.keras import backend as K

from .tf_metrics import soft_dice_coef, soft_jaccard_index, IoU
from .tf_metrics import soft_dice_coef, soft_jaccard_index, IoU, ssim_3d


def weighted_binary_crossentropy(weights=[.5, 1]):
@@ -66,3 +66,25 @@ def focal_loss_fixed(y_true, y_pred):

return focal_loss_fixed


def ssim_3d_loss(x, y, **kwargs):
""" Compute the structural similarity loss between two batches of 3D single channel images
Parameters
----------
x,y : tensorflow tensors with shape [batch_size,depth,height,width,1]
containing a batch of 3D images with 1 channel
**kwargs : dict
passed to tf_ssim_3d
Returns
-------
a 1D tensorflow tensor of length batch_size containing the 1 - SSIM for
every image pair in the batch
See Also
--------
tf_ssim_3d
"""
return 1 - ssim_3d(x, y, **kwargs)
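
As a usage note (not from this commit): ssim_3d_loss follows the (y_true, y_pred) calling convention of Keras losses and returns one value per batch sample, so it can be passed directly to compile(). A minimal sketch, assuming a model that predicts single channel 3D volumes of shape [batch_size, depth, height, width, 1], e.g. the unet from the example above:

from tensorflow import keras
from pymirc.metrics.tf_losses import ssim_3d_loss

# assumes `model` outputs a single channel 3D volume per sample
model.compile(optimizer = keras.optimizers.Adam(learning_rate = 1e-3),
              loss = ssim_3d_loss)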
97 changes: 97 additions & 0 deletions pymirc/metrics/tf_metrics.py
@@ -67,6 +67,103 @@ def IoU(y_true, y_pred):
'''
return jaccard_index(y_true, y_pred)

def tf_gauss_kernel_3d(sigma, size):
"""Generate 3D Gaussian kernel
Parameters
----------
sigma : float
width of the gaussian
size : int
size of the gaussian kernel (should be odd and approx. 2*int(3.5*sigma + 0.5) + 1)
Returns
-------
tensorflow tensor with dimension [size,size,size,1,1] with tf.reduce_sum(k) = 1
"""
size = tf.convert_to_tensor(size, tf.int32)
sigma = tf.convert_to_tensor(sigma, tf.float32)

coords = tf.cast(tf.range(size), tf.float32) - tf.cast(size - 1, tf.float32) / 2.0

g = -0.5*tf.square(coords) / tf.square(sigma)
g = tf.nn.softmax(g)

g = tf.einsum('i,j,k->ijk', g, g, g)
g = tf.expand_dims(tf.expand_dims(g, -1), -1)

return g

def ssim_3d(x, y, sigma = 1.5, size = 11, L = None, K1 = 0.01, K2 = 0.03, return_image = False):
""" Compute the structural similarity between two batches of 3D single channel images
Parameters
----------
x,y : tensorflow tensors with shape [batch_size,depth,height,width,1]
containing a batch of 3D images with 1 channel
L : float
dynamic range of the images.
By default (None) it is set to tf.reduce_max(y) - tf.reduce_min(y)
K1, K2 : float
small constants needed to avoid division by 0, see [1].
Default 0.01, 0.03
sigma : float
width of the gaussian filter in pixels
Default 1.5
size : int
size of the gaussian kernel used to calculate local means and std.devs
Default 11
return_image : bool
if True, return the local SSIM image instead of the mean SSIM per batch sample
Default False
Returns
-------
a 1D tensorflow tensor of length batch_size containing the SSIM for
every image pair in the batch
Note
----
(1) This implementation is very close to [1] and
from skimage.metrics import structural_similarity
structural_similarity(x, y, gaussian_weights = True, full = True, data_range = L)
(2) The default way the dynamic range L is calculated (based on y)
is different from [1] and structural_similarity()
References
----------
[1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.
(2004). Image quality assessment: From error visibility to
structural similarity. IEEE Transactions on Image Processing
"""
if (x.shape[-1] != 1) or (y.shape[-1] != 1):
raise ValueError('Last dimension of x and y has to be 1')

if L is None:
L = tf.reduce_max(y) - tf.reduce_min(y)

C1 = (K1*L)**2
C2 = (K2*L)**2

shape = x.shape
kernel = tf_gauss_kernel_3d(sigma, size)

mu_x = tf.nn.conv3d(x, kernel, strides = [1,1,1,1,1], padding = 'VALID')
mu_y = tf.nn.conv3d(y, kernel, strides = [1,1,1,1,1], padding = 'VALID')

mu_x_sq = mu_x*mu_x
mu_y_sq = mu_y*mu_y
mu_x_y = mu_x*mu_y

sig_x_sq = tf.nn.conv3d(x*x, kernel, strides = [1,1,1,1,1], padding = 'VALID') - mu_x_sq
sig_y_sq = tf.nn.conv3d(y*y, kernel, strides = [1,1,1,1,1], padding = 'VALID') - mu_y_sq
sig_xy = tf.nn.conv3d(x*y, kernel, strides = [1,1,1,1,1], padding = 'VALID') - mu_x_y

SSIM = (2*mu_x_y + C1)*(2*sig_xy + C2) / ((mu_x_sq + mu_y_sq + C1)*(sig_x_sq + sig_y_sq + C2))

if not return_image:
SSIM = tf.reduce_mean(SSIM, [1,2,3,4])

return SSIM

# aliases
# dice = DICE = dice_coef
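
A quick cross-check (not part of the commit) along the lines of the docstring note above, assuming scikit-image is installed and eager execution; the two values should agree closely but not exactly, because of the dynamic range and border handling differences already mentioned:

import numpy as np
import tensorflow as tf
from skimage.metrics import structural_similarity

x = np.random.rand(16, 16, 16).astype(np.float32)
y = np.random.rand(16, 16, 16).astype(np.float32)

# ssim_3d expects tensors of shape [batch_size, depth, height, width, 1]
s_tf = ssim_3d(tf.constant(x[None, ..., None]), tf.constant(y[None, ..., None]))
s_sk = structural_similarity(x, y, gaussian_weights = True,
                             data_range = float(y.max() - y.min()))

print(float(s_tf[0]), s_sk)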
