From 03b353d5f1bcdffea96338d76d44dac1977a3195 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 13 Jan 2020 13:59:29 +0000 Subject: [PATCH 1/3] converts camel case to lower case with underscore --- examples/cardiac_segmentation.ipynb | 2 +- monai/__init__.py | 10 +- monai/application/__init__.py | 2 - monai/application/config/__init__.py | 2 - monai/application/config/deviceconfig.py | 21 +- monai/application/engine/__init__.py | 2 - monai/application/handlers/metric_logger.py | 38 ++-- monai/data/__init__.py | 2 - monai/data/augments/__init__.py | 2 - monai/data/augments/augments.py | 97 +++++----- monai/data/augments/augmentstream.py | 43 +++-- monai/data/augments/decorators.py | 49 ++--- monai/data/readers/__init__.py | 2 - monai/data/readers/arrayreader.py | 84 ++++---- monai/data/readers/npzreader.py | 18 +- monai/data/streams/datastream.py | 203 ++++++++++---------- monai/data/streams/threadbufferstream.py | 43 +++-- monai/data/transforms/noise_adder.py | 1 - monai/networks/__init__.py | 1 - monai/networks/layers/__init__.py | 2 - monai/networks/layers/convolutions.py | 88 +++++---- monai/networks/layers/simplelayers.py | 6 +- monai/networks/losses/__init__.py | 2 - monai/networks/losses/dice.py | 28 +-- monai/networks/nets/__init__.py | 2 - monai/networks/nets/unet.py | 88 ++++++--- monai/networks/utils.py | 25 ++- monai/utils/__init__.py | 2 +- monai/utils/aliases.py | 16 +- monai/utils/arrayutils.py | 50 ++--- monai/utils/convutils.py | 28 +-- monai/utils/decorators.py | 29 +-- monai/utils/mathutils.py | 9 +- monai/utils/moduleutils.py | 8 +- 34 files changed, 525 insertions(+), 480 deletions(-) diff --git a/examples/cardiac_segmentation.ipynb b/examples/cardiac_segmentation.ipynb index e66496a69e..f96a14a5db 100644 --- a/examples/cardiac_segmentation.ipynb +++ b/examples/cardiac_segmentation.ipynb @@ -36,7 +36,7 @@ "from monai import application, data, networks, utils\n", "import monai.data.augments.augments as augments\n", "\n", - "application.config.printConfig()" + "application.config.print_config()" ] }, { diff --git a/monai/__init__.py b/monai/__init__.py index dbe64c7938..3e9f913019 100644 --- a/monai/__init__.py +++ b/monai/__init__.py @@ -1,6 +1,7 @@ -import os, sys -from .utils.moduleutils import loadSubmodules +import os +import sys +from .utils.moduleutils import load_submodules __copyright__ = "(c) 2019 MONAI Consortium" __version__tuple__ = (0, 0, 1) @@ -8,6 +9,5 @@ __basedir__ = os.path.dirname(__file__) - -loadSubmodules(sys.modules[__name__], False) # load directory modules only, skip loading individual files -loadSubmodules(sys.modules[__name__], True) # load all modules, this will trigger all export decorations +load_submodules(sys.modules[__name__], False) # load directory modules only, skip loading individual files +load_submodules(sys.modules[__name__], True) # load all modules, this will trigger all export decorations diff --git a/monai/application/__init__.py b/monai/application/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/application/__init__.py +++ b/monai/application/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/application/config/__init__.py b/monai/application/config/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/application/config/__init__.py +++ b/monai/application/config/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/application/config/deviceconfig.py b/monai/application/config/deviceconfig.py index eae4d1430d..e8cce35e4b 100644 --- a/monai/application/config/deviceconfig.py +++ 
b/monai/application/config/deviceconfig.py @@ -1,20 +1,23 @@ -import os, sys +import os +import sys from collections import OrderedDict -import monai + import numpy as np import torch +import monai + try: import ignite - ignite_version=ignite.__version__ + ignite_version = ignite.__version__ except ImportError: - ignite_version='NOT INSTALLED' + ignite_version = 'NOT INSTALLED' export = monai.utils.export("monai.application.config") @export -def getConfigValues(): +def get_config_values(): output = OrderedDict() output["MONAI version"] = monai.__version__ @@ -27,11 +30,11 @@ def getConfigValues(): @export -def printConfig(file=sys.stdout): - for kv in getConfigValues().items(): +def print_config(file=sys.stdout): + for kv in get_config_values().items(): print("%s: %s" % kv, file=file, flush=True) @export -def setVisibleDevices(*devInds): - os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, devInds)) +def set_visible_devices(*dev_inds): + os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, dev_inds)) diff --git a/monai/application/engine/__init__.py b/monai/application/engine/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/application/engine/__init__.py +++ b/monai/application/engine/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/application/handlers/metric_logger.py b/monai/application/handlers/metric_logger.py index 3cbb873cf2..bb2fb07186 100644 --- a/monai/application/handlers/metric_logger.py +++ b/monai/application/handlers/metric_logger.py @@ -6,24 +6,24 @@ @monai.utils.export("monai.application.handlers") @monai.utils.alias("metriclogger") class MetricLogger: - def __init__(self,loss_transform=lambda x:x, metric_transform=lambda x:x): - self.loss_transform=loss_transform - self.metric_transform=metric_transform - self.loss=[] - self.metrics=defaultdict(list) - - def attach(self,engine): - return engine.add_event_handler(monai.application.engine.Events.ITERATION_COMPLETED,self) - - def __call__(self,engine): + + def __init__(self, loss_transform=lambda x: x, metric_transform=lambda x: x): + self.loss_transform = loss_transform + self.metric_transform = metric_transform + self.loss = [] + self.metrics = defaultdict(list) + + def attach(self, engine): + return engine.add_event_handler(monai.application.engine.Events.ITERATION_COMPLETED, self) + + def __call__(self, engine): self.loss.append(self.loss_transform(engine.state.output)) - - for m,v in engine.state.metrics.items(): - v=self.metric_transform(v) -# # metrics may not be added on the first timestep, pad the list if this is the case -# # so that each metric list is the same length as self.loss -# if len(self.metrics[m])==0: -# self.metrics[m].append([v[0]]*len(self.loss)) - + + for m, v in engine.state.metrics.items(): + v = self.metric_transform(v) + # # metrics may not be added on the first timestep, pad the list if this is the case + # # so that each metric list is the same length as self.loss + # if len(self.metrics[m])==0: + # self.metrics[m].append([v[0]]*len(self.loss)) + self.metrics[m].append(v) - \ No newline at end of file diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/data/augments/__init__.py b/monai/data/augments/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/data/augments/__init__.py +++ b/monai/data/augments/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/data/augments/augments.py b/monai/data/augments/augments.py index 
17259e99c8..c2c04079a2 100644 --- a/monai/data/augments/augments.py +++ b/monai/data/augments/augments.py @@ -1,23 +1,24 @@ """ This contains the definitions of the commonly used argumentation functions. These apply operations to single instances -of data objects, which are tuples of numpy arrays where the first dimension if the channel dimension and others are +of data objects, which are tuples of numpy arrays where the first dimension if the channel dimension and others are component, height/width (CHW), or height/width/depth (CHWD). """ from functools import partial + import numpy as np -import scipy.ndimage import scipy.fftpack as ft +import scipy.ndimage -from monai.data.augments.decorators import augment, checkSegmentMargin -from monai.utils.arrayutils import randChoice, rescaleArray, copypasteArrays, resizeCenter -from monai.utils.convutils import oneHot +from monai.data.augments.decorators import augment, check_segment_margin +from monai.utils.arrayutils import (copypaste_arrays, rand_choice, rescale_array, resize_center) +from monai.utils.convutils import one_hot try: from PIL import Image - pilAvailable = True + PILAvailable = True except ImportError: - pilAvailable = False + PILAvailable = False @augment() @@ -31,7 +32,7 @@ def flip(*arrs): """Flip each of `arrs' with a random choice of up-down or left-right.""" def _flip(arr): - return arr[:, :, ::-1] if randChoice() else arr[:, ::-1] + return arr[:, :, ::-1] if rand_choice() else arr[:, ::-1] return _flip @@ -45,39 +46,39 @@ def rot90(*arrs): @augment(prob=1.0) def normalize(*arrs): """Normalize each of `arrs'.""" - return rescaleArray + return rescale_array @augment(prob=1.0) -def randPatch(*arrs, patchSize=(32, 32)): - """Randomly choose a patch from `arrs' of dimensions `patchSize'.""" - ph, pw = patchSize +def rand_patch(*arrs, patch_size=(32, 32)): + """Randomly choose a patch from `arrs' of dimensions `patch_size'.""" + ph, pw = patch_size - def _randPatch(im): + def _rand_patch(im): h, w = im.shape[1:3] ry = np.random.randint(0, h - ph) rx = np.random.randint(0, w - pw) - return im[:, ry : ry + ph, rx : rx + pw] + return im[:, ry:ry + ph, rx:rx + pw] - return _randPatch + return _rand_patch @augment() -@checkSegmentMargin -def shift(*arrs, dimFract=2, order=3): +@check_segment_margin +def shift(*arrs, dim_fract=2, order=3): """Shift arrays randomly by `dimfract' fractions of the array dimensions.""" testim = arrs[0] x, y = testim.shape[1:3] - shiftx = np.random.randint(-x // dimFract, x // dimFract) - shifty = np.random.randint(-y // dimFract, y // dimFract) + shiftx = np.random.randint(-x // dim_fract, x // dim_fract) + shifty = np.random.randint(-y // dim_fract, y // dim_fract) def _shift(im): c, h, w = im.shape[:3] dest = np.zeros_like(im) - srcslices, destslices = copypasteArrays(im, dest, (0, h // 2 + shiftx, w // 2 + shifty), - (0, h // 2, w // 2), (c, h, w)) + srcslices, destslices = copypaste_arrays(im, dest, (0, h // 2 + shiftx, w // 2 + shifty), (0, h // 2, w // 2), + (c, h, w)) dest[destslices] = im[srcslices] return dest @@ -86,7 +87,7 @@ def _shift(im): @augment() -@checkSegmentMargin +@check_segment_margin def rotate(*arrs): """Shift arrays randomly around the array center.""" @@ -99,7 +100,7 @@ def _rotate(im): @augment() -@checkSegmentMargin +@check_segment_margin def zoom(*arrs, zoomrange=0.2): """Return the image/mask pair zoomed by a random amount with the mask kept within `margin' pixels of the edges.""" @@ -109,23 +110,23 @@ def zoom(*arrs, zoomrange=0.2): def _zoom(im): ztemp = 
scipy.ndimage.zoom(im, (0, zx, zy) + tuple(1 for _ in range(1, im.ndim)), order=2) - return resizeCenter(ztemp, *im.shape) + return resize_center(ztemp, *im.shape) return _zoom @augment() -@checkSegmentMargin -def rotateZoomPIL(*arrs, margin=5, minFract=0.5, maxFract=2, resample=0): +@check_segment_margin +def rotate_zoom_pil(*arrs, margin=5, min_fract=0.5, max_fract=2, resample=0): assert all(a.ndim >= 2 for a in arrs) - assert pilAvailable, "PIL (pillow) not installed" + assert PILAvailable, "PIL (pillow) not installed" testim = arrs[0] x, y = testim.shape[1:3] angle = np.random.random() * 360 - zoomx = x + np.random.randint(-x * minFract, x * maxFract) - zoomy = y + np.random.randint(-y * minFract, y * maxFract) + zoomx = x + np.random.randint(-x * min_fract, x * max_fract) + zoomy = y + np.random.randint(-y * min_fract, y * max_fract) filters = (Image.NEAREST, Image.LINEAR, Image.BICUBIC) @@ -155,14 +156,14 @@ def _trans(im): @augment() -def deformPIL(*arrs, defrange=25, numControls=3, margin=2, mapOrder=1): - """Deforms arrays randomly with a deformation grid of size `numControls'**2 with `margins' grid values fixed.""" - assert pilAvailable, "PIL (pillow) not installed" +def deform_pil(*arrs, defrange=25, num_controls=3, margin=2, map_order=1): + """Deforms arrays randomly with a deformation grid of size `num_controls'**2 with `margins' grid values fixed.""" + assert PILAvailable, "PIL (pillow) not installed" h, w = arrs[0].shape[1:3] - imshift = np.zeros((2, numControls + margin * 2, numControls + margin * 2)) - imshift[:, margin:-margin, margin:-margin] = np.random.randint(-defrange, defrange, (2, numControls, numControls)) + imshift = np.zeros((2, num_controls + margin * 2, num_controls + margin * 2)) + imshift[:, margin:-margin, margin:-margin] = np.random.randint(-defrange, defrange, (2, num_controls, num_controls)) imshiftx = np.array(Image.fromarray(imshift[0]).resize((w, h), Image.QUAD)) imshifty = np.array(Image.fromarray(imshift[1]).resize((w, h), Image.QUAD)) @@ -170,30 +171,30 @@ def deformPIL(*arrs, defrange=25, numControls=3, margin=2, mapOrder=1): y, x = np.meshgrid(np.arange(w), np.arange(h)) indices = np.reshape(x + imshiftx, (-1, 1)), np.reshape(y + imshifty, (-1, 1)) - def _mapChannels(im): + def _map_channels(im): if im.ndim > 2: - return np.stack(list(map(_mapChannels, im))) + return np.stack(list(map(_map_channels, im))) elif im.ndim == 2: - result = scipy.ndimage.map_coordinates(im, indices, order=mapOrder, mode="constant") + result = scipy.ndimage.map_coordinates(im, indices, order=map_order, mode="constant") return result.reshape(im.shape) raise ValueError("Incorrect image shape: %r" % (im.shape,)) - return _mapChannels + return _map_channels @augment() -def distortFFT(*arrs, minDist=0.1, maxDist=1.0): +def distort_fft(*arrs, min_dist=0.1, max_dist=1.0): """Distorts arrays by applying dropout in k-space with a per-pixel probability based on distance from center.""" h, w = arrs[0].shape[:2] x, y = np.meshgrid(np.linspace(-1, 1, h), np.linspace(-1, 1, w)) - probfield = np.sqrt(x ** 2 + y ** 2) + probfield = np.sqrt(x**2 + y**2) if arrs[0].ndim == 3: probfield = np.repeat(probfield[..., np.newaxis], arrs[0].shape[2], 2) - dropout = np.random.uniform(minDist, maxDist, arrs[0].shape) > probfield + dropout = np.random.uniform(min_dist, max_dist, arrs[0].shape) > probfield def _distort(im): if im.ndim == 2: @@ -210,19 +211,19 @@ def _distort(im): return _distort -def splitSegmentation(*arrs, numLabels=2, segIndex=-1): +def split_segmentation(*arrs, num_labels=2, 
seg_index=-1): arrs = list(arrs) - seg = arrs[segIndex] - seg = oneHot(seg, numLabels) - arrs[segIndex] = seg + seg = arrs[seg_index] + seg = one_hot(seg, num_labels) + arrs[seg_index] = seg return tuple(arrs) -def mergeSegmentation(*arrs, segIndex=-1): +def merge_segmentation(*arrs, seg_index=-1): arrs = list(arrs) - seg = arrs[segIndex] + seg = arrs[seg_index] seg = np.argmax(seg, 2) - arrs[segIndex] = seg + arrs[seg_index] = seg return tuple(arrs) diff --git a/monai/data/augments/augmentstream.py b/monai/data/augments/augmentstream.py index 5c0ec46032..0a453767a9 100644 --- a/monai/data/augments/augmentstream.py +++ b/monai/data/augments/augmentstream.py @@ -1,8 +1,9 @@ - -from monai.data.streams.datastream import DataStream, BatchStream, OrderType from multiprocessing.pool import ThreadPool + import numpy as np +from monai.data.streams.datastream import BatchStream, DataStream, OrderType + class AugmentStream(DataStream): """Applies the given augmentations in generate() to each given value and yields the results.""" @@ -12,42 +13,42 @@ def __init__(self, src, augments=[]): self.augments = list(augments) def generate(self, val): - yield self.applyAugments(val) + yield self.apply_augments(val) - def applyAugments(self, arrays): + def apply_augments(self, arrays): """Applies augments to the data tuple `arrays` and returns the result.""" - toTuple = isinstance(arrays, np.ndarray) - arrays = (arrays,) if toTuple else arrays + to_tuple = isinstance(arrays, np.ndarray) + arrays = (arrays,) if to_tuple else arrays for aug in self.augments: arrays = aug(*arrays) - return arrays[0] if toTuple else arrays + return arrays[0] if to_tuple else arrays class ThreadAugmentStream(BatchStream, AugmentStream): """ Applies the given augmentations to each value from the source using multiple threads. Resulting batches are yielded - synchronously so the client must wait for the threads to complete. + synchronously so the client must wait for the threads to complete. 
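The stream classes above compose directly with the plain augment functions. A minimal, hypothetical sketch of the renamed API (the random source data and the normalize/flip composition are assumptions for illustration, not part of this patch):

```python
import numpy as np

from monai.data.augments.augments import flip, normalize
from monai.data.augments.augmentstream import AugmentStream
from monai.data.streams.datastream import DataStream

# a toy source of CHW images; a real pipeline would wrap a reader instead
src = DataStream([np.random.rand(1, 8, 8).astype(np.float32) for _ in range(4)])

# normalize always applies, flip applies with its default probability
stream = AugmentStream(src, augments=[normalize, flip])
for img in stream:
    print(img.shape, img.min(), img.max())
```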
""" - def __init__(self, src, batchSize, numThreads=None, augments=[], orderType=OrderType.LINEAR): - BatchStream.__init__(self, src, batchSize, False, orderType) + def __init__(self, src, batch_size, num_threads=None, augments=[], order_type=OrderType.LINEAR): + BatchStream.__init__(self, src, batch_size, False, order_type) AugmentStream.__init__(self, src, augments) - self.numThreads = numThreads + self.num_threads = num_threads self.pool = None - def _augmentThreadFunc(self, index, arrays): - self.buffer[index] = self.applyAugments(arrays) + def _augment_thread_func(self, index, arrays): + self.buffer[index] = self.apply_augments(arrays) - def applyAugmentsThreaded(self): - self.pool.starmap(self._augmentThreadFunc, enumerate(self.buffer)) + def apply_augments_threaded(self): + self.pool.starmap(self._augment_thread_func, enumerate(self.buffer)) - def bufferFull(self): - self.applyAugmentsThreaded() - super().bufferFull() + def buffer_full(self): + self.apply_augments_threaded() + super().buffer_full() def __iter__(self): - with ThreadPool(self.numThreads) as self.pool: - for srcVal in super().__iter__(): - yield srcVal + with ThreadPool(self.num_threads) as self.pool: + for src_val in super().__iter__(): + yield src_val diff --git a/monai/data/augments/decorators.py b/monai/data/augments/decorators.py index f4e6703bb2..c48adaf402 100644 --- a/monai/data/augments/decorators.py +++ b/monai/data/augments/decorators.py @@ -1,72 +1,75 @@ from functools import wraps -from monai.utils.arrayutils import randChoice, zeroMargins + import numpy as np +from monai.utils.arrayutils import rand_choice, zero_margins + -def augment(prob=0.5, applyIndices=None): +def augment(prob=0.5, apply_indices=None): """ Creates an augmentation function when decorating to a function returning an array-modifying callable. The function this decorates is given the list of input arrays as positional arguments and then should return a callable operation which performs the augmentation. This wrapper then chooses whether to apply the operation to the arguments and if so - to which ones. The `prob' argument states the probability the augment is applied, `applyIndices' gives indices of + to which ones. The `prob' argument states the probability the augment is applied, `apply_indices' gives indices of the arrays to apply to (or None for all). The arguments are also keyword arguments in the resulting function. 
""" def _inner(func): + @wraps(func) def _func(*args, **kwargs): _prob = kwargs.pop("prob", prob) # get the probability of applying this augment - if _prob < 1.0 and not randChoice(_prob): # if not chosen just return the original argument + if _prob < 1.0 and not rand_choice(_prob): # if not chosen just return the original argument return args - _applyIndices = kwargs.pop("applyIndices", applyIndices) + _apply_indices = kwargs.pop("apply_indices", apply_indices) op = func(*args, **kwargs) - indices = list(_applyIndices or range(len(args))) + indices = list(_apply_indices or range(len(args))) return tuple((op(im) if i in indices else im) for i, im in enumerate(args)) if _func.__doc__: _func.__doc__ += """ - + Added keyword arguments: prob: probability of applying this augment (default: 0.5) - applyIndices: indices of arrays to apply augment to (default: None meaning all) + apply_indices: indices of arrays to apply augment to (default: None meaning all) """ return _func return _inner -def checkSegmentMargin(func): +def check_segment_margin(func): """ Decorate an augment callable `func` with a check to ensure a given segmentation image in the set does not touch the margins of the image when geometric transformations are applied. The keyword arguments `margin`, - `maxCount` and `nonzeroIndex` are used to check the image at index `nonzeroIndex` has the given margin of - pixels around its edges, trying `maxCount` number of times to get a modifier by calling `func` before - giving up and producing a identity modifier in its place. + `max_count` and `nonzero_index` are used to check the image at index `nonzero_index` has the given margin of + pixels around its edges, trying `max_count` number of times to get a modifier by calling `func` before + giving up and producing a identity modifier in its place. """ @wraps(func) def _check(*args, **kwargs): margin = max(1, kwargs.pop("margin", 5)) - maxCount = max(1, kwargs.pop("maxCount", 5)) - nonzeroIndex = kwargs.pop("nonzeroIndex", -1) - acceptedOutput = False + max_count = max(1, kwargs.pop("max_count", 5)) + nonzero_index = kwargs.pop("nonzero_index", -1) + accepted_output = False - while maxCount > 0 and not acceptedOutput: + while max_count > 0 and not accepted_output: op = func(*args, **kwargs) - maxCount -= 1 + max_count -= 1 - if nonzeroIndex == -1: - acceptedOutput = True + if nonzero_index == -1: + accepted_output = True else: - seg = op(args[nonzeroIndex]).astype(np.int32) - acceptedOutput = zeroMargins(seg, margin) + seg = op(args[nonzero_index]).astype(np.int32) + accepted_output = zero_margins(seg, margin) - if not acceptedOutput: - op = lambda arr: arr + if not accepted_output: + return lambda arr: arr return op diff --git a/monai/data/readers/__init__.py b/monai/data/readers/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/data/readers/__init__.py +++ b/monai/data/readers/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/data/readers/arrayreader.py b/monai/data/readers/arrayreader.py index 4a06d7d09c..b39aa1c08f 100644 --- a/monai/data/readers/arrayreader.py +++ b/monai/data/readers/arrayreader.py @@ -1,8 +1,10 @@ from threading import Lock + +import numpy as np + import monai -from monai.utils.decorators import RestartGenerator from monai.data.streams import DataStream, OrderType -import numpy as np +from monai.utils.decorators import RestartGenerator @monai.utils.export("monai.data.readers") @@ -16,87 +18,87 @@ class ArrayReader(DataStream): optionally only once. 
""" - def __init__(self, *arrays, orderType=OrderType.LINEAR, doOnce=False, choiceProbs=None): - if orderType not in (OrderType.SHUFFLE, OrderType.CHOICE, OrderType.LINEAR): - raise ValueError("Invalid orderType value %r" % (orderType,)) + def __init__(self, *arrays, order_type=OrderType.LINEAR, do_once=False, choice_probs=None): + if order_type not in (OrderType.SHUFFLE, OrderType.CHOICE, OrderType.LINEAR): + raise ValueError("Invalid order_type value %r" % (order_type,)) self.arrays = () - self.orderType = orderType - self.doOnce = doOnce - self.choiceProbs = None + self.order_type = order_type + self.do_once = do_once + self.choice_probs = None self.lock = Lock() - super().__init__(RestartGenerator(self.yieldArrays)) + super().__init__(RestartGenerator(self.yield_arrays)) - self.appendArrays(*arrays, choiceProbs=choiceProbs) + self.append_arrays(*arrays, choice_probs=choice_probs) - def yieldArrays(self): - while self.isRunning: + def yield_arrays(self): + while self.is_running: with self.lock: # capture locally so that emptying the reader doesn't interfere with an on-going interation arrays = self.arrays - choiceProbs = self.choiceProbs + choice_probs = self.choice_probs indices = np.arange(arrays[0].shape[0] if arrays else 0) - if self.orderType == OrderType.SHUFFLE: + if self.order_type == OrderType.SHUFFLE: np.random.shuffle(indices) - elif self.orderType == OrderType.CHOICE: - indices = np.random.choice(indices, indices.shape, p=choiceProbs) + elif self.order_type == OrderType.CHOICE: + indices = np.random.choice(indices, indices.shape, p=choice_probs) for i in indices: yield tuple(arr[i] for arr in arrays) - if self.doOnce or not arrays: # stop first time through or if empty + if self.do_once or not arrays: # stop first time through or if empty break - def getSubArrays(self, indices): + def get_sub_arrays(self, indices): """Get a new ArrayReader with a subset of this one's data defined by the `indices` list.""" with self.lock: - subArrays = [a[indices] for a in self.arrays] - subProbs = None + sub_arrays = [a[indices] for a in self.arrays] + sub_probs = None - if self.choiceProbs is not None: - subProbs = self.choiceProbs[indices] - subProbs = subProbs / np.sum(subProbs) + if self.choice_probs is not None: + sub_probs = self.choice_probs[indices] + sub_probs = sub_probs / np.sum(sub_probs) - return ArrayReader(*subArrays, orderType=self.orderType, doOnce=self.doOnce, choiceProbs=subProbs) + return ArrayReader(*sub_arrays, order_type=self.order_type, do_once=self.do_once, choice_probs=sub_probs) - def appendArrays(self, *arrays, choiceProbs=None): + def append_arrays(self, *arrays, choice_probs=None): """ Append the given arrays to the existing entries in self.arrays, or replacing self.arrays if this is empty. If - `choiceProbs` is provided this is appended to self.choiceProbs, or replaces it if the latter is None or empty. + `choice_probs` is provided this is appended to self.choice_probs, or replaces it if the latter is None or empty. 
""" - arrayLen = arrays[0].shape[0] if arrays else 0 + array_len = arrays[0].shape[0] if arrays else 0 - if arrayLen > 0 and any(arr.shape[0] != arrayLen for arr in arrays): + if array_len > 0 and any(arr.shape[0] != array_len for arr in arrays): raise ValueError("All input arrays must have the same length for dimension 0") with self.lock: if not self.arrays and arrays: self.arrays = tuple(arrays) - elif arrayLen > 0: + elif array_len > 0: self.arrays = tuple(np.concatenate(ht) for ht in zip(self.arrays, arrays)) - if self.arrays and choiceProbs is not None and choiceProbs.shape[0] > 0: - choiceProbs = np.atleast_1d(choiceProbs) + if self.arrays and choice_probs is not None and choice_probs.shape[0] > 0: + choice_probs = np.atleast_1d(choice_probs) - if choiceProbs.shape[0] != arrayLen: - raise ValueError("Length of choiceProbs (%i) must match that of input arrays (%i)" % - (self.choiceProbs.shape[0], arrayLen)) + if choice_probs.shape[0] != array_len: + raise ValueError("Length of choice_probs (%i) must match that of input arrays (%i)" % + (self.choice_probs.shape[0], array_len)) - if self.choiceProbs is None: - self.choiceProbs = choiceProbs + if self.choice_probs is None: + self.choice_probs = choice_probs else: - self.choiceProbs = np.concatenate([self.choiceProbs, choiceProbs]) + self.choice_probs = np.concatenate([self.choice_probs, choice_probs]) - self.choiceProbs = self.choiceProbs / np.sum(self.choiceProbs) + self.choice_probs = self.choice_probs / np.sum(self.choice_probs) - def emptyArrays(self): - """Clear the stored arrays and choiceProbs so that this reader is empty but functional.""" + def empty_arrays(self): + """Clear the stored arrays and choice_probs so that this reader is empty but functional.""" with self.lock: self.arrays = () - self.choiceProbs = None if self.choiceProbs is None else self.choiceProbs[:0] + self.choice_probs = None if self.choice_probs is None else self.choice_probs[:0] def __len__(self): return len(self.arrays[0]) if self.arrays else 0 diff --git a/monai/data/readers/npzreader.py b/monai/data/readers/npzreader.py index 6064782540..a4e1c7f2cf 100644 --- a/monai/data/readers/npzreader.py +++ b/monai/data/readers/npzreader.py @@ -8,23 +8,23 @@ class NPZReader(ArrayReader): """ Loads arrays from an .npz file as the source data. Other values can be loaded from the file and stored in - `otherValues` rather than used as source data. + `other_values` rather than used as source data. 
""" - def __init__(self, objOrFileName, arrayNames, otherValues=[], - orderType=OrderType.LINEAR, doOnce=False, choiceProbs=None): - self.objOrFileName = objOrFileName + def __init__(self, obj_or_file_name, array_names, other_values=[], + order_type=OrderType.LINEAR, do_once=False, choice_probs=None): + self.objOrFileName = obj_or_file_name - dat = np.load(objOrFileName) + dat = np.load(obj_or_file_name) keys = set(dat.keys()) - missing = set(arrayNames) - keys + missing = set(array_names) - keys if missing: raise ValueError("Array name(s) %r not in loaded npz file" % (missing,)) - arrays = [dat[name] for name in arrayNames] + arrays = [dat[name] for name in array_names] - super().__init__(*arrays, orderType=orderType, doOnce=doOnce, choiceProbs=choiceProbs) + super().__init__(*arrays, order_type=order_type, do_once=do_once, choice_probs=choice_probs) - self.otherValues = {n: dat[n] for n in otherValues if n in keys} + self.otherValues = {n: dat[n] for n in other_values if n in keys} diff --git a/monai/data/streams/datastream.py b/monai/data/streams/datastream.py index 6fdad5a3cc..027f9d58f3 100644 --- a/monai/data/streams/datastream.py +++ b/monai/data/streams/datastream.py @@ -1,9 +1,11 @@ +from functools import wraps + +import numpy as np + import monai from monai.utils.aliases import alias -from monai.utils.mathutils import zipWith from monai.utils.decorators import RestartGenerator -from functools import wraps -import numpy as np +from monai.utils.mathutils import zip_with export = monai.utils.export("monai.data.streams") @@ -20,51 +22,51 @@ class OrderType(object): @alias("datastream") class DataStream(object): """ - The DataStream class represents a chain of iterable objects where one iterates over its source and in turn yields - values which are possibly transformed. This allows an intermediate object in the stream to modify a data element - which passes through the stream or generate more than one output value for each input. A sequence of stream objects + The DataStream class represents a chain of iterable objects where one iterates over its source and in turn yields + values which are possibly transformed. This allows an intermediate object in the stream to modify a data element + which passes through the stream or generate more than one output value for each input. A sequence of stream objects is created by using one stream as the source to another. - - This relies on an input source which must be an iterable. Values are taken from this in order and then passed to the - generate() generator method to produce one or more items, which are then yielded. Subclasses can override generate() - to produce filter or transformer types to place in a sequence of DataStream objects. The `streamgen` decorator can - be used to do the same. - - Internal infrastructure can be setup when the iteration starts and can rely on the self.isRunning to indicate when + + This relies on an input source which must be an iterable. Values are taken from this in order and then passed to the + generate() generator method to produce one or more items, which are then yielded. Subclasses can override generate() + to produce filter or transformer types to place in a sequence of DataStream objects. The `streamgen` decorator can + be used to do the same. + + Internal infrastructure can be setup when the iteration starts and can rely on the self.is_running to indicate when generation is expected. 
When this changes to False methods are expected to cleanup and exit gracefully, and be able - to be called again with isRunning set back to True. This allows restarting a complex stream object which may use - threads requiring starting and stopping. The stop() method when called set isRunning to False and attempts to call + to be called again with is_running set back to True. This allows restarting a complex stream object which may use + threads requiring starting and stopping. The stop() method when called set is_running to False and attempts to call the same on self.src, this is meant to be used to stop any internal processes (ie. threads) when iteration stops - with the expectation that it can be restarted later. Reading isRunning or assigning a literal value to it is atomic + with the expectation that it can be restarted later. Reading is_running or assigning a literal value to it is atomic thus thread-safe but keep this in mind when assigning a compound expression. """ def __init__(self, src): - """Initialize with `src' as the source iterable, and self.isRunning as True.""" + """Initialize with `src' as the source iterable, and self.is_running as True.""" self.src = src - self.isRunning = True + self.is_running = True def __iter__(self): """ Iterate over every value from self.src, passing through self.generate() and yielding the values it generates. """ - self.isRunning = True - for srcVal in self.src: - for outVal in self.generate(srcVal): - yield outVal # yield with syntax too new? + self.is_running = True + for src_val in self.src: + for out_val in self.generate(src_val): + yield out_val # yield with syntax too new? def generate(self, val): """Generate values from input `val`, by default just yields that. """ yield val def stop(self): - """Sets self.isRunning to False and calls stop() on self.src if it has this method.""" - self.isRunning = False + """Sets self.is_running to False and calls stop() on self.src if it has this method.""" + self.is_running = False if callable(getattr(self.src, "stop", None)): self.src.stop() - def getGenFunc(self): + def get_gen_func(self): """Returns a callable taking no arguments which produces the next item in the stream whenever called.""" stream = iter(self) return lambda: next(stream) @@ -80,14 +82,14 @@ def __init__(self, src, func, fargs, fkwargs): self.fkwargs = fkwargs def generate(self, val): - for outVal in self.func(val, *self.fargs, **self.fkwargs): - yield outVal + for out_val in self.func(val, *self.fargs, **self.fkwargs): + yield out_val @export def streamgen(func): """ - Converts a generator function into a constructor for creating FuncStream instances + Converts a generator function into a constructor for creating FuncStream instances using the function as the generator. """ @@ -96,7 +98,7 @@ def _wrapper(src, *args, **kwargs): return FuncStream(src, func, args, kwargs) return _wrapper - + @export @alias("cachestream") @@ -106,55 +108,55 @@ class CacheStream(DataStream): order, shuffled, or by choice indefinitely. 
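As a sketch of how the `streamgen` decorator builds a filter stage (the scaling filter is a hypothetical example, not from this patch):

```python
import numpy as np

from monai.data.streams.datastream import DataStream, streamgen

@streamgen
def scale_values(val, factor=2.0):
    """Yield the incoming array multiplied by `factor'."""
    yield val * factor

src = DataStream([np.ones((2, 2)), np.full((2, 2), 5.0)])
stream = scale_values(src, factor=3.0)  # creates a FuncStream wrapping src
for out in stream:
    print(out[0, 0])  # 3.0 then 15.0
```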
""" - def __init__(self, src, bufferSize=None, orderType=OrderType.LINEAR): + def __init__(self, src, buffer_size=None, order_type=OrderType.LINEAR): super().__init__(src) - self.bufferSize = bufferSize - self.orderType = orderType + self.buffer_size = buffer_size + self.order_type = order_type self.buffer = [] - + def __iter__(self): - self.buffer=[item for i, item in enumerate(self.src) if self.bufferSize is None or i 0: yield self.buffer.pop(0) @@ -167,28 +169,28 @@ def generate(self, val): class BatchStream(BufferStream): """Collects values from the source together into a batch of the stated size, ie. stacks buffered items.""" - def __init__(self, src, batchSize, sendShortBatch=False, orderType=OrderType.LINEAR): - super().__init__(src, batchSize, orderType) - self.sendShortBatch = sendShortBatch + def __init__(self, src, batch_size, send_short_batch=False, order_type=OrderType.LINEAR): + super().__init__(src, batch_size, order_type) + self.send_short_batch = send_short_batch - def bufferFull(self): + def buffer_full(self): """Replaces the buffer's contents with the arrays stacked together into a single item.""" if isinstance(self.buffer[0], np.ndarray): # stack all the arrays together batch = np.stack(self.buffer) else: # stack the arrays from each item into one - batch = tuple(zipWith(np.stack, *self.buffer)) + batch = tuple(zip_with(np.stack, *self.buffer)) self.buffer[:] = [batch] # yield only the one item when emptying the buffer def __iter__(self): - for srcVal in super().__iter__(): - yield srcVal + for src_val in super().__iter__(): + yield src_val # only true if the iteration has completed but items are left to make up a shortened batch - if len(self.buffer) > 0 and self.sendShortBatch: - self.bufferFull() + if len(self.buffer) > 0 and self.send_short_batch: + self.buffer_full() yield self.buffer.pop() @@ -199,13 +201,13 @@ class MergeStream(DataStream): def __init__(self, *srcs): self.srcs = srcs - super().__init__(RestartGenerator(self.yieldMergedValues)) + super().__init__(RestartGenerator(self.yield_merged_values)) - def yieldMergedValues(self): + def yield_merged_values(self): iters = [iter(s) for s in self.srcs] - canContinue = True + can_continue = True - while self.isRunning and canContinue: + while self.is_running and can_continue: try: values = [] for it in iters: @@ -216,88 +218,91 @@ def yieldMergedValues(self): values.append(tuple(val)) - srcVal = sum(values, ()) + src_val = sum(values, ()) - for outVal in self.generate(srcVal): - yield outVal + for out_val in self.generate(src_val): + yield out_val # must be caught as StopIteration won't propagate but magically mutate into RuntimeError except StopIteration: - canContinue = False + can_continue = False @export @alias("cyclingstream") class CyclingStream(DataStream): + def __init__(self, *srcs): self.srcs = srcs - super().__init__(RestartGenerator(self.yieldAlternatingValues)) + super().__init__(RestartGenerator(self.yield_alternating_values)) - def yieldAlternatingValues(self): + def yield_alternating_values(self): iters = [iter(s) for s in self.srcs] - canContinue = True + can_continue = True - while self.isRunning and canContinue: + while self.is_running and can_continue: try: for it in iters: - srcVal = next(it) # raises StopIteration when a source runs out of data at which point we quit - for outVal in self.generate(srcVal): - yield outVal + src_val = next(it) # raises StopIteration when a source runs out of data at which point we quit + for out_val in self.generate(src_val): + yield out_val # must be caught 
as StopIteration won't propagate but magically mutate into RuntimeError except StopIteration: - canContinue = False + can_continue = False @export class PrefetchStream(DataStream): """ Calculates item dtype and shape before iteration. This will get a value from `src` in the constructor, assign it to - self.srcVal, then assign the dtypes and shapes of the arrays to self.dtypes and self.shapes respectively. When it is - iterated over self.srcVal is yielded first followed by whatever else `src` produces so no data is lost. + self.src_val, then assign the dtypes and shapes of the arrays to self.dtypes and self.shapes respectively. When it is + iterated over self.src_val is yielded first followed by whatever else `src` produces so no data is lost. """ def __init__(self, src): self.origSrc = src self.it = iter(src) - self.srcVal = next(self.it) + self.src_val = next(self.it) - if isinstance(self.srcVal, np.ndarray): - self.dtypes = self.srcVal.dtype - self.shapes = self.srcVal.shape + if isinstance(self.src_val, np.ndarray): + self.dtypes = self.src_val.dtype + self.shapes = self.src_val.shape else: - self.dtypes = tuple(b.dtype for b in self.srcVal) - self.shapes = tuple(b.shape for b in self.srcVal) + self.dtypes = tuple(b.dtype for b in self.src_val) + self.shapes = tuple(b.shape for b in self.src_val) - super().__init__(RestartGenerator(self._getSrc)) + super().__init__(RestartGenerator(self._get_src)) - def _getSrc(self): + def _get_src(self): if self.it is not None: - yield self.srcVal + yield self.src_val else: self.it = iter(self.origSrc) # self.it is None when restarting so recreate the iterator here - for srcVal in self.it: - yield srcVal + for src_val in self.it: + yield src_val self.it = None - + @export -@alias("finitestream") +@alias("finitestream") class FiniteStream(DataStream): """Yields only the specified number of items before quiting.""" - def __init__(self, src, numItems): + + def __init__(self, src, num_items): super().__init__(src) - self.numItems = numItems - + self.num_items = num_items + def __iter__(self): - for _, item in zip(range(self.numItems), super().__iter__()): + for _, item in zip(range(self.num_items), super().__iter__()): yield item - + @export @alias("tracestream") class TraceStream(DataStream): + def generate(self, val): vals = val if isinstance(val, (tuple, list)) else (val,) diff --git a/monai/data/streams/threadbufferstream.py b/monai/data/streams/threadbufferstream.py index cd3908df50..b4bb91f4a5 100644 --- a/monai/data/streams/threadbufferstream.py +++ b/monai/data/streams/threadbufferstream.py @@ -1,8 +1,9 @@ +from queue import Empty, Full, Queue +from threading import Thread + import monai -from monai.utils.aliases import alias from monai.data.streams import DataStream -from queue import Queue, Full, Empty -from threading import Thread +from monai.utils.aliases import alias @monai.utils.export("monai.data.streams") @@ -12,26 +13,26 @@ class ThreadBufferStream(DataStream): Iterates over values from self.src in a separate thread but yielding them in the current thread. This allows values to be queued up asynchronously. The internal thread will continue running so long as the source has values or until the stop() method is called. 
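A small, hypothetical sketch of buffering a slow source in a background thread with the renamed arguments (the sleep-based source is a stand-in for real I/O):

```python
import time

import numpy as np

from monai.data.streams.datastream import DataStream
from monai.data.streams.threadbufferstream import ThreadBufferStream

def slow_source():
    for i in range(3):
        time.sleep(0.1)  # pretend each item is expensive to load
        yield np.full((2, 2), float(i))

buffered = ThreadBufferStream(DataStream(slow_source()), buffer_size=2)
for item in buffered:
    print(item.mean())  # 0.0, 1.0, 2.0
buffered.stop()  # joins the enqueueing thread so the stream can be discarded cleanly
```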
- - One issue raised by using a thread in this way is that during the lifetime of the thread the source object is being + + One issue raised by using a thread in this way is that during the lifetime of the thread the source object is being iterated over, so if the thread hasn't finished another attempt to iterate over it will raise an exception or yield - inexpected results. To ensure the thread releases the iteration and proper cleanup is done the stop() method must - be called which will join with the thread. + inexpected results. To ensure the thread releases the iteration and proper cleanup is done the stop() method must + be called which will join with the thread. """ - def __init__(self, src, bufferSize=1, timeout=0.01): + def __init__(self, src, buffer_size=1, timeout=0.01): super().__init__(src) - self.bufferSize = bufferSize + self.buffer_size = buffer_size self.timeout = timeout - self.buffer = Queue(self.bufferSize) - self.genThread = None + self.buffer = Queue(self.buffer_size) + self.gen_thread = None - def enqueueValues(self): + def enqueue_values(self): # allows generate() to be overridden and used here (instead of iter(self.src)) - for srcVal in super().__iter__(): - while self.isRunning: + for src_val in super().__iter__(): + while self.is_running: try: - self.buffer.put(srcVal, timeout=self.timeout) + self.buffer.put(src_val, timeout=self.timeout) except Full: pass # try to add the item again else: @@ -41,16 +42,16 @@ def enqueueValues(self): def stop(self): super().stop() - if self.genThread is not None: - self.genThread.join() + if self.gen_thread is not None: + self.gen_thread.join() def __iter__(self): - self.genThread = Thread(target=self.enqueueValues, daemon=True) - self.genThread.start() - self.isRunning = True + self.gen_thread = Thread(target=self.enqueue_values, daemon=True) + self.gen_thread.start() + self.is_running = True try: - while self.isRunning and (self.genThread.is_alive() or not self.buffer.empty()): + while self.is_running and (self.gen_thread.is_alive() or not self.buffer.empty()): try: yield self.buffer.get(timeout=self.timeout) except Empty: diff --git a/monai/data/transforms/noise_adder.py b/monai/data/transforms/noise_adder.py index 064dda3a21..a9900bc86c 100644 --- a/monai/data/transforms/noise_adder.py +++ b/monai/data/transforms/noise_adder.py @@ -14,4 +14,3 @@ def __init__(self, noise): def _handle_any(self, img): return img + self.noise - diff --git a/monai/networks/__init__.py b/monai/networks/__init__.py index 8b13789179..e69de29bb2 100644 --- a/monai/networks/__init__.py +++ b/monai/networks/__init__.py @@ -1 +0,0 @@ - diff --git a/monai/networks/layers/__init__.py b/monai/networks/layers/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/networks/layers/__init__.py +++ b/monai/networks/layers/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/networks/layers/convolutions.py b/monai/networks/layers/convolutions.py index 8cd33ef5e3..d21d4cfa2d 100644 --- a/monai/networks/layers/convolutions.py +++ b/monai/networks/layers/convolutions.py @@ -1,73 +1,95 @@ -import torch.nn as nn import numpy as np +import torch.nn as nn -from monai.utils.convutils import samePadding -from monai.networks.utils import getConvType, getDropoutType, getNormalizeType +from monai.networks.utils import (get_conv_type, get_dropout_type, get_normalize_type) +from monai.utils.convutils import same_padding class Convolution(nn.Sequential): - def __init__(self, dimensions, inChannels, outChannels, strides=1, kernelSize=3, instanceNorm=True, - dropout=0, 
dilation=1, bias=True, convOnly=False, isTransposed=False): + + def __init__(self, + dimensions, + in_channels, + out_channels, + strides=1, + kernel_size=3, + instance_norm=True, + dropout=0, + dilation=1, + bias=True, + conv_only=False, + is_transposed=False): super().__init__() self.dimensions = dimensions - self.inChannels = inChannels - self.outChannels = outChannels - self.isTransposed = isTransposed + self.in_channels = in_channels + self.out_channels = out_channels + self.is_transposed = is_transposed - padding = samePadding(kernelSize, dilation) - normalizeType = getNormalizeType(dimensions, instanceNorm) - convType = getConvType(dimensions, isTransposed) - dropType = getDropoutType(dimensions) + padding = same_padding(kernel_size, dilation) + normalize_type = get_normalize_type(dimensions, instance_norm) + conv_type = get_conv_type(dimensions, is_transposed) + drop_type = get_dropout_type(dimensions) - if isTransposed: - conv = convType(inChannels, outChannels, kernelSize, strides, padding, strides - 1, 1, bias, dilation) + if is_transposed: + conv = conv_type(in_channels, out_channels, kernel_size, strides, padding, strides - 1, 1, bias, dilation) else: - conv = convType(inChannels, outChannels, kernelSize, strides, padding, dilation, bias=bias) + conv = conv_type(in_channels, out_channels, kernel_size, strides, padding, dilation, bias=bias) self.add_module("conv", conv) - if not convOnly: - self.add_module("norm", normalizeType(outChannels)) + if not conv_only: + self.add_module("norm", normalize_type(out_channels)) if dropout > 0: # omitting Dropout2d appears faster than relying on it short-circuiting when dropout==0 - self.add_module("dropout", dropType(dropout)) + self.add_module("dropout", drop_type(dropout)) self.add_module("prelu", nn.modules.PReLU()) class ResidualUnit(nn.Module): - def __init__(self, dimensions, inChannels, outChannels, strides=1, kernelSize=3, subunits=2, instanceNorm=True, - dropout=0, dilation=1, bias=True, lastConvOnly=False): + + def __init__(self, + dimensions, + in_channels, + out_channels, + strides=1, + kernel_size=3, + subunits=2, + instance_norm=True, + dropout=0, + dilation=1, + bias=True, + last_conv_only=False): super().__init__() self.dimensions = dimensions - self.inChannels = inChannels - self.outChannels = outChannels + self.in_channels = in_channels + self.out_channels = out_channels self.conv = nn.Sequential() self.residual = nn.Identity() - padding = samePadding(kernelSize, dilation) - schannels = inChannels + padding = same_padding(kernel_size, dilation) + schannels = in_channels sstrides = strides subunits = max(1, subunits) for su in range(subunits): - convOnly = lastConvOnly and su == (subunits - 1) - unit = Convolution(dimensions, schannels, outChannels, sstrides, kernelSize, instanceNorm, dropout, - dilation, bias, convOnly) + conv_only = last_conv_only and su == (subunits - 1) + unit = Convolution(dimensions, schannels, out_channels, sstrides, kernel_size, instance_norm, dropout, + dilation, bias, conv_only) self.conv.add_module("unit%i" % su, unit) - schannels = outChannels # after first loop set channels and strides to what they should be for subsequent units + schannels = out_channels # after first loop set channels and strides to what they should be for subsequent units sstrides = 1 # apply convolution to input to change number of output channels and size to match that coming from self.conv - if np.prod(strides) != 1 or inChannels != outChannels: - rkernelSize = kernelSize + if np.prod(strides) != 1 or in_channels != 
out_channels: + rkernel_size = kernel_size rpadding = padding if np.prod(strides) == 1: # if only adapting number of channels a 1x1 kernel is used with no padding - rkernelSize = 1 + rkernel_size = 1 rpadding = 0 - convType = getConvType(dimensions, False) - self.residual = convType(inChannels, outChannels, rkernelSize, strides, rpadding, bias=bias) + conv_type = get_conv_type(dimensions, False) + self.residual = conv_type(in_channels, out_channels, rkernel_size, strides, rpadding, bias=bias) def forward(self, x): res = self.residual(x) # create the additive residual from x diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index 74f7a59108..9e8e953163 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -5,13 +5,13 @@ class SkipConnection(nn.Module): """Concats the forward pass input with the result from the given submodule.""" - def __init__(self, submodule, catDim=1): + def __init__(self, submodule, cat_dim=1): super().__init__() self.submodule = submodule - self.catDim = catDim + self.cat_dim = cat_dim def forward(self, x): - return torch.cat([x, self.submodule(x)], self.catDim) + return torch.cat([x, self.submodule(x)], self.cat_dim) class Flatten(nn.Module): diff --git a/monai/networks/losses/__init__.py b/monai/networks/losses/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/networks/losses/__init__.py +++ b/monai/networks/losses/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/networks/losses/dice.py b/monai/networks/losses/dice.py index b8d29b170e..ca2417f5bf 100644 --- a/monai/networks/losses/dice.py +++ b/monai/networks/losses/dice.py @@ -1,31 +1,31 @@ import torch from torch.nn.modules.loss import _Loss -from monai.utils.aliases import alias +from monai.networks.utils import one_hot from monai.utils import export -from monai.networks.utils import oneHot +from monai.utils.aliases import alias @export("monai.networks.losses") @alias("dice", "Dice") class DiceLoss(_Loss): """ - Multiclass dice loss. Input logits 'pred' (BNHW[D] where N is number of classes) is compared with ground truth - `ground' (B1HW[D]). Axis N of `pred' is expected to have logit predictions for each class rather than being image - channels, while the same axis of `ground' should be 1. If the N channel of `pred' is 1 binary dice loss will be - calculated. The `smooth' parameter is a value added to the intersection and union components of the inter-over-union - calculation to smooth results and prevent divide-by-0, this value should be small. The `includeBackground' class + Multiclass dice loss. Input logits 'pred' (BNHW[D] where N is number of classes) is compared with ground truth + `ground' (B1HW[D]). Axis N of `pred' is expected to have logit predictions for each class rather than being image + channels, while the same axis of `ground' should be 1. If the N channel of `pred' is 1 binary dice loss will be + calculated. The `smooth' parameter is a value added to the intersection and union components of the inter-over-union + calculation to smooth results and prevent divide-by-0, this value should be small. The `include_background' class attribute can be set to False for an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background. If the non-background segmentations are small compared to the total image size they can get overwhelmed by the signal from the background so excluding it in such cases helps convergence. 
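A minimal sketch of the renamed loss applied to random logits (batch size, class count and spatial size are assumptions for illustration):

```python
import torch

from monai.networks.losses.dice import DiceLoss

pred = torch.randn(2, 3, 16, 16)              # B=2 samples, N=3 class logits
ground = torch.randint(0, 3, (2, 1, 16, 16))  # single-channel label map
loss = DiceLoss(include_background=False)     # exclude channel 0 (background)
print(loss(pred, ground))
```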
""" - def __init__(self, includeBackground=True): + def __init__(self, include_background=True): """ - If `includeBackground` is False channel index 0 (background category) is excluded from the calculation. + If `include_background` is False channel index 0 (background category) is excluded from the calculation. """ super().__init__() - self.includeBackground = includeBackground + self.includeBackground = include_background def forward(self, pred, ground, smooth=1e-5): if ground.shape[1] != 1: @@ -35,14 +35,14 @@ def forward(self, pred, ground, smooth=1e-5): psum = pred.float().sigmoid() tsum = ground else: - pinds=(0, 3, 1, 2) if len(ground.shape)==4 else (0, 4, 1, 2, 3) + pinds = (0, 3, 1, 2) if len(ground.shape) == 4 else (0, 4, 1, 2, 3) # multiclass dice loss, use softmax in the first dimension and convert target to one-hot encoding psum = torch.softmax(pred, 1) - tsum = oneHot(ground, pred.shape[1]) # BCHW(D) -> BCHW(D)N + tsum = one_hot(ground, pred.shape[1]) # BCHW(D) -> BCHW(D)N tsum = tsum[:, 0].permute(*pinds).contiguous() # BCHW(D)N -> BNHW(D) - assert tsum.shape == pred.shape, ("Ground truth one-hot has differing shape (%r) from source (%r)" - % (tsum.shape, pred.shape)) + assert tsum.shape == pred.shape, ("Ground truth one-hot has differing shape (%r) from source (%r)" % + (tsum.shape, pred.shape)) # exclude background category so that it doesn't overwhelm the other segmentations if they are small if not self.includeBackground: diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index 139597f9cb..e69de29bb2 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -1,2 +0,0 @@ - - diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py index 2d7220b102..73d99f958c 100644 --- a/monai/networks/nets/unet.py +++ b/monai/networks/nets/unet.py @@ -1,31 +1,41 @@ import torch.nn as nn -from monai.networks.layers.simplelayers import SkipConnection from monai.networks.layers.convolutions import Convolution, ResidualUnit -from monai.networks.utils import predictSegmentation -from monai.utils.aliases import alias +from monai.networks.layers.simplelayers import SkipConnection +from monai.networks.utils import predict_segmentation from monai.utils import export +from monai.utils.aliases import alias @export("monai.networks.nets") @alias("Unet", "unet") class UNet(nn.Module): - def __init__(self, dimensions, inChannels, numClasses, channels, strides, kernelSize=3, upKernelSize=3, - numResUnits=0, instanceNorm=True, dropout=0): + + def __init__(self, + dimensions, + in_channels, + num_classes, + channels, + strides, + kernel_size=3, + up_kernel_size=3, + num_res_units=0, + instance_norm=True, + dropout=0): super().__init__() assert len(channels) == (len(strides) + 1) self.dimensions = dimensions - self.inChannels = inChannels - self.numClasses = numClasses + self.in_channels = in_channels + self.num_classes = num_classes self.channels = channels self.strides = strides - self.kernelSize = kernelSize - self.upKernelSize = upKernelSize - self.numResUnits = numResUnits - self.instanceNorm = instanceNorm + self.kernel_size = kernel_size + self.up_kernel_size = up_kernel_size + self.num_res_units = num_res_units + self.instance_norm = instance_norm self.dropout = dropout - def _createBlock(inc, outc, channels, strides, isTop): + def _create_block(inc, outc, channels, strides, is_top): """ Builds the UNet structure from the bottom up by recursing down to the bottom block, then creating sequential blocks containing the downsample 
path, a skip connection around the previous block, and the upsample path. @@ -34,42 +44,56 @@ def _createBlock(inc, outc, channels, strides, isTop): s = strides[0] if len(channels) > 2: - subblock = _createBlock(c, c, channels[1:], strides[1:], False) # continue recursion down + subblock = _create_block(c, c, channels[1:], strides[1:], False) # continue recursion down upc = c * 2 else: # the next layer is the bottom so stop recursion, create the bottom layer as the sublock for this layer - subblock = self._getBottomLayer(c, channels[1]) + subblock = self._get_bottom_layer(c, channels[1]) upc = c + channels[1] - down = self._getDownLayer(inc, c, s, isTop) # create layer in downsampling path - up = self._getUpLayer(upc, outc, s, isTop) # create layer in upsampling path + down = self._get_down_layer(inc, c, s, is_top) # create layer in downsampling path + up = self._get_up_layer(upc, outc, s, is_top) # create layer in upsampling path return nn.Sequential(down, SkipConnection(subblock), up) - self.model = _createBlock(inChannels, numClasses, self.channels, self.strides, True) + self.model = _create_block(in_channels, num_classes, self.channels, self.strides, True) - def _getDownLayer(self, inChannels, outChannels, strides, isTop): - if self.numResUnits > 0: - return ResidualUnit(self.dimensions, inChannels, outChannels, strides, self.kernelSize, self.numResUnits, - self.instanceNorm, self.dropout) + def _get_down_layer(self, in_channels, out_channels, strides, is_top): + if self.num_res_units > 0: + return ResidualUnit(self.dimensions, in_channels, out_channels, strides, self.kernel_size, self.num_res_units, + self.instance_norm, self.dropout) else: - return Convolution(self.dimensions, inChannels, outChannels, strides, self.kernelSize, - self.instanceNorm, self.dropout) + return Convolution(self.dimensions, in_channels, out_channels, strides, self.kernel_size, self.instance_norm, + self.dropout) - def _getBottomLayer(self, inChannels, outChannels): - return self._getDownLayer(inChannels, outChannels, 1, False) + def _get_bottom_layer(self, in_channels, out_channels): + return self._get_down_layer(in_channels, out_channels, 1, False) - def _getUpLayer(self, inChannels, outChannels, strides, isTop): - conv = Convolution(self.dimensions, inChannels, outChannels, strides, self.upKernelSize, self.instanceNorm, - self.dropout, convOnly=isTop and self.numResUnits == 0, isTransposed=True) + def _get_up_layer(self, in_channels, out_channels, strides, is_top): + conv = Convolution(self.dimensions, + in_channels, + out_channels, + strides, + self.up_kernel_size, + self.instance_norm, + self.dropout, + conv_only=is_top and self.num_res_units == 0, + is_transposed=True) - if self.numResUnits > 0: - ru = ResidualUnit(self.dimensions, outChannels, outChannels, 1, self.kernelSize, 1, self.instanceNorm, - self.dropout, lastConvOnly=isTop) + if self.num_res_units > 0: + ru = ResidualUnit(self.dimensions, + out_channels, + out_channels, + 1, + self.kernel_size, + 1, + self.instance_norm, + self.dropout, + last_conv_only=is_top) return nn.Sequential(conv, ru) else: return conv def forward(self, x): x = self.model(x) - return x, predictSegmentation(x) + return x, predict_segmentation(x) diff --git a/monai/networks/utils.py b/monai/networks/utils.py index 9c4d043e6f..7514fdbfe9 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -6,28 +6,28 @@ import torch.nn as nn -def oneHot(labels, numClasses): +def one_hot(labels, num_classes): """ - For a tensor `labels' of dimensions BC[D][H]W, return a 
tensor of dimensions BC[D][H]WN for `numClasses' N number of + For a tensor `labels' of dimensions BC[D][H]W, return a tensor of dimensions BC[D][H]WN for `num_classes' N number of classes. For every value v = labels[b,c,h,w], the value in the result at [b,c,h,w,v] will be 1 and all others 0. Note that this will include the background label, thus a binary mask should be treated as having 2 classes. """ - onehotshape = tuple(labels.shape) + (numClasses,) - labels = labels % numClasses - y = torch.eye(numClasses, device=labels.device) + onehotshape = tuple(labels.shape) + (num_classes,) + labels = labels % num_classes + y = torch.eye(num_classes, device=labels.device) onehot = y[labels.view(-1).long()] return onehot.reshape(*onehotshape) -def sliceChannels(tensor, *slicevals): +def slice_channels(tensor, *slicevals): slices = [slice(None)] * len(tensor.shape) slices[1] = slice(*slicevals) return tensor[slices] -def predictSegmentation(logits): +def predict_segmentation(logits): """ Given the logits from a network, computing the segmentation by thresholding all values above 0 if `logits' has one channel, or computing the argmax along the channel axis otherwise. @@ -39,8 +39,8 @@ def predictSegmentation(logits): return logits.max(1)[1] # take the index of the max value along dimension 1 -def getConvType(dim, isTranspose): - if isTranspose: +def get_conv_type(dim, is_transpose): + if is_transpose: types = [nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d] else: types = [nn.Conv1d, nn.Conv2d, nn.Conv3d] @@ -48,16 +48,15 @@ def getConvType(dim, isTranspose): return types[dim - 1] -def getDropoutType(dim): +def get_dropout_type(dim): types = [nn.Dropout, nn.Dropout2d, nn.Dropout3d] return types[dim - 1] -def getNormalizeType(dim, isInstance): - if isInstance: +def get_normalize_type(dim, is_instance): + if is_instance: types = [nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d] else: types = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d] return types[dim - 1] - diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py index 0f7ef34d9c..9ad64f2b88 100644 --- a/monai/utils/__init__.py +++ b/monai/utils/__init__.py @@ -1,3 +1,3 @@ # have to explicitly bring these in here to resolve circular import issues -from .aliases import alias, resolveName +from .aliases import alias, resolve_name from .moduleutils import export diff --git a/monai/utils/aliases.py b/monai/utils/aliases.py index f79a53c572..362fb4ea62 100644 --- a/monai/utils/aliases.py +++ b/monai/utils/aliases.py @@ -4,8 +4,8 @@ import importlib -aliasLock = threading.RLock() -globalAliases = {} +AliasLock = threading.RLock() +GlobalAliases = {} def alias(*names): @@ -15,25 +15,25 @@ def alias(*names): def _outer(obj): for n in names: - with aliasLock: - globalAliases[n] = obj + with AliasLock: + GlobalAliases[n] = obj return obj return _outer -def resolveName(name): +def resolve_name(name): """ Search for the declaration (function or class) with the given name. This will first search the list of aliases to see if it was declared with this aliased name, then search treating `name` as a fully qualified name, then search the loaded modules for one having a declaration with the given name. If no declaration is found, raise ValueError. 
""" # attempt to resolve an alias - with aliasLock: - obj = globalAliases.get(name, None) + with AliasLock: + obj = GlobalAliases.get(name, None) - assert name not in globalAliases or obj is not None + assert name not in GlobalAliases or obj is not None # attempt to resolve a qualified name if obj is None and "." in name: diff --git a/monai/utils/arrayutils.py b/monai/utils/arrayutils.py index dcc6191d95..cc6c843c52 100644 --- a/monai/utils/arrayutils.py +++ b/monai/utils/arrayutils.py @@ -1,36 +1,37 @@ import random + import numpy as np -def randChoice(prob=0.5): +def rand_choice(prob=0.5): """Returns True if a randomly chosen number is less than or equal to `prob', by default this is a 50/50 chance.""" return random.random() <= prob -def imgBounds(img): +def img_bounds(img): """Returns the minimum and maximum indices of non-zero lines in axis 0 of `img', followed by that for axis 1.""" ax0 = np.any(img, axis=0) ax1 = np.any(img, axis=1) return np.concatenate((np.where(ax0)[0][[0, -1]], np.where(ax1)[0][[0, -1]])) -def inBounds(x, y, margin, maxx, maxy): +def in_bounds(x, y, margin, maxx, maxy): """Returns True if (x,y) is within the rectangle (margin,margin,maxx-margin,maxy-margin).""" return margin <= x < (maxx - margin) and margin <= y < (maxy - margin) -def isEmpty(img): +def is_empty(img): """Returns True if `img' is empty, that is its maximum value is not greater than its minimum.""" return not (img.max() > img.min()) # use > instead of <= so that an image full of NaNs will result in True -def ensureTupleSize(tup, dim): +def ensure_tuple_size(tup, dim): """Returns a copy of `tup' with `dim' values by either shortened or padded with zeros as necessary.""" tup = tuple(tup) + (0,) * dim return tup[:dim] -def zeroMargins(img, margin): +def zero_margins(img, margin): """Returns True if the values within `margin' indices of the edges of `img' in dimensions 1 and 2 are 0.""" if np.any(img[:, :, :margin]) or np.any(img[:, :, -margin:]): return False @@ -41,7 +42,7 @@ def zeroMargins(img, margin): return True -def rescaleArray(arr, minv=0.0, maxv=1.0, dtype=np.float32): +def rescale_array(arr, minv=0.0, maxv=1.0, dtype=np.float32): """Rescale the values of numpy array `arr' to be from `minv' to `maxv'.""" if dtype is not None: arr = arr.astype(dtype) @@ -56,29 +57,29 @@ def rescaleArray(arr, minv=0.0, maxv=1.0, dtype=np.float32): return (norm * (maxv - minv)) + minv # rescale by minv and maxv, which is the normalized array by default -def rescaleInstanceArray(arr, minv=0.0, maxv=1.0, dtype=np.float32): +def rescale_instance_array(arr, minv=0.0, maxv=1.0, dtype=np.float32): """Rescale each array slice along the first dimension of `arr' independently.""" out = np.zeros(arr.shape, dtype) for i in range(arr.shape[0]): - out[i] = rescaleArray(arr[i], minv, maxv, dtype) + out[i] = rescale_array(arr[i], minv, maxv, dtype) return out -def rescaleArrayIntMax(arr, dtype=np.uint16): +def rescale_array_int_max(arr, dtype=np.uint16): """Rescale the array `arr' to be between the minimum and maximum values of the type `dtype'.""" info = np.iinfo(dtype) - return rescaleArray(arr, info.min, info.max).astype(dtype) + return rescale_array(arr, info.min, info.max).astype(dtype) -def copypasteArrays(src, dest, srccenter, destcenter, dims): +def copypaste_arrays(src, dest, srccenter, destcenter, dims): """ - Calculate the slices to copy a sliced area of array `src' into array `dest'. 
The area has dimensions `dims' (use 0 - or None to copy everything in that dimension), the source area is centered at `srccenter' index in `src' and copied - into area centered at `destcenter' in `dest'. The dimensions of the copied area will be clipped to fit within the - source and destination arrays so a smaller area may be copied than expected. Return value is the tuples of slice + Calculate the slices to copy a sliced area of array `src' into array `dest'. The area has dimensions `dims' (use 0 + or None to copy everything in that dimension), the source area is centered at `srccenter' index in `src' and copied + into area centered at `destcenter' in `dest'. The dimensions of the copied area will be clipped to fit within the + source and destination arrays so a smaller area may be copied than expected. Return value is the tuples of slice objects indexing the copied area in `src', and those indexing the copy area in `dest'. - + Example: src=np.random.randint(0,10,(6,6)) dest=np.zeros_like(src) @@ -86,7 +87,7 @@ def copypasteArrays(src, dest, srccenter, destcenter, dims): dest[destslices]=src[srcslices] print(src) print(dest) - + >>> [[9 5 6 6 9 6] [4 3 5 6 1 2] [0 7 3 2 4 1] @@ -106,7 +107,8 @@ def copypasteArrays(src, dest, srccenter, destcenter, dims): for i, ss, ds, sc, dc, dim in zip(range(src.ndim), src.shape, dest.shape, srccenter, destcenter, dims): if dim: d1 = np.clip(dim // 2, 0, min(sc, dc)) # dimension before midpoint, clip to size fitting in both arrays - d2 = np.clip(dim // 2 + 1, 0, min(ss - sc, ds - dc)) # dimension after midpoint, clip to size fitting in both arrays + d2 = np.clip(dim // 2 + 1, 0, min(ss - sc, + ds - dc)) # dimension after midpoint, clip to size fitting in both arrays srcslices[i] = slice(sc - d1, sc + d2) destslices[i] = slice(dc - d1, dc + d2) @@ -114,17 +116,19 @@ def copypasteArrays(src, dest, srccenter, destcenter, dims): return tuple(srcslices), tuple(destslices) -def resizeCenter(img, *resizeDims, fillValue=0): +def resize_center(img, *resize_dims, fill_value=0): """ Resize `img' by cropping or expanding the image from the center. The `resizeDims' values are the output dimensions (or None to use original dimension of `img'). If a dimension is smaller than that of `img' then the result will be cropped and if larger padded with zeros, in both cases this is done relative to the center of `img'. The result is a new image with the specified dimensions and values from `img' copied into its center. 
""" - resizeDims = tuple(resizeDims[i] or img.shape[i] for i in range(len(resizeDims))) + resize_dims = tuple(resize_dims[i] or img.shape[i] for i in range(len(resize_dims))) - dest = np.full(resizeDims, fillValue, img.dtype) - srcslices, destslices = copypasteArrays(img, dest, np.asarray(img.shape) // 2, np.asarray(dest.shape) // 2, resizeDims) + dest = np.full(resize_dims, fill_value, img.dtype) + srcslices, destslices = copypaste_arrays(img, dest, + np.asarray(img.shape) // 2, + np.asarray(dest.shape) // 2, resize_dims) dest[destslices] = img[srcslices] return dest diff --git a/monai/utils/convutils.py b/monai/utils/convutils.py index 67be4e6326..9e030e140d 100644 --- a/monai/utils/convutils.py +++ b/monai/utils/convutils.py @@ -4,37 +4,37 @@ import numpy as np -def samePadding(kernelSize, dilation=1): +def same_padding(kernel_size, dilation=1): """ Return the padding value needed to ensure a convolution using the given kernel size produces an output of the same shape as the input for a stride of 1, otherwise ensure a shape of the input divided by the stride rounded down. """ - kernelSize = np.atleast_1d(kernelSize) - padding = ((kernelSize - 1) // 2) + (dilation - 1) + kernel_size = np.atleast_1d(kernel_size) + padding = ((kernel_size - 1) // 2) + (dilation - 1) padding = tuple(int(p) for p in padding) return tuple(padding) if len(padding) > 1 else padding[0] -def calculateOutShape(inShape, kernelSize, stride, padding): +def calculate_out_shape(in_shape, kernel_size, stride, padding): """ Calculate the output tensor shape when applying a convolution to a tensor of shape `inShape' with kernel size - 'kernelSize', stride value `stride', and input padding value `padding'. All arguments can be scalars or multiple + 'kernel_size', stride value `stride', and input padding value `padding'. All arguments can be scalars or multiple values, return value is a scalar if all inputs are scalars. """ - inShape = np.atleast_1d(inShape) - outShape = ((inShape - kernelSize + padding + padding) // stride) + 1 - outShape = tuple(int(s) for s in outShape) + in_shape = np.atleast_1d(in_shape) + out_shape = ((in_shape - kernel_size + padding + padding) // stride) + 1 + out_shape = tuple(int(s) for s in out_shape) - return tuple(outShape) if len(outShape) > 1 else outShape[0] + return tuple(out_shape) if len(out_shape) > 1 else out_shape[0] -def oneHot(labels, numClasses): +def one_hot(labels, num_classes): """ - Converts label image `labels' to a one-hot vector with `numClasses' number of channels as last dimension. + Converts label image `labels' to a one-hot vector with `num_classes' number of channels as last dimension. """ - labels = labels % numClasses - y = np.eye(numClasses) + labels = labels % num_classes + y = np.eye(num_classes) onehot = y[labels.flatten()] - return onehot.reshape(tuple(labels.shape) + (numClasses,)).astype(labels.dtype) + return onehot.reshape(tuple(labels.shape) + (num_classes,)).astype(labels.dtype) diff --git a/monai/utils/decorators.py b/monai/utils/decorators.py index 73ec32c103..98117b9924 100644 --- a/monai/utils/decorators.py +++ b/monai/utils/decorators.py @@ -1,5 +1,6 @@ import time from functools import wraps + import monai export = monai.utils.export("monai.utils") @@ -31,38 +32,38 @@ class RestartGenerator: used to create an iterator which can start iteration over the given generator multiple times. 
""" - def __init__(self, createGen): - self.createGen = createGen + def __init__(self, create_gen): + self.create_gen = create_gen def __iter__(self): - return self.createGen() + return self.create_gen() @export class MethodReplacer(object): """ - Base class for method decorators which can be used to replace methods pass to replaceMethod() with wrapped versions. + Base class for method decorators which can be used to replace methods pass to replace_method() with wrapped versions. """ - replaceListName = "__replacemethods__" + replace_list_name = "__replacemethods__" def __init__(self, meth): self.meth = meth - def replaceMethod(self, meth): + def replace_method(self, meth): """Return a new method to replace `meth` in the instantiated object, or `meth` to do nothing.""" return meth def __set_name__(self, owner, name): """ - Add the (name,self.replaceMethod) pair to the list named by replaceListName in `owner`, creating the list and + Add the (name,self.replace_method) pair to the list named by replace_list_name in `owner`, creating the list and replacing the constructor of `owner` if necessary. The replaced constructor will call the old one then do the - replacing operation of substituting, for each (name,self.replaceMethod) pair, the named method with the returned - value from self.replaceMethod. + replacing operation of substituting, for each (name,self.replace_method) pair, the named method with the returned + value from self.replace_method. """ - entry = (name, owner, self.replaceMethod) + entry = (name, owner, self.replace_method) - if not hasattr(owner, self.replaceListName): + if not hasattr(owner, self.replace_list_name): oldinit = owner.__init__ # replace the constructor with a new one which calls the old then replaces methods @@ -71,16 +72,16 @@ def newinit(_self, *args, **kwargs): oldinit(_self, *args, **kwargs) # replace each listed method of this newly constructed object - for m, owner, replacer in getattr(_self, self.replaceListName): + for m, owner, replacer in getattr(_self, self.replace_list_name): if isinstance(_self, owner): meth = getattr(_self, m) newmeth = replacer(meth) setattr(_self, m, newmeth) setattr(owner, "__init__", newinit) - setattr(owner, self.replaceListName, [entry]) + setattr(owner, self.replace_list_name, [entry]) else: - namelist = getattr(owner, self.replaceListName) + namelist = getattr(owner, self.replace_list_name) if not any(nl[0] == name for nl in namelist): namelist.append(entry) diff --git a/monai/utils/mathutils.py b/monai/utils/mathutils.py index 8f5dd3c88e..0a9d27b957 100644 --- a/monai/utils/mathutils.py +++ b/monai/utils/mathutils.py @@ -1,19 +1,18 @@ import itertools -import numpy as np -def zipWith(op, *vals, mapfunc=map): +def zip_with(op, *vals, mapfunc=map): """ Map `op`, using `mapfunc`, to each tuple derived from zipping the iterables in `vals'. """ return mapfunc(op, zip(*vals)) -def starZipWith(op, *vals): +def star_zip_with(op, *vals): """ Use starmap as the mapping function in zipWith. 
""" - return zipWith(op, *vals, mapfunc=itertools.starmap) + return zip_with(op, *vals, mapfunc=itertools.starmap) def first(iterable, default=None): @@ -25,7 +24,7 @@ def first(iterable, default=None): return default -def ensureTuple(vals): +def ensure_tuple(vals): if not isinstance(vals, (list, tuple)): vals = (vals,) diff --git a/monai/utils/moduleutils.py b/monai/utils/moduleutils.py index e0da2b89c3..c68dd9d872 100644 --- a/monai/utils/moduleutils.py +++ b/monai/utils/moduleutils.py @@ -16,15 +16,15 @@ def _inner(obj): return _inner -def loadSubmodules(basemod, loadAll=True, excludePattern="(.*[tT]est.*)|(_.*)"): +def load_submodules(basemod, load_all=True, exclude_pattern="(.*[tT]est.*)|(_.*)"): """ Traverse the source of the module structure starting with module `basemod`, loading all packages plus all files if `loadAll` is True, excluding anything whose name matches `excludePattern`. """ submodules = [] - for importer, name, isPkg in walk_packages(basemod.__path__): - if (isPkg or loadAll) and match(excludePattern, name) is None: + for importer, name, is_pkg in walk_packages(basemod.__path__): + if (is_pkg or load_all) and match(exclude_pattern, name) is None: mod = import_module(basemod.__name__ + "." + name) # why do I need to do this first? importer.find_module(name).load_module(name) submodules.append(mod) @@ -33,7 +33,7 @@ def loadSubmodules(basemod, loadAll=True, excludePattern="(.*[tT]est.*)|(_.*)"): @export("monai.utils") -def getFullTypeName(typeobj): +def get_full_type_name(typeobj): module = typeobj.__module__ if module is None or module == str.__class__.__module__: return typeobj.__name__ # Avoid reporting __builtin__ From 1f521a198635b0b378d4e0be14e48e025258d72f Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 13 Jan 2020 14:35:09 +0000 Subject: [PATCH 2/3] update the function signatures --- monai/networks/layers/convolutions.py | 28 ++++----------------------- monai/networks/nets/unet.py | 13 ++----------- 2 files changed, 6 insertions(+), 35 deletions(-) diff --git a/monai/networks/layers/convolutions.py b/monai/networks/layers/convolutions.py index d21d4cfa2d..3dd2222b89 100644 --- a/monai/networks/layers/convolutions.py +++ b/monai/networks/layers/convolutions.py @@ -7,18 +7,8 @@ class Convolution(nn.Sequential): - def __init__(self, - dimensions, - in_channels, - out_channels, - strides=1, - kernel_size=3, - instance_norm=True, - dropout=0, - dilation=1, - bias=True, - conv_only=False, - is_transposed=False): + def __init__(self, dimensions, in_channels, out_channels, strides=1, kernel_size=3, instance_norm=True, dropout=0, + dilation=1, bias=True, conv_only=False, is_transposed=False): super().__init__() self.dimensions = dimensions self.in_channels = in_channels @@ -47,18 +37,8 @@ def __init__(self, class ResidualUnit(nn.Module): - def __init__(self, - dimensions, - in_channels, - out_channels, - strides=1, - kernel_size=3, - subunits=2, - instance_norm=True, - dropout=0, - dilation=1, - bias=True, - last_conv_only=False): + def __init__(self, dimensions, in_channels, out_channels, strides=1, kernel_size=3, subunits=2, instance_norm=True, + dropout=0, dilation=1, bias=True, last_conv_only=False): super().__init__() self.dimensions = dimensions self.in_channels = in_channels diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py index 73d99f958c..4cb9d13c5f 100644 --- a/monai/networks/nets/unet.py +++ b/monai/networks/nets/unet.py @@ -11,17 +11,8 @@ @alias("Unet", "unet") class UNet(nn.Module): - def __init__(self, - dimensions, - in_channels, - 
num_classes, - channels, - strides, - kernel_size=3, - up_kernel_size=3, - num_res_units=0, - instance_norm=True, - dropout=0): + def __init__(self, dimensions, in_channels, num_classes, channels, strides, kernel_size=3, up_kernel_size=3, + num_res_units=0, instance_norm=True, dropout=0): super().__init__() assert len(channels) == (len(strides) + 1) self.dimensions = dimensions From 05b248c01add51e986f83a8e85c089235f232ca8 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 13 Jan 2020 16:49:50 +0000 Subject: [PATCH 3/3] aliasLock -> alias_lock --- monai/utils/aliases.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monai/utils/aliases.py b/monai/utils/aliases.py index ee7d48bfa3..390b542ec8 100644 --- a/monai/utils/aliases.py +++ b/monai/utils/aliases.py @@ -15,7 +15,7 @@ import importlib -AliasLock = threading.RLock() +alias_lock = threading.RLock() GlobalAliases = {} @@ -26,7 +26,7 @@ def alias(*names): def _outer(obj): for n in names: - with AliasLock: + with alias_lock: GlobalAliases[n] = obj return obj @@ -41,7 +41,7 @@ def resolve_name(name): the loaded modules for one having a declaration with the given name. If no declaration is found, raise ValueError. """ # attempt to resolve an alias - with AliasLock: + with alias_lock: obj = GlobalAliases.get(name, None) assert name not in GlobalAliases or obj is not None
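The multi-class branch of the dice forward pass above softmaxes the logits and converts the integer label map to one-hot, with the class axis moved to match. A minimal, self-contained sketch of that plumbing, assuming this revision of monai plus torch and numpy are importable; the tensor sizes are illustrative only:

    import torch
    from monai.networks.utils import one_hot

    pred = torch.randn(2, 3, 8, 8)              # BNHW logits for N == 3 classes
    ground = torch.randint(0, 3, (2, 1, 8, 8))  # BCHW integer label map, one channel

    psum = torch.softmax(pred, 1)                        # class probabilities, BNHW
    tsum = one_hot(ground, pred.shape[1])                # BCHW -> BCHWN, class axis appended last
    tsum = tsum[:, 0].permute(0, 3, 1, 2).contiguous()   # BCHWN -> BNHW, same layout as psum
    assert tsum.shape == pred.shape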
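With the constructor arguments renamed, UNet is driven entirely through the snake_case keywords. A small usage sketch; the channel, stride and input sizes are made up for illustration and assume a spatial size divisible by the product of the strides:

    import torch
    from monai.networks.nets.unet import UNet

    net = UNet(dimensions=2,
               in_channels=1,
               num_classes=3,
               channels=(8, 16, 32),   # len(channels) must be len(strides) + 1
               strides=(2, 2),
               num_res_units=2)

    x = torch.randn(1, 1, 64, 64)      # BCHW input
    logits, seg = net(x)               # forward() returns the logits and predict_segmentation(logits)
    print(logits.shape, seg.shape)     # expected (1, 3, 64, 64) and (1, 64, 64)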
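The alias decorator records a declaration in the global alias table under extra names, and resolve_name consults that table first when looking a name up. A sketch with a hypothetical class and alias name, chosen purely for illustration:

    from monai.utils.aliases import alias, resolve_name

    @alias("toynet")                # "toynet" is an invented alias
    class ToyNet:
        pass

    assert resolve_name("toynet") is ToyNet   # resolved through the alias table, not by import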
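rescale_array maps the minimum and maximum of an array onto minv and maxv. A quick numeric check against the formula as it appears in this patch:

    import numpy as np
    from monai.utils.arrayutils import rescale_array

    arr = np.array([2.0, 4.0, 6.0])
    print(rescale_array(arr))                    # [0.  0.5 1. ], default range 0..1
    print(rescale_array(arr, minv=-1, maxv=1))   # [-1.  0.  1.]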
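resize_center crops or zero-pads each axis around the centre to reach the requested size, using copypaste_arrays to work out the overlapping slices. An illustrative check with arbitrarily chosen shapes:

    import numpy as np
    from monai.utils.arrayutils import resize_center

    img = np.ones((6, 6), np.float32)
    out = resize_center(img, 4, 8)   # crop axis 0 down to 4, pad axis 1 out to 8 with fill_value=0
    print(out.shape)                 # (4, 8)
    print(int(out.sum()))            # 24: the original values that fit are kept, the padding is zeros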
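same_padding and calculate_out_shape act as each other's sanity check: the padding returned by the former keeps the spatial size unchanged for stride 1 and halves it (rounded down) for stride 2. A quick numeric check of the renamed helpers:

    from monai.utils.convutils import same_padding, calculate_out_shape

    pad = same_padding(kernel_size=3)           # ((3 - 1) // 2) + 0 == 1
    print(pad)                                  # 1
    print(calculate_out_shape(64, 3, 1, pad))   # ((64 - 3 + 1 + 1) // 1) + 1 == 64, size preserved
    print(calculate_out_shape(64, 3, 2, pad))   # ((64 - 3 + 1 + 1) // 2) + 1 == 32, halved by the stride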
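RestartGenerator wraps a generator factory so the result can be iterated more than once, calling create_gen again on every pass. A short sketch:

    from monai.utils.decorators import RestartGenerator

    gen = RestartGenerator(lambda: (i * i for i in range(3)))  # create_gen is called on each __iter__
    print(list(gen), list(gen))                                # [0, 1, 4] [0, 1, 4], iterable repeatedly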