From e47beafea6b808e56362df8e3a9a450defe28761 Mon Sep 17 00:00:00 2001
From: skaliy <145155@stud.hvl.no>
Date: Wed, 12 Jul 2023 10:56:37 +0200
Subject: [PATCH] Dev: refactoring and added docstrings

---
 CONTRIBUTING.md                    |   9 +-
 fastMONAI/__init__.py              |   2 +-
 fastMONAI/_modidx.py               |  26 +-
 fastMONAI/dataset_info.py          | 108 ++--
 fastMONAI/external_data.py         | 257 +++++---
 fastMONAI/utils.py                 |  17 +-
 fastMONAI/vision_augmentation.py   | 301 +++++-----
 fastMONAI/vision_core.py           |  70 ++-
 fastMONAI/vision_data.py           | 264 ++++++---
 fastMONAI/vision_inference.py      |  46 +-
 fastMONAI/vision_loss.py           | 105 ++--
 fastMONAI/vision_metrics.py        |  69 ++-
 fastMONAI/vision_plot.py           |  28 +-
 nbs/00_vision_plot.ipynb           |  30 +-
 nbs/01_vision_core.ipynb           |  92 +--
 nbs/02_vision_data.ipynb           | 359 ++++++-----
 nbs/03_vision_augment.ipynb        | 922 +++++------------------------
 nbs/04_vision_loss_functions.ipynb | 109 ++--
 nbs/05_vision_metrics.ipynb        |  73 ++-
 nbs/06_vision_inference.ipynb      |  48 +-
 nbs/07_utils.ipynb                 |  19 +-
 nbs/08_dataset_info.ipynb          | 177 ++----
 nbs/09_external_data.ipynb         | 385 ++++++++----
 settings.ini                       |   2 +-
 24 files changed, 1658 insertions(+), 1860 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4ff6187..621c0f4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,2 +1,8 @@
 # How to contribute
-fastMONAI follows the same contribution policy as fastai: https://github.com/fastai/nbdev/blob/master/CONTRIBUTING.md
+For any issues related to the source code, please open an issue in the corresponding GitHub repository. Contributions to the code or the model are welcome and should be proposed through a pull request.
+
+## How to get started
+Install the git hooks that run automatic scripts during each commit and merge to strip the notebooks of superfluous metadata (and avoid merge conflicts). After cloning the repository, run the following commands inside it:
+
+1. pip install -e 'fastMONAI[dev]'
+2. 
nbdev_install_hooks diff --git a/fastMONAI/__init__.py b/fastMONAI/__init__.py index 260c070..f9aa3e1 100644 --- a/fastMONAI/__init__.py +++ b/fastMONAI/__init__.py @@ -1 +1 @@ -__version__ = "0.3.1" +__version__ = "0.3.2" diff --git a/fastMONAI/_modidx.py b/fastMONAI/_modidx.py index b6769c2..43c1dcf 100644 --- a/fastMONAI/_modidx.py +++ b/fastMONAI/_modidx.py @@ -29,10 +29,10 @@ 'fastMONAI/external_data.py'), 'fastMONAI.external_data._process_ixi_xls': ( 'external_data.html#_process_ixi_xls', 'fastMONAI/external_data.py'), - 'fastMONAI.external_data._process_nodule_img': ( 'external_data.html#_process_nodule_img', - 'fastMONAI/external_data.py'), - 'fastMONAI.external_data.download_NoduleMNIST3D': ( 'external_data.html#download_nodulemnist3d', - 'fastMONAI/external_data.py'), + 'fastMONAI.external_data._process_medmnist_img': ( 'external_data.html#_process_medmnist_img', + 'fastMONAI/external_data.py'), + 'fastMONAI.external_data.download_and_process_MedMNIST3D': ( 'external_data.html#download_and_process_medmnist3d', + 'fastMONAI/external_data.py'), 'fastMONAI.external_data.download_example_spine_data': ( 'external_data.html#download_example_spine_data', 'fastMONAI/external_data.py'), 'fastMONAI.external_data.download_ixi_data': ( 'external_data.html#download_ixi_data', @@ -129,24 +129,10 @@ 'fastMONAI/vision_augmentation.py'), 'fastMONAI.vision_augmentation.ZNormalization.__init__': ( 'vision_augment.html#znormalization.__init__', 'fastMONAI/vision_augmentation.py'), + 'fastMONAI.vision_augmentation.ZNormalization._do_z_normalization': ( 'vision_augment.html#znormalization._do_z_normalization', + 'fastMONAI/vision_augmentation.py'), 'fastMONAI.vision_augmentation.ZNormalization.encodes': ( 'vision_augment.html#znormalization.encodes', 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_rand_biasfield': ( 'vision_augment.html#_do_rand_biasfield', - 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_rand_blur': ( 'vision_augment.html#_do_rand_blur', - 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_rand_gamma': ( 'vision_augment.html#_do_rand_gamma', - 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_rand_ghosting': ( 'vision_augment.html#_do_rand_ghosting', - 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_rand_motion': ( 'vision_augment.html#_do_rand_motion', - 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_rand_noise': ( 'vision_augment.html#_do_rand_noise', - 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_rand_spike': ( 'vision_augment.html#_do_rand_spike', - 'fastMONAI/vision_augmentation.py'), - 'fastMONAI.vision_augmentation._do_z_normalization': ( 'vision_augment.html#_do_z_normalization', - 'fastMONAI/vision_augmentation.py'), 'fastMONAI.vision_augmentation.do_pad_or_crop': ( 'vision_augment.html#do_pad_or_crop', 'fastMONAI/vision_augmentation.py')}, 'fastMONAI.vision_core': { 'fastMONAI.vision_core.MedBase': ('vision_core.html#medbase', 'fastMONAI/vision_core.py'), diff --git a/fastMONAI/dataset_info.py b/fastMONAI/dataset_info.py index 45ea0f0..7540590 100644 --- a/fastMONAI/dataset_info.py +++ b/fastMONAI/dataset_info.py @@ -14,18 +14,23 @@ import glob # %% ../nbs/08_dataset_info.ipynb 4 -class MedDataset(): - '''A class to extract and present information about the dataset.''' - - def __init__(self, path=None, # Path to the image folder - postfix:str='', # Specify the file type if there are different 
files in the folder - img_list:list=None, # Alternatively pass in a list with image paths - reorder:bool=False, # Whether to reorder the data to be closest to canonical (RAS+) orientation - dtype:(MedImage, MedMask)=MedImage, # Load data as datatype - max_workers:int=1 # The number of worker threads - ): - '''Constructs all the necessary attributes for the MedDataset object.''' +class MedDataset: + """A class to extract and present information about the dataset.""" + def __init__(self, path=None, postfix: str = '', img_list: list = None, + reorder: bool = False, dtype: (MedImage, MedMask) = MedImage, + max_workers: int = 1): + """Constructs MedDataset object. + + Args: + path (str, optional): Path to the image folder. + postfix (str, optional): Specify the file type if there are different files in the folder. + img_list (List[str], optional): Alternatively, pass in a list with image paths. + reorder (bool, optional): Whether to reorder the data to be closest to canonical (RAS+) orientation. + dtype (Union[MedImage, MedMask], optional): Load data as datatype. Default is MedImage. + max_workers (int, optional): The number of worker threads. Default is 1. + """ + self.path = path self.postfix = postfix self.img_list = img_list @@ -35,48 +40,43 @@ def __init__(self, path=None, # Path to the image folder self.df = self._create_data_frame() def _create_data_frame(self): - '''Private method that returns a dataframe with information about the dataset - - Returns: - DataFrame: A DataFrame with information about the dataset. - ''' + """Private method that returns a dataframe with information about the dataset.""" if self.path: self.img_list = glob.glob(f'{self.path}/*{self.postfix}*') if not self.img_list: print('Could not find images. Check the image path') - + with ThreadPoolExecutor(max_workers=self.max_workers) as executor: data_info_dict = list(executor.map(self._get_data_info, self.img_list)) - + df = pd.DataFrame(data_info_dict) - if df.orientation.nunique() > 1: print('The volumes in this dataset have different orientations. Recommended to pass in the argument reorder=True when creating a MedDataset object for this dataset') + + if df.orientation.nunique() > 1: + print('The volumes in this dataset have different orientations. ' + 'Recommended to pass in the argument reorder=True when creating a MedDataset object for this dataset') + return df def summary(self): - '''Summary DataFrame of the dataset with example path for similar data.''' - + """Summary DataFrame of the dataset with example path for similar data.""" + columns = ['dim_0', 'dim_1', 'dim_2', 'voxel_0', 'voxel_1', 'voxel_2', 'orientation'] - return self.df.groupby(columns,as_index=False).agg(example_path=('path', 'min'), total=('path', 'size')).sort_values('total', ascending=False) + + return self.df.groupby(columns, as_index=False).agg( + example_path=('path', 'min'), total=('path', 'size') + ).sort_values('total', ascending=False) def suggestion(self): - '''Voxel value that appears most often in dim_0, dim_1 and dim_2, and wheter the data should be reoriented.''' + """Voxel value that appears most often in dim_0, dim_1 and dim_2, and whether the data should be reoriented.""" + resample = [self.df.voxel_0.mode()[0], self.df.voxel_1.mode()[0], self.df.voxel_2.mode()[0]] - return resample, self.reorder - def _get_data_info(self, fn:str): - '''Private method to collect information about an image file. 
+    def _get_data_info(self, fn: str):
+        """Private method to collect information about an image file."""
+        _, o, _ = med_img_reader(fn, dtype=self.dtype, reorder=self.reorder, only_tensor=False)
 
-        Args:
-            fn: Image file path.
-
-        Returns:
-            dict: A dictionary with information about the image file
-        '''
-
-        _,o,_ = med_img_reader(fn, dtype=self.dtype, reorder=self.reorder, only_tensor=False)
-
-        info_dict = {'path': fn, 'dim_0': o.shape[1], 'dim_1': o.shape[2], 'dim_2' :o.shape[3],
+        info_dict = {'path': fn, 'dim_0': o.shape[1], 'dim_1': o.shape[2], 'dim_2': o.shape[3],
                      'voxel_0': round(o.spacing[0], 4), 'voxel_1': round(o.spacing[1], 4), 'voxel_2': round(o.spacing[2], 4),
                      'orientation': f'{"".join(o.orientation)}+'}
@@ -87,28 +87,36 @@ def _get_data_info(self, fn:str):
 
         return info_dict
 
-    def get_largest_img_size(self,
-                             resample:list=None # A list with voxel spacing [dim_0, dim_1, dim_2]
-                            ) -> list:
-        '''Get the largest image size in the dataset.'''
-        dims = None
+    def get_largest_img_size(self, resample: list = None) -> list:
+        """Get the largest image size in the dataset."""
 
-        if resample is not None:
-
+        dims = None
+
+        if resample is not None:
             org_voxels = self.df[["voxel_0", "voxel_1", 'voxel_2']].values
             org_dims = self.df[["dim_0", "dim_1", 'dim_2']].values
-
             ratio = org_voxels/resample
             new_dims = (org_dims * ratio).T
             dims = [new_dims[0].max().round(), new_dims[1].max().round(), new_dims[2].max().round()]
-
-        else: dims = [df.dim_0.max(), df.dim_1.max(), df.dim_2.max()]
-
+
+        else:
+            dims = [self.df.dim_0.max(), self.df.dim_1.max(), self.df.dim_2.max()]
+
         return dims
 
 # %% ../nbs/08_dataset_info.ipynb 5
-def get_class_weights(train_labels:(np.array, list), class_weight='balanced'):
-    '''calculate class weights.'''
+def get_class_weights(labels: (np.array, list), class_weight: str = 'balanced') -> torch.Tensor:
+    """Calculates and returns the class weights.
+
+    Args:
+        labels: An array or list of class labels for each instance in the dataset.
+        class_weight: Defaults to 'balanced'.
+
+    Returns:
+        A tensor of class weights.
+    """
+
+    class_weights = compute_class_weight(class_weight=class_weight, classes=np.unique(labels), y=labels)
-    class_weights = compute_class_weight(class_weight=class_weight, classes=np.unique(train_labels), y=train_labels)
 
     return torch.Tensor(class_weights)
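
A quick usage sketch of the refactored `MedDataset` API (the image folder and label list below are hypothetical; `suggestion()`, `get_largest_img_size()`, and `get_class_weights()` behave as defined in the diff above):

```python
from fastMONAI.dataset_info import MedDataset, get_class_weights

# Hypothetical folder of NIfTI volumes; reorder=True reorients toward RAS+.
dataset = MedDataset(path='../data/T1_images', postfix='.nii.gz',
                     reorder=True, max_workers=4)

resample, reorder = dataset.suggestion()       # most frequent voxel spacing
size = dataset.get_largest_img_size(resample)  # largest shape after resampling

# Balanced weights for an illustrative, imbalanced label list.
weights = get_class_weights([0, 0, 0, 1, 1])   # -> tensor([0.8333, 1.2500])
```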
+ """ + + class_weights = compute_class_weight(class_weight=class_weight, classes=np.unique(labels), y=labels) - class_weights = compute_class_weight(class_weight=class_weight, classes=np.unique(train_labels), y=train_labels) return torch.Tensor(class_weights) diff --git a/fastMONAI/external_data.py b/fastMONAI/external_data.py index e0f15ce..da06665 100644 --- a/fastMONAI/external_data.py +++ b/fastMONAI/external_data.py @@ -2,9 +2,9 @@ # %% auto 0 __all__ = ['MURLs', 'download_ixi_data', 'download_ixi_tiny', 'download_spine_test_data', 'download_example_spine_data', - 'download_NoduleMNIST3D'] + 'download_and_process_MedMNIST3D'] -# %% ../nbs/09_external_data.ipynb 2 +# %% ../nbs/09_external_data.ipynb 1 from pathlib import Path from glob import glob from numpy import load @@ -15,27 +15,36 @@ import multiprocessing as mp from functools import partial -# %% ../nbs/09_external_data.ipynb 4 +# %% ../nbs/09_external_data.ipynb 3 class MURLs(): - '''A class with external medical dataset URLs.''' + """A class with external medical dataset URLs.""" IXI_DATA = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar' IXI_DEMOGRAPHIC_INFORMATION = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls' CHENGWEN_CHU_SPINE_DATA = 'https://drive.google.com/uc?id=1rbm9-KKAexpNm2mC9FsSbfnS8VJaF3Kn&confirm=t' EXAMPLE_SPINE_DATA = 'https://drive.google.com/uc?id=1Ms3Q6MYQrQUA_PKZbJ2t2NeYFQ5jloMh' - NODULE_MNIST_DATA = 'https://zenodo.org/record/6496656/files/nodulemnist3d.npz?download=1' + #NODULE_MNIST_DATA = 'https://zenodo.org/record/6496656/files/nodulemnist3d.npz?download=1' + MEDMNIST_DICT = {'OrganMNIST3D': 'https://zenodo.org/record/6496656/files/organmnist3d.npz?download=1', + 'NoduleMNIST3D': 'https://zenodo.org/record/6496656/files/nodulemnist3d.npz?download=1', + 'AdrenalMNIST3D': 'https://zenodo.org/record/6496656/files/adrenalmnist3d.npz?download=1', + 'FractureMNIST3D': 'https://zenodo.org/record/6496656/files/fracturemnist3d.npz?download=1', + 'VesselMNIST3D': 'https://zenodo.org/record/6496656/files/vesselmnist3d.npz?download=1', + 'SynapseMNIST3D': 'https://zenodo.org/record/6496656/files/synapsemnist3d.npz?download=1'} -# %% ../nbs/09_external_data.ipynb 5 -def _process_ixi_xls(xls_path:(str, Path), img_path: Path): - '''Private method to process the demographic information for the IXI dataset. +# %% ../nbs/09_external_data.ipynb 4 +def _process_ixi_xls(xls_path: (str, Path), img_path: Path) -> pd.DataFrame: + """Private method to process the demographic information for the IXI dataset. Args: xls_path: File path to the xls file with the demographic information. - img_path: Folder path to the images + img_path: Folder path to the images. Returns: - DataFrame: A processed dataframe with image path and demographic information. - ''' + A processed dataframe with image path and demographic information. + + Raises: + ValueError: If xls_path or img_path do not exist. 
+ """ print('Preprocessing ' + str(xls_path)) @@ -45,14 +54,14 @@ def _process_ixi_xls(xls_path:(str, Path), img_path: Path): for subject_id in duplicate_subject_ids: age = df.loc[df.IXI_ID == subject_id].AGE.nunique() - if age != 1: df = df.loc[df.IXI_ID != subject_id] #Remove duplicates with two different age values + if age != 1: df = df.loc[df.IXI_ID != subject_id] # Remove duplicates with two different age values df = df.drop_duplicates(subset='IXI_ID', keep='first').reset_index(drop=True) df['subject_id'] = ['IXI' + str(subject_id).zfill(3) for subject_id in df.IXI_ID.values] df = df.rename(columns={'SEX_ID (1=m, 2=f)': 'gender'}) df['age_at_scan'] = df.AGE.round(2) - df = df.replace({'gender': {1:'M', 2:'F'}}) + df = df.replace({'gender': {1: 'M', 2: 'F'}}) img_list = list(img_path.glob('*.nii.gz')) for path in img_list: @@ -61,50 +70,58 @@ def _process_ixi_xls(xls_path:(str, Path), img_path: Path): df = df.dropna() df = df[['t1_path', 'subject_id', 'gender', 'age_at_scan']] + return df -# %% ../nbs/09_external_data.ipynb 7 -def download_ixi_data(path:(str, Path)='../data' # Path to the directory where the data will be stored - ): - '''Download T1 scans and demographic information from the IXI dataset, then process the demographic - information for each subject and save the information as a CSV file. - Returns path to the stored CSV file. - ''' - path = Path(path)/'IXI' - img_path = path/'T1_images' +# %% ../nbs/09_external_data.ipynb 6 +def download_ixi_data(path: (str, Path) = '../data') -> Path: + """Download T1 scans and demographic information from the IXI dataset. + + Args: + path: Path to the directory where the data will be stored. Defaults to '../data'. + + Returns: + The path to the stored CSV file. + """ + + path = Path(path) / 'IXI' + img_path = path / 'T1_images' # Check whether image data already present in img_path: - is_extracted=False + is_extracted = False try: - if len(list(img_path.iterdir())) >= 581: # 581 imgs in the IXI dataset - is_extracted=True + if len(list(img_path.iterdir())) >= 581: # 581 imgs in the IXI dataset + is_extracted = True print(f"Images already downloaded and extracted to {img_path}") except: - is_extracted=False + is_extracted = False - # Download and extract images - if not is_extracted: - download_and_extract(url=MURLs.IXI_DATA, filepath=path/'IXI-T1.tar', output_dir=img_path) - (path/'IXI-T1.tar').unlink() + if not is_extracted: + download_and_extract(url=MURLs.IXI_DATA, filepath=path / 'IXI-T1.tar', output_dir=img_path) + (path / 'IXI-T1.tar').unlink() + download_url(url=MURLs.IXI_DEMOGRAPHIC_INFORMATION, filepath=path / 'IXI.xls') - # Download demographic info - download_url(url=MURLs.IXI_DEMOGRAPHIC_INFORMATION, filepath=path/'IXI.xls') - - processed_df = _process_ixi_xls(xls_path=path/'IXI.xls', img_path=img_path) - processed_df.to_csv(path/'dataset.csv',index=False) + processed_df = _process_ixi_xls(xls_path=path / 'IXI.xls', img_path=img_path) + processed_df.to_csv(path / 'dataset.csv', index=False) return path -# %% ../nbs/09_external_data.ipynb 9 -def download_ixi_tiny(path:(str, Path)='../data'): - ''' Download tiny version of IXI provided by TorchIO, containing 566 T1 brain MR scans and their corresponding brain segmentations.''' +# %% ../nbs/09_external_data.ipynb 8 +def download_ixi_tiny(path: (str, Path) = '../data') -> Path: + """Download the tiny version of the IXI dataset provided by TorchIO. + + Args: + path: The directory where the data will be + stored. If not provided, defaults to '../data'. 
# %% ../nbs/09_external_data.ipynb 8
-def download_ixi_tiny(path:(str, Path)='../data'):
-    ''' Download tiny version of IXI provided by TorchIO, containing 566 T1 brain MR scans and their corresponding brain segmentations.'''
+def download_ixi_tiny(path: (str, Path) = '../data') -> Path:
+    """Download the tiny version of the IXI dataset provided by TorchIO,
+    containing 566 T1 brain MR scans and their corresponding brain segmentations.
+
+    Args:
+        path: The directory where the data will be stored. Defaults to '../data'.
+
+    Returns:
+        The path to the directory where the data is stored.
+    """
 
-    path = Path(path)/'IXITiny'
+    path = Path(path) / 'IXITiny'
 
-    #Download MR scans and segmentation masks
     IXITiny(root=str(path), download=True)
 
-    # Download demographic info
     download_url(url=MURLs.IXI_DEMOGRAPHIC_INFORMATION, filepath=path/'IXI.xls')
 
     processed_df = _process_ixi_xls(xls_path=path/'IXI.xls', img_path=path/'image')
@@ -115,96 +132,154 @@ def download_ixi_tiny(path:(str, Path)='../data'):
     return path
 
 # %% ../nbs/09_external_data.ipynb 10
-def _create_spine_df(test_dir:Path):
-    # Get a list of the image files in the 'img' directory
-    img_list = glob(str(test_dir/'img/*.nii.gz'))
-
-    # Create a list of the corresponding mask files in the 'seg' directory
-    mask_list = [str(fn).replace('img', 'seg') for fn in img_list]
-
-    # Create a list of the subject IDs for each image file
+def _create_spine_df(dir_path: Path) -> pd.DataFrame:
+    """Create a pandas DataFrame containing information about spinal images.
+
+    Args:
+        dir_path: Directory path where data (image and segmentation mask files) are stored.
+
+    Returns:
+        A DataFrame containing the paths to the image files and their
+        corresponding mask files, the subject IDs, and a flag indicating that
+        these are test data.
+    """
+
+    img_list = glob(str(dir_path / 'img/*.nii.gz'))
+    mask_list = [str(fn).replace('img', 'seg') for fn in img_list]
     subject_id_list = [fn.split('_')[-1].split('.')[0] for fn in mask_list]
 
-    # Create a dictionary containing the test data
-    test_data = {'t2_img_path':img_list, 't2_mask_path':mask_list, 'subject_id':subject_id_list, 'is_test':True}
+    test_data = {
+        't2_img_path': img_list,
+        't2_mask_path': mask_list,
+        'subject_id': subject_id_list,
+        'is_test': True,
+    }
 
-    # Create a DataFrame from the example data dictionary
     return pd.DataFrame(test_data)
 
-# %% ../nbs/09_external_data.ipynb 12
-def download_spine_test_data(path:(str, Path)='../data'):
-
-    ''' Download T2w scans from 'Fully Automatic Localization and Segmentation of 3D Vertebral Bodies from CT/MR Images via a Learning-Based Method' study by Chu et. al.
-    Returns a processed dataframe with image path, label path and subject IDs.
-    '''
+# %% ../nbs/09_external_data.ipynb 11
+def download_spine_test_data(path: (str, Path) = '../data') -> pd.DataFrame:
+    """Downloads T2w scans from the study 'Fully Automatic Localization and
+    Segmentation of 3D Vertebral Bodies from CT/MR Images via a Learning-Based
+    Method' by Chu et al.
+
+    Args:
+        path: Directory where the downloaded data will be stored and extracted.
+            Defaults to '../data'.
+
+    Returns:
+        Processed dataframe containing image paths, label paths, and subject IDs.
+    """
 
     study = 'chengwen_chu_2015'
 
-    download_and_extract(url=MURLs.CHENGWEN_CHU_SPINE_DATA, filepath=f'{study}.zip', output_dir=path)
+    download_and_extract(
+        url=MURLs.CHENGWEN_CHU_SPINE_DATA,
+        filepath=f'{study}.zip',
+        output_dir=path
+    )
     Path(f'{study}.zip').unlink()
 
-    return _create_spine_df(Path(path)/study)
+    return _create_spine_df(Path(path) / study)
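
A usage sketch for the spine test data (column names follow `_create_spine_df` above):

```python
from fastMONAI.external_data import download_spine_test_data

test_df = download_spine_test_data(path='../data')
# Expected columns: t2_img_path, t2_mask_path, subject_id, is_test (all True)
print(test_df.head())
```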
+ """ - '''Download example T2w scan and predicted mask.''' study = 'example_data' - download_and_extract(url=MURLs.EXAMPLE_SPINE_DATA, filepath='example_data.zip', output_dir=path); + download_and_extract( + url=MURLs.EXAMPLE_SPINE_DATA, + filepath='example_data.zip', + output_dir=path + ) Path('example_data.zip').unlink() - return Path(path/study) + return Path(path) / study -# %% ../nbs/09_external_data.ipynb 15 -def _process_nodule_img(path, idx_arr): - '''Save tensor as NIfTI.''' +# %% ../nbs/09_external_data.ipynb 18 +def _process_medmnist_img(path, idx_arr): + """Save tensor as NIfTI.""" + idx, arr = idx_arr img = ScalarImage(tensor=arr[None, :]) fn = path/f'{idx}_nodule.nii.gz' img.save(fn) return str(fn) -# %% ../nbs/09_external_data.ipynb 16 +# %% ../nbs/09_external_data.ipynb 19 def _df_sort_and_add_columns(df, label_list, is_val): - '''Sort the dataframe based on img_idx and add labels and if it is validation data column''' + """Sort the dataframe based on img_idx and add labels and if it is validation data column.""" + df = df.sort_values(by='img_idx').reset_index(drop=True) df['labels'], df['is_val'] = label_list, is_val - df = df.replace({"labels": {0:'b', 1:'m'}}) + #df = df.replace({"labels": {0:'b', 1:'m'}}) df = df.drop('img_idx', axis=1) return df -# %% ../nbs/09_external_data.ipynb 17 +# %% ../nbs/09_external_data.ipynb 20 def _create_nodule_df(pool, output_dir, imgs, labels, is_val=False): - '''Create dataframe for NoduleMNIST3D data.''' - img_path_list = pool.map(partial(_process_nodule_img, output_dir), enumerate(imgs)) + """Create dataframe for MedMNIST data.""" + + img_path_list = pool.map(partial(_process_medmnist_img, output_dir), enumerate(imgs)) img_idx = [float(Path(fn).parts[-1].split('_')[0]) for fn in img_path_list] df = pd.DataFrame(list(zip(img_path_list, img_idx)), columns=['img_path','img_idx']) return _df_sort_and_add_columns(df, labels, is_val) -# %% ../nbs/09_external_data.ipynb 18 -def download_NoduleMNIST3D(path:(str, Path)='../data', max_workers=1): - - '''Download ....''' - study = 'NoduleMNIST3D' - path = Path(path)/study - - download_url(url=MURLs.NODULE_MNIST_DATA, filepath=path/f'{study}.npz'); - data = load(path/f'{study}.npz') - key_fn = ['train_images', 'val_images', 'test_images'] - for fn in key_fn: (path/fn).mkdir(exist_ok=True) - - - train_imgs, val_imgs, test_imgs = data[key_fn[0]], data[key_fn[1]], data[key_fn[2]] +# %% ../nbs/09_external_data.ipynb 21 +def download_and_process_MedMNIST3D(study: str, + path: (str, Path) = '../data', + max_workers: int = 1) -> Tuple[pd.DataFrame, pd.DataFrame]: + """Downloads and processes a particular MedMNIST dataset. + Args: + study: select MedMNIST dataset ('OrganMNIST3D', 'NoduleMNIST3D', + 'AdrenalMNIST3D', 'FractureMNIST3D', 'VesselMNIST3D', 'SynapseMNIST3D') + path: Directory where the downloaded data + will be stored and extracted. Defaults to '../data'. + max_workers: Maximum number of worker processes to use + for data processing. Defaults to 1. + + Returns: + Two pandas DataFrames. The first DataFrame combines training and validation data, + and the second DataFrame contains the testing data. 
+ """ + path = Path(path) / study + dataset_file_path = path / f'{study}.npz' + + try: + download_url(url=MURLs.MEDMNIST_DICT[study], filepath=dataset_file_path) + except: + raise ValueError(f"Dataset '{study}' does not exist.") + + data = load(dataset_file_path) + keys = ['train_images', 'val_images', 'test_images'] + + for key in keys: + (path / key).mkdir(exist_ok=True) + + train_imgs, val_imgs, test_imgs = data[keys[0]], data[keys[1]], data[keys[2]] + # Process the data and create DataFrames with mp.Pool(processes=max_workers) as pool: - - train_df = _create_nodule_df(pool, path/key_fn[0], train_imgs, data['train_labels']) - val_df = _create_nodule_df(pool, path/key_fn[1], val_imgs, data['val_labels'], is_val=True) - test_df = _create_nodule_df(pool, path/key_fn[2], test_imgs, data['test_labels']) - + train_df = _create_nodule_df(pool, path / keys[0], train_imgs, data['train_labels']) + val_df = _create_nodule_df(pool, path / keys[1], val_imgs, data['val_labels'], is_val=True) + test_df = _create_nodule_df(pool, path / keys[2], test_imgs, data['test_labels']) + train_val_df = pd.concat([train_df, val_df], ignore_index=True) - + + dataset_file_path.unlink() + return train_val_df, test_df + diff --git a/fastMONAI/utils.py b/fastMONAI/utils.py index d4a6a1d..4aa965a 100644 --- a/fastMONAI/utils.py +++ b/fastMONAI/utils.py @@ -14,7 +14,7 @@ def store_variables(pkl_fn:(str, Path), reorder:bool, resample:(int,list), ) -> None: - '''Save variable values in a pickle file.''' + """Save variable values in a pickle file.""" var_vals = [size, reorder, resample] @@ -22,19 +22,22 @@ def store_variables(pkl_fn:(str, Path), pickle.dump(var_vals, f) # %% ../nbs/07_utils.ipynb 4 -def load_variables(pkl_fn # Filename of the pickle file - ): - '''Load stored variable values from a pickle file. +def load_variables(pkl_fn: (str, Path)) -> Any: + """ + Loads stored variable values from a pickle file. - Returns: A list of variable values. - ''' + Args: + pkl_fn: File path of the pickle file to be loaded. + Returns: + The deserialized value of the pickled data. + """ with open(pkl_fn, 'rb') as f: return pickle.load(f) # %% ../nbs/07_utils.ipynb 5 def print_colab_gpu_info(): - '''Check if we have a GPU attached to the runtime.''' + """Check if we have a GPU attached to the runtime.""" colab_gpu_msg =(f"{'#'*80}\n" "Remember to attach a GPU to your Colab Runtime:" diff --git a/fastMONAI/vision_augmentation.py b/fastMONAI/vision_augmentation.py index dad762c..50ca02e 100644 --- a/fastMONAI/vision_augmentation.py +++ b/fastMONAI/vision_augmentation.py @@ -12,75 +12,90 @@ # %% ../nbs/03_vision_augment.ipynb 5 class CustomDictTransform(ItemTransform): - '''Wrapper to perform an identical transformation on both image and target (if it is a mask) during training.''' + """A class that serves as a wrapper to perform an identical transformation on both + the image and the target (if it's a mask). + """ - split_idx = 0 - def __init__(self, aug): self.aug = aug + split_idx = 0 # Only perform transformations on training data. Use TTA() for transformations on validation data. + + def __init__(self, aug): + """Constructs CustomDictTransform object. + + Args: + aug (Callable): Function to apply augmentation on the image. + """ + self.aug = aug def encodes(self, x): - '''Apply transformation to an image, and the same random transformation to the target if it is a mask. + """ + Applies the stored transformation to an image, and the same random transformation + to the target if it is a mask. 
If the target is not a mask, it returns the target as is.
 
         Args:
             x (Tuple[MedImage, Union[MedMask, TensorCategory]]): A tuple containing the
                 image and the target.
 
         Returns:
             Tuple[MedImage, Union[MedMask, TensorCategory]]: The transformed image and target.
             If the target is a mask, it is transformed identically to the image. If the target
             is not a mask, the original target is returned.
         """
         img, y_true = x
 
         if isinstance(y_true, (MedMask)):
-            aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img, affine=MedImage.affine_matrix), mask=tio.LabelMap(tensor=y_true, affine=MedImage.affine_matrix)))
+            aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img, affine=MedImage.affine_matrix),
+                                       mask=tio.LabelMap(tensor=y_true, affine=MedImage.affine_matrix)))
             return MedImage.create(aug['img'].data), MedMask.create(aug['mask'].data)
 
-        else:
-            aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img)))
-            return MedImage.create(aug['img'].data), y_true
+        aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img)))
+        return MedImage.create(aug['img'].data), y_true
 
-# %% ../nbs/03_vision_augment.ipynb 8
-def do_pad_or_crop(o, target_shape, padding_mode, mask_name, dtype=torch.Tensor):
+# %% ../nbs/03_vision_augment.ipynb 7
+def do_pad_or_crop(o, target_shape, padding_mode, mask_name, dtype=torch.Tensor):
+    # TODO: refactor
     pad_or_crop = tio.CropOrPad(target_shape=target_shape, padding_mode=padding_mode, mask_name=mask_name)
 
     return dtype(pad_or_crop(o))
 
-# %% ../nbs/03_vision_augment.ipynb 9
+# %% ../nbs/03_vision_augment.ipynb 8
 class PadOrCrop(DisplayedTransform):
-    '''Resize image using TorchIO `CropOrPad`.'''
+    """Resize image using TorchIO `CropOrPad`."""
+
+    order = 0
 
-    order=0
     def __init__(self, size, padding_mode=0, mask_name=None):
-        if not is_listy(size): size=[size,size,size]
-        self.size, self.padding_mode, self.mask_name = size, padding_mode, mask_name
+        if not is_listy(size):
+            size = [size, size, size]
+        self.pad_or_crop = tio.CropOrPad(target_shape=size,
+                                         padding_mode=padding_mode,
+                                         mask_name=mask_name)
 
-    def encodes(self, o:(MedImage, MedMask)):
-        return do_pad_or_crop(o,target_shape=self.size, padding_mode=self.padding_mode, mask_name=self.mask_name, dtype=type(o))
+    def encodes(self, o: (MedImage, MedMask)):
+        return type(o)(self.pad_or_crop(o))
 
-# %% ../nbs/03_vision_augment.ipynb 11
-def _do_z_normalization(o, masking_method, channel_wise):
-
-    z_normalization = tio.ZNormalization(masking_method=masking_method)
-    normalized_tensor = torch.zeros(o.shape)
-
-    if channel_wise:
-        for idx, c in enumerate(o):
-            normalized_tensor[idx] = z_normalization(c[None])[0]
-
-    else: normalized_tensor = z_normalization(o)
-
-    return normalized_tensor
+# %% ../nbs/03_vision_augment.ipynb 9
+class ZNormalization(DisplayedTransform):
+    """Apply TorchIO `ZNormalization`."""
+
+    order = 0
+
+    def __init__(self, masking_method=None, channel_wise=True):
+        self.z_normalization = tio.ZNormalization(masking_method=masking_method)
+        self.channel_wise = channel_wise
+
+    def encodes(self, o: MedImage):
+        return MedImage.create(self._do_z_normalization(o))
 
-# %% ../nbs/03_vision_augment.ipynb 12
-class ZNormalization(DisplayedTransform):
-    '''Apply TorchIO `ZNormalization`.'''
-
-    order=0
-    def __init__(self, masking_method=None, channel_wise=True):
-        self.masking_method, self.channel_wise = 
channel_wise + def encodes(self, o: MedMask): + return o - def encodes(self, o:(MedImage)): return MedImage.create(_do_z_normalization(o, self.masking_method, self.channel_wise)) - def encodes(self, o:(MedMask)):return o + def _do_z_normalization(self, o): + if self.channel_wise: + return torch.stack([self.z_normalization(c[None])[0] for c in o]) + else: + return self.z_normalization(o) -# %% ../nbs/03_vision_augment.ipynb 14 +# %% ../nbs/03_vision_augment.ipynb 10 class BraTSMaskConverter(DisplayedTransform): '''Convert BraTS masks.''' @@ -92,115 +107,95 @@ def encodes(self, o:(MedMask)): o = torch.where(o==4, 3., o) return MedMask.create(o) -# %% ../nbs/03_vision_augment.ipynb 16 +# %% ../nbs/03_vision_augment.ipynb 11 class BinaryConverter(DisplayedTransform): '''Convert to binary mask.''' order=1 - def encodes(self, o:(MedImage)): return o + def encodes(self, o: MedImage): + return o - def encodes(self, o:(MedMask)): + def encodes(self, o: MedMask): o = torch.where(o>0, 1., 0) return MedMask.create(o) -# %% ../nbs/03_vision_augment.ipynb 18 -def _do_rand_ghosting(o, intensity, p): - - add_ghosts = tio.RandomGhosting(intensity=intensity, p=p) - return add_ghosts(o) - -# %% ../nbs/03_vision_augment.ipynb 19 +# %% ../nbs/03_vision_augment.ipynb 12 class RandomGhosting(DisplayedTransform): - '''Apply TorchIO `RandomGhosting`.''' - - split_idx,order=0,1 - - def __init__(self, intensity =(0.5, 1), p=0.5): - self.intensity, self.p = intensity, p + """Apply TorchIO `RandomGhosting`.""" + + split_idx, order = 0, 1 - def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_ghosting(o, self.intensity, self.p)) - def encodes(self, o:(MedMask)):return o + def __init__(self, intensity=(0.5, 1), p=0.5): + self.add_ghosts = tio.RandomGhosting(intensity=intensity, p=p) -# %% ../nbs/03_vision_augment.ipynb 21 -def _do_rand_spike(o, num_spikes, intensity, p): + def encodes(self, o: MedImage): + return MedImage.create(self.add_ghosts(o)) - add_spikes = tio.RandomSpike(num_spikes=num_spikes, intensity=intensity, p=p) - return add_spikes(o) #return torch tensor + def encodes(self, o: MedMask): + return o -# %% ../nbs/03_vision_augment.ipynb 22 +# %% ../nbs/03_vision_augment.ipynb 13 class RandomSpike(DisplayedTransform): '''Apply TorchIO `RandomSpike`.''' split_idx,order=0,1 def __init__(self, num_spikes=1, intensity=(1, 3), p=0.5): - self.num_spikes, self.intensity, self.p = num_spikes, intensity, p + self.add_spikes = tio.RandomSpike(num_spikes=num_spikes, intensity=intensity, p=p) - def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_spike(o, self.num_spikes, self.intensity, self.p)) - def encodes(self, o:(MedMask)):return o + def encodes(self, o:MedImage): + return MedImage.create(self.add_spikes(o)) + + def encodes(self, o:MedMask): + return o -# %% ../nbs/03_vision_augment.ipynb 24 -def _do_rand_noise(o, mean, std, p): - - add_noise = tio.RandomNoise(mean=mean, std=std, p=p) - return add_noise(o) #return torch tensor - -# %% ../nbs/03_vision_augment.ipynb 25 +# %% ../nbs/03_vision_augment.ipynb 14 class RandomNoise(DisplayedTransform): '''Apply TorchIO `RandomNoise`.''' split_idx,order=0,1 def __init__(self, mean=0, std=(0, 0.25), p=0.5): - self.mean, self.std, self.p = mean, std, p - - def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_noise(o, mean=self.mean, std=self.std, p=self.p)) - def encodes(self, o:(MedMask)):return o + self.add_noise = tio.RandomNoise(mean=mean, std=std, p=p) -# %% ../nbs/03_vision_augment.ipynb 27 -def _do_rand_biasfield(o, coefficients, 
order, p):
-
-    add_biasfield = tio.RandomBiasField(coefficients=coefficients, order=order, p=p)
-    return add_biasfield(o) #return torch tensor
+    def encodes(self, o: MedImage):
+        return MedImage.create(self.add_noise(o))
+
+    def encodes(self, o: MedMask):
+        return o
 
-# %% ../nbs/03_vision_augment.ipynb 28
+# %% ../nbs/03_vision_augment.ipynb 15
 class RandomBiasField(DisplayedTransform):
     '''Apply TorchIO `RandomBiasField`.'''
 
     split_idx,order=0,1
 
     def __init__(self, coefficients=0.5, order=3, p=0.5):
-        self.coefficients, self.order, self.p = coefficients, order, p
-
-    def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_biasfield(o, coefficients=self.coefficients, order=self.order, p=self.p))
-    def encodes(self, o:(MedMask)):return o
-
-# %% ../nbs/03_vision_augment.ipynb 30
-def _do_rand_blur(o, std, p):
-
-    add_blur = tio.RandomBlur(std=std, p=p)
-    return add_blur(o)
+        self.add_biasfield = tio.RandomBiasField(coefficients=coefficients, order=order, p=p)
+
+    def encodes(self, o: MedImage):
+        return MedImage.create(self.add_biasfield(o))
+
+    def encodes(self, o: MedMask):
+        return o
 
-# %% ../nbs/03_vision_augment.ipynb 31
+# %% ../nbs/03_vision_augment.ipynb 16
 class RandomBlur(DisplayedTransform):
-    '''Apply TorchIO `RandomBiasField`.'''
+    '''Apply TorchIO `RandomBlur`.'''
 
     split_idx,order=0,1
 
     def __init__(self, std=(0, 2), p=0.5):
-        self.std, self.p = std, p
-
-    def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_blur(o, std=self.std, p=self.p))
-    def encodes(self, o:(MedMask)):return o
-
-# %% ../nbs/03_vision_augment.ipynb 33
-def _do_rand_gamma(o, log_gamma, p):
-
-    add_gamma = tio.RandomGamma(log_gamma=log_gamma, p=p)
-    return add_gamma(o)
+        self.add_blur = tio.RandomBlur(std=std, p=p)
+
+    def encodes(self, o: MedImage):
+        return MedImage.create(self.add_blur(o))
+
+    def encodes(self, o: MedMask):
+        return o
 
-# %% ../nbs/03_vision_augment.ipynb 34
+# %% ../nbs/03_vision_augment.ipynb 17
 class RandomGamma(DisplayedTransform):
     '''Apply TorchIO `RandomGamma`.'''
 
     split_idx,order=0,1
 
     def __init__(self, log_gamma=(-0.3, 0.3), p=0.5):
-        self.log_gamma, self.p = log_gamma, p
-
-    def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_gamma(o, log_gamma=self.log_gamma, p=self.p))
-    def encodes(self, o:(MedMask)):return o
-
-# %% ../nbs/03_vision_augment.ipynb 36
-def _do_rand_motion(o, degrees, translation, num_transforms, image_interpolation, p):
-
-    add_motion = tio.RandomMotion(degrees=degrees, translation=translation, num_transforms=num_transforms, image_interpolation=image_interpolation, p=p)
-    return add_motion(o) #return torch tensor
+        self.add_gamma = tio.RandomGamma(log_gamma=log_gamma, p=p)
+
+    def encodes(self, o: MedImage):
+        return MedImage.create(self.add_gamma(o))
+
+    def encodes(self, o: MedMask):
+        return o
 
-# %% ../nbs/03_vision_augment.ipynb 37
+# %% ../nbs/03_vision_augment.ipynb 18
 class RandomMotion(DisplayedTransform):
-    '''Apply TorchIO `RandomMotion`.'''
-
-    split_idx,order=0,1
-
-    def __init__(self, degrees=10, translation=10, num_transforms=2, image_interpolation='linear', p=0.5):
-        self.degrees,self.translation, self.num_transforms, self.image_interpolation, self.p = degrees,translation, num_transforms, image_interpolation, p
-
-    def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_motion(o, degrees=self.degrees,translation=self.translation, num_transforms=self.num_transforms, image_interpolation=self.image_interpolation, p=self.p))
-    def encodes(self, o:(MedMask)):return o
-
-# %% 
../nbs/03_vision_augment.ipynb 40 + """Apply TorchIO `RandomMotion`.""" + + split_idx, order = 0, 1 + + def __init__( + self, + degrees=10, + translation=10, + num_transforms=2, + image_interpolation='linear', + p=0.5 + ): + self.add_motion = tio.RandomMotion( + degrees=degrees, + translation=translation, + num_transforms=num_transforms, + image_interpolation=image_interpolation, + p=p + ) + + def encodes(self, o: MedImage): + return MedImage.create(self.add_motion(o)) + + def encodes(self, o: MedMask): + return o + +# %% ../nbs/03_vision_augment.ipynb 20 class RandomElasticDeformation(CustomDictTransform): - '''Apply TorchIO `RandomElasticDeformation`.''' + """Apply TorchIO `RandomElasticDeformation`.""" - def __init__(self,num_control_points=7, max_displacement=7.5, image_interpolation='linear', p=0.5): - super().__init__(tio.RandomElasticDeformation(num_control_points=num_control_points, max_displacement=max_displacement, image_interpolation=image_interpolation, p=p)) + def __init__(self, num_control_points=7, max_displacement=7.5, + image_interpolation='linear', p=0.5): + + super().__init__(tio.RandomElasticDeformation( + num_control_points=num_control_points, + max_displacement=max_displacement, + image_interpolation=image_interpolation, + p=p)) -# %% ../nbs/03_vision_augment.ipynb 42 +# %% ../nbs/03_vision_augment.ipynb 21 class RandomAffine(CustomDictTransform): - '''Apply TorchIO `RandomAffine`.''' - - def __init__(self, scales=0, degrees=10, translation=0, isotropic=False, image_interpolation='linear', default_pad_value=0., p=0.5): - super().__init__(tio.RandomAffine(scales=scales, degrees=degrees, translation=translation, isotropic=isotropic, image_interpolation=image_interpolation, default_pad_value=default_pad_value, p=p)) + """Apply TorchIO `RandomAffine`.""" + + def __init__(self, scales=0, degrees=10, translation=0, isotropic=False, + image_interpolation='linear', default_pad_value=0., p=0.5): + + super().__init__(tio.RandomAffine( + scales=scales, + degrees=degrees, + translation=translation, + isotropic=isotropic, + image_interpolation=image_interpolation, + default_pad_value=default_pad_value, + p=p)) -# %% ../nbs/03_vision_augment.ipynb 44 +# %% ../nbs/03_vision_augment.ipynb 22 class RandomFlip(CustomDictTransform): - '''Apply TorchIO `RandomFlip`.''' + """Apply TorchIO `RandomFlip`.""" def __init__(self, axes='LR', p=0.5): super().__init__(tio.RandomFlip(axes=axes, flip_probability=p)) -# %% ../nbs/03_vision_augment.ipynb 46 +# %% ../nbs/03_vision_augment.ipynb 23 class OneOf(CustomDictTransform): - '''Apply only one of the given transforms using TorchIO `OneOf`.''' + """Apply only one of the given transforms using TorchIO `OneOf`.""" def __init__(self, transform_dict, p=1): super().__init__(tio.OneOf(transform_dict, p=p)) diff --git a/fastMONAI/vision_core.py b/fastMONAI/vision_core.py index 99fa1a2..f7fffce 100644 --- a/fastMONAI/vision_core.py +++ b/fastMONAI/vision_core.py @@ -10,7 +10,8 @@ # %% ../nbs/01_vision_core.ipynb 5 def _preprocess(obj, reorder, resample): - """Preprocesses the given object. + """ + Preprocesses the given object. Args: obj: The object to preprocess. 
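
For context, a sketch of how the refactored transforms above are typically composed as `item_tfms` for a fastMONAI dataloader (sizes and probabilities are illustrative, not prescribed by this patch):

```python
from fastMONAI.vision_augmentation import (PadOrCrop, ZNormalization,
                                           RandomAffine, RandomFlip)

item_tfms = [
    PadOrCrop(size=96),               # harmonize shapes to [96, 96, 96]
    ZNormalization(),                 # channel-wise z-scoring
    RandomAffine(degrees=15, p=0.5),  # training split only (split_idx = 0)
    RandomFlip(axes='LR', p=0.5),
]
```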
@@ -83,12 +84,8 @@ def _multi_channel(image_paths: list, reorder: bool, resample: list, dtype, only # %% ../nbs/01_vision_core.ipynb 8 -def med_img_reader( - file_path: (str, Path), - dtype=torch.Tensor, - reorder: bool = False, - resample: list = None, - only_tensor: bool = True +def med_img_reader(file_path: (str, Path), dtype=torch.Tensor, reorder: bool = False, + resample: list = None, only_tensor: bool = True ): """Loads and preprocesses a medical image. @@ -120,32 +117,36 @@ def med_img_reader( # %% ../nbs/01_vision_core.ipynb 10 class MetaResolver(type(torch.Tensor), metaclass=BypassNewMeta): - '''A class to bypass metaclass conflict: + """ + A class to bypass metaclass conflict: https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/data/batch.html - ''' + """ pass # %% ../nbs/01_vision_core.ipynb 11 -class MedBase(torch.Tensor, metaclass=MetaResolver): - '''A class that represents an image object. Metaclass casts x to this class if it is of type cls._bypass_type.''' - - _bypass_type=torch.Tensor +class MedBase(torch.Tensor, metaclass=MetaResolver): + """A class that represents an image object. + Metaclass casts `x` to this class if it is of type `cls._bypass_type`.""" + + _bypass_type = torch.Tensor _show_args = {'cmap':'gray'} resample, reorder = None, False affine_matrix = None - @classmethod - def create(cls, fn: (Path, str, torch.Tensor), **kwargs): + def create(cls, fn: (Path, str, torch.Tensor), **kwargs) -> torch.Tensor: """ - Open a medical image and cast to MedBase object. If it is a torch.Tensor, cast to MedBase object. + Opens a medical image and casts it to MedBase object. + If `fn` is a torch.Tensor, it's cast to MedBase object. Args: - fn: Image path or a 4D torch.Tensor. - kwargs: Additional parameters. + fn : (Path, str, torch.Tensor) + Image path or a 4D torch.Tensor. + kwargs : dict + Additional parameters for the medical image reader. Returns: - A 4D tensor as MedBase object. + torch.Tensor : A 4D tensor as a MedBase object. """ if isinstance(fn, torch.Tensor): return cls(fn) @@ -155,18 +156,32 @@ def create(cls, fn: (Path, str, torch.Tensor), **kwargs): @classmethod def item_preprocessing(cls, resample: (list, int, tuple), reorder: bool): """ - Change the values for the class variables `resample` and `reorder`. + Changes the values for the class variables `resample` and `reorder`. Args: - resample: A list with voxel spacing. - reorder: Whether to reorder the data to be closest to canonical (RAS+) orientation. + resample : (list, int, tuple) + A list with voxel spacing. + reorder : bool + Whether to reorder the data to be closest to canonical (RAS+) orientation. """ cls.resample = resample cls.reorder = reorder - def show(self, ctx=None, channel=0, indices=None, anatomical_plane=0, **kwargs): + def show(self, ctx=None, channel: int = 0, indices: int = None, anatomical_plane: int = 0, **kwargs): """ - Show Medimage using `merge(self._show_args, kwargs)`. + Displays the Medimage using `merge(self._show_args, kwargs)`. + + Args: + ctx : Any, optional + Context to use for the display. Defaults to None. + channel : int, optional + The channel of the image to be displayed. Defaults to 0. + indices : list or None, optional + Indices of the images to be displayed. Defaults to None. + anatomical_plane : int, optional + Anatomical plane of the image to be displayed. Defaults to 0. + kwargs : dict, optional + Additional parameters for the show function. Returns: Shown image. 
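
A sketch of the `MedBase`/`MedImage` entry points defined in this file (the image path is hypothetical):

```python
from fastMONAI.vision_core import MedImage

# Class-level preprocessing applied when MedImage.create loads from disk.
MedImage.item_preprocessing(resample=[1, 1, 1], reorder=True)

img = MedImage.create('../data/subject_001.nii.gz')  # 4D tensor [C, W, H, D]
img.show(anatomical_plane=0)
```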
@@ -177,15 +192,16 @@ def show(self, ctx=None, channel=0, indices=None, anatomical_plane=0, **kwargs):
             **merge(self._show_args, kwargs)
         )
 
-    def __repr__(self):
+    def __repr__(self) -> str:
+        """Returns the string representation of the MedBase instance."""
         return f'{self.__class__.__name__} mode={self.mode} size={"x".join([str(d) for d in self.size])}'
 
 # %% ../nbs/01_vision_core.ipynb 12
 class MedImage(MedBase):
-    '''Subclass of MedBase that represents an image object.'''
+    """Subclass of MedBase that represents an image object."""
     pass
 
 # %% ../nbs/01_vision_core.ipynb 13
 class MedMask(MedBase):
-    '''Subclass of MedBase that represents an mask object.'''
+    """Subclass of MedBase that represents a mask object."""
     _show_args = {'alpha':0.5, 'cmap':'tab20'}
diff --git a/fastMONAI/vision_data.py b/fastMONAI/vision_data.py
index 22479f1..1e0f562 100644
--- a/fastMONAI/vision_data.py
+++ b/fastMONAI/vision_data.py
@@ -10,168 +10,258 @@
 from .vision_core import *
 
 # %% ../nbs/02_vision_data.ipynb 5
-def pred_to_multiclass_mask(pred:torch.Tensor # [C,W,H,D] activation tensor
-                           ) -> torch.Tensor:
-    '''Apply Softmax function on the predicted tensor to rescale the values in the range [0, 1] and sum to 1.
-    Then apply argmax to get the indices of the maximum value of all elements in the predicted Tensor.
-    Returns: Predicted mask.
-    '''
+def pred_to_multiclass_mask(pred: torch.Tensor) -> torch.Tensor:
+    """Apply Softmax on the predicted tensor to rescale the values in the range [0, 1]
+    and sum to 1. Then apply argmax to get the indices of the maximum value of all
+    elements in the predicted Tensor.
+
+    Args:
+        pred: [C,W,H,D] activation tensor.
+
+    Returns:
+        Predicted mask.
+    """
+
     pred = pred.softmax(dim=0)
+
     return pred.argmax(dim=0, keepdims=True)
 
 # %% ../nbs/02_vision_data.ipynb 6
-def batch_pred_to_multiclass_mask(pred:torch.Tensor # [B, C, W, H, D] batch of activations
-                                 ) -> (torch.Tensor, int):
-    '''Convert a batch of predicted activation tensors to masks.
-    Returns batch of predicted masks and number of classes.
-    '''
-
+def batch_pred_to_multiclass_mask(pred: torch.Tensor) -> (torch.Tensor, int):
+    """Convert a batch of predicted activation tensors to masks.
+
+    Args:
+        pred: [B, C, W, H, D] batch of activations.
+
+    Returns:
+        Tuple of batch of predicted masks and number of classes.
+    """
+
     n_classes = pred.shape[1]
     pred = [pred_to_multiclass_mask(p) for p in pred]
 
     return torch.stack(pred), n_classes
 
 # %% ../nbs/02_vision_data.ipynb 7
-def pred_to_binary_mask(pred # [B, C, W, H, D] or [C, W, H, D] activation tensor
-                       ) -> torch.Tensor:
-    '''Apply Sigmoid function that squishes activations into a range between 0 and 1.
-    Then we classify all values greater than or equal to 0.5 to 1, and the values below 0.5 to 0.
-
-    Returns predicted binary mask(s).
-    '''
-
+def pred_to_binary_mask(pred: torch.Tensor) -> torch.Tensor:
+    """Apply Sigmoid function that squishes activations into a range between 0 and 1.
+    Then we classify all values greater than or equal to 0.5 to 1,
+    and the values below 0.5 to 0.
+
+    Args:
+        pred: [B, C, W, H, D] or [C, W, H, D] activation tensor
+
+    Returns:
+        Predicted binary mask(s). 
+ """ + pred = torch.sigmoid(pred) - return torch.where(pred>=0.5, 1, 0) + + return torch.where(pred >= 0.5, 1, 0) # %% ../nbs/02_vision_data.ipynb 9 class MedDataBlock(DataBlock): - '''Container to quickly build dataloaders.''' + """Container to quickly build dataloaders.""" + #TODO add get_x + def __init__(self, blocks: list = None, dl_type: TfmdDL = None, getters: list = None, + n_inp: int = None, item_tfms: list = None, batch_tfms: list = None, + reorder: bool = False, resample: (int, list) = None, **kwargs): - def __init__(self, blocks:list=None,dl_type:TfmdDL=None, getters:list=None, n_inp:int=None, item_tfms:list=None, - batch_tfms:list=None, reorder:bool=False, resample:(int, list)=None, **kwargs): + super().__init__(blocks, dl_type, getters, n_inp, item_tfms, + batch_tfms, **kwargs) - super().__init__(blocks, dl_type, getters, n_inp, item_tfms, batch_tfms, **kwargs) - MedBase.item_preprocessing(resample,reorder) + MedBase.item_preprocessing(resample, reorder) -# %% ../nbs/02_vision_data.ipynb 12 +# %% ../nbs/02_vision_data.ipynb 11 def MedMaskBlock(): + """Create a TransformBlock for medical masks.""" return TransformBlock(type_tfms=MedMask.create) -# %% ../nbs/02_vision_data.ipynb 14 +# %% ../nbs/02_vision_data.ipynb 13 class MedImageDataLoaders(DataLoaders): - '''Higher-level `MedDataBlock` API.''' - + """Higher-level `MedDataBlock` API.""" + @classmethod @delegates(DataLoaders.from_dblock) - def from_df(cls, df, valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None, - y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, reorder=False, resample=None, **kwargs): - '''Create from DataFrame.''' - + def from_df(cls, df, valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', + label_col=1, label_delim=None, y_block=None, valid_col=None, + item_tfms=None, batch_tfms=None, reorder=False, resample=None, **kwargs): + """Create from DataFrame.""" + if y_block is None: is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None y_block = MultiCategoryBlock if is_multi else CategoryBlock - splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col) + splitter = (RandomSplitter(valid_pct, seed=seed) + if valid_col is None else ColSplitter(valid_col)) - dblock = MedDataBlock(blocks=(ImageBlock(cls=MedImage), y_block), get_x=ColReader(fn_col, suff=suff), - get_y=ColReader(label_col, label_delim=label_delim), - splitter=splitter, - item_tfms=item_tfms, - reorder=reorder, - resample=resample) + dblock = MedDataBlock( + blocks=(ImageBlock(cls=MedImage), y_block), + get_x=ColReader(fn_col, suff=suff), + get_y=ColReader(label_col, label_delim=label_delim), + splitter=splitter, + item_tfms=item_tfms, + reorder=reorder, + resample=resample + ) return cls.from_dblock(dblock, df, **kwargs) -# %% ../nbs/02_vision_data.ipynb 19 +# %% ../nbs/02_vision_data.ipynb 16 @typedispatch -def show_batch(x:MedImage, y, samples, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs): - '''Showing a batch of samples for classification and regression tasks.''' - - if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize) +def show_batch(x: MedImage, y, samples, ctxs=None, max_n=6, nrows=None, + ncols=None, figsize=None, channel: int = 0, indices=None, + anatomical_plane: int = 0, **kwargs): + """Showing a batch of samples for classification and regression tasks.""" + + if ctxs is None: + ctxs = 
get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize) + n = 1 if y is None else 2 + for i in range(n): - ctxs = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))] + ctxs = [ + b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) + for b, c, _ in zip(samples.itemgot(i), ctxs, range(max_n)) + ] plt.tight_layout() + return ctxs -# %% ../nbs/02_vision_data.ipynb 20 +# %% ../nbs/02_vision_data.ipynb 17 @typedispatch -def show_batch(x:MedImage, y:MedMask, samples, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs): - '''Showing a batch of decoded segmentation samples.''' +def show_batch(x: MedImage, y: MedMask, samples, ctxs=None, max_n=6, nrows: int = None, + ncols: int = None, figsize=None, channel: int = 0, indices: int = None, + anatomical_plane: int = 0, **kwargs): + """Showing a batch of decoded segmentation samples.""" nrows, ncols = min(len(samples), max_n), x.shape[1] + 1 imgs = [] - fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs) + fig, axs = subplots(nrows, ncols, figsize=figsize, **kwargs) axs = axs.flatten() - for img, mask in list(zip(x,y)): + for img, mask in zip(x, y): im_channels = [MedImage(c_img[None]) for c_img in img] im_channels.append(MedMask(mask)) imgs.extend(im_channels) - ctxs = [im.show(ax=ax, indices=indices, anatomical_plane=anatomical_plane) for im, ax in zip(imgs, axs)] + ctxs = [im.show(ax=ax, indices=indices, anatomical_plane=anatomical_plane) + for im, ax in zip(imgs, axs)] + plt.tight_layout() return ctxs -# %% ../nbs/02_vision_data.ipynb 22 +# %% ../nbs/02_vision_data.ipynb 19 @typedispatch -def show_results(x:MedImage, y:torch.Tensor, samples, outs, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs): - '''Showing samples and their corresponding predictions for regression tasks.''' +def show_results(x: MedImage, y: torch.Tensor, samples, outs, ctxs=None, max_n: int = 6, + nrows: int = None, ncols: int = None, figsize=None, channel: int = 0, + indices: int = None, anatomical_plane: int = 0, **kwargs): + """Showing samples and their corresponding predictions for regression tasks.""" - if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize) + if ctxs is None: + ctxs = get_grid(min(len(samples), max_n), nrows=nrows, + ncols=ncols, figsize=figsize) for i in range(len(samples[0])): - ctxs = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))] + ctxs = [ + b.show(ctx=c, channel=channel, indices=indices, + anatomical_plane=anatomical_plane, **kwargs) + for b, c, _ in zip(samples.itemgot(i), ctxs, range(max_n)) + ] + for i in range(len(outs[0])): - ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))] + ctxs = [ + b.show(ctx=c, **kwargs) + for b, c, _ in zip(outs.itemgot(i), ctxs, range(max_n)) + ] + return ctxs -# %% ../nbs/02_vision_data.ipynb 23 +# %% ../nbs/02_vision_data.ipynb 20 @typedispatch -def show_results(x:MedImage, y:TensorCategory, samples, outs, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs): - '''Showing samples and their corresponding predictions for classification tasks.''' - - if ctxs is None: ctxs = 
get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize) +def show_results(x: MedImage, y: TensorCategory, samples, outs, ctxs=None, + max_n: int = 6, nrows: int = None, ncols: int = None, figsize=None, channel: int = 0, + indices: int = None, anatomical_plane: int = 0, **kwargs): + """Showing samples and their corresponding predictions for classification tasks.""" + + if ctxs is None: + ctxs = get_grid(min(len(samples), max_n), nrows=nrows, + ncols=ncols, figsize=figsize) + for i in range(2): - ctxs = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))] - ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs) for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))] + ctxs = [b.show(ctx=c, channel=channel, indices=indices, + anatomical_plane=anatomical_plane, **kwargs) + for b, c, _ in zip(samples.itemgot(i), ctxs, range(max_n))] + + ctxs = [r.show(ctx=c, color='green' if b == r else 'red', **kwargs) + for b, r, c, _ in zip(samples.itemgot(1), outs.itemgot(0), ctxs, range(max_n))] + return ctxs -# %% ../nbs/02_vision_data.ipynb 24 +# %% ../nbs/02_vision_data.ipynb 21 @typedispatch -def show_results(x:MedImage, y:MedMask, samples, outs, ctxs=None, max_n=6, nrows=None, ncols=1, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs): - ''' Showing decoded samples and their corresponding predictions for segmentation tasks.''' +def show_results(x: MedImage, y: MedMask, samples, outs, ctxs=None, max_n: int = 6, + nrows: int = None, ncols: int = 1, figsize=None, channel: int = 0, + indices: int = None, anatomical_plane: int = 0, **kwargs): + """Showing decoded samples and their corresponding predictions for segmentation tasks.""" + + if ctxs is None: + ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, + figsize=figsize, double=True, title='Target/Prediction') - if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True, title='Target/Prediction') for i in range(2): - ctxs[::2] = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))] - for o in [samples,outs]: - ctxs[1::2] = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))] + ctxs[::2] = [b.show(ctx=c, channel=channel, indices=indices, + anatomical_plane=anatomical_plane, **kwargs) + for b, c, _ in zip(samples.itemgot(i), ctxs[::2], range(2 * max_n))] + + for o in [samples, outs]: + ctxs[1::2] = [b.show(ctx=c, channel=channel, indices=indices, + anatomical_plane=anatomical_plane, **kwargs) + for b, c, _ in zip(o.itemgot(0), ctxs[1::2], range(2 * max_n))] + return ctxs -# %% ../nbs/02_vision_data.ipynb 26 +# %% ../nbs/02_vision_data.ipynb 23 @typedispatch -def plot_top_losses(x: MedImage, y, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs): - '''Show images in top_losses along with their prediction, actual, loss, and probability of actual class.''' +def plot_top_losses(x: MedImage, y, samples, outs, raws, losses, nrows: int = None, + ncols: int = None, figsize=None, channel: int = 0, indices: int = None, + anatomical_plane: int = 0, **kwargs): + """Show images in top_losses along with their prediction, actual, loss, and probability of actual 
class."""

-    title = 'Prediction/Actual/Loss' if type(y) == torch.Tensor else 'Prediction/Actual/Loss/Probability'
+    title = 'Prediction/Actual/Loss' if isinstance(y, torch.Tensor) else 'Prediction/Actual/Loss/Probability'
     axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize, title=title)
-    for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
+
+    for ax, s, o, r, l in zip(axs, samples, outs, raws, losses):
         s[0].show(ctx=ax, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs)
-        if type(y) == torch.Tensor: ax.set_title(f'{r.max().item():.2f}/{s[1]} / {l.item():.2f}')
-        else: ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
-# %% ../nbs/02_vision_data.ipynb 27
+
+        if isinstance(y, torch.Tensor):
+            ax.set_title(f'{r.max().item():.2f} / {s[1]} / {l.item():.2f}')
+        else:
+            ax.set_title(f'{o[0]} / {s[1]} / {l.item():.2f} / {r.max().item():.2f}')
+
+# %% ../nbs/02_vision_data.ipynb 24
 @typedispatch
-def plot_top_losses(x: MedImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):
-    #TODO: not tested yet
+def plot_top_losses(x: MedImage, y: TensorMultiCategory, samples, outs, raws,
+                    losses, nrows: int = None, ncols: int = None, figsize=None,
+                    channel: int = 0, indices: int = None,
+                    anatomical_plane: int = 0, **kwargs):
+    # TODO: not tested yet
     axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize)
-    for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs)
+
+    for i, (ax, s) in enumerate(zip(axs, samples)):
+        s[0].show(ctx=ax, title=f'Image {i}', channel=channel,
+                  indices=indices, anatomical_plane=anatomical_plane, **kwargs)
+
     rows = get_empty_df(len(samples))
-    outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
-    for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
-        rows = [b.show(ctx=r, label=l, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
+    outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item()))
+             for s, o, r, l in zip(samples, outs, raws, losses))
+
+    for i, l in enumerate(["target", "predicted", "probabilities", "loss"]):
+        rows = [b.show(ctx=r, label=l, channel=channel, indices=indices,
+                       anatomical_plane=anatomical_plane, **kwargs)
+                for b, r in zip(outs.itemgot(i), rows)]
+
     display_df(pd.DataFrame(rows))
diff --git a/fastMONAI/vision_inference.py b/fastMONAI/vision_inference.py
index 1d35df0..e1ced7b 100644
--- a/fastMONAI/vision_inference.py
+++ b/fastMONAI/vision_inference.py
@@ -24,27 +24,42 @@ def _to_original_orientation(input_img, org_orientation):
     return reoriented_array[None]

 # %% ../nbs/06_vision_inference.ipynb 4
-def _do_resize(o, target_shape, image_interpolation='linear', label_interpolation='nearest'):
-    '''Resample images so the output shape matches the given target shape.'''
+def _do_resize(o, target_shape, image_interpolation='linear',
+               label_interpolation='nearest'):
+    """
+    Resample images so the output shape matches the given target shape.
+    """

-    resize = Resize(target_shape, image_interpolation=image_interpolation, label_interpolation=label_interpolation)
+    resize = Resize(
+        target_shape,
+        image_interpolation=image_interpolation,
+        label_interpolation=label_interpolation
+    )
+
     return resize(o)

 # %% ../nbs/06_vision_inference.ipynb 5
-def inference(learn_inf, reorder, resample, fn:(Path,str)='', save_path:(str,Path)=None, org_img=None, input_img=None, org_size=None):
-    '''Predict on new data using exported model'''
+def inference(learn_inf, reorder, resample, fn: (str, Path) = '',
+              save_path: (str, Path) = None, org_img=None, input_img=None,
+              org_size=None):
+    """Predict on new data using an exported model."""
+
     if None in [org_img, input_img, org_size]:
-        org_img, input_img, org_size = med_img_reader(fn, reorder, resample, only_tensor=False)
-    else: org_img, input_img = copy(org_img), copy(input_img)
+        org_img, input_img, org_size = med_img_reader(fn, reorder, resample,
+                                                      only_tensor=False)
+    else:
+        org_img, input_img = copy(org_img), copy(input_img)

-    pred, *_ = learn_inf.predict(input_img.data);
+    pred, *_ = learn_inf.predict(input_img.data)

-    pred_mask = do_pad_or_crop(pred.float(), input_img.shape[1:], padding_mode=0, mask_name=None)
+    pred_mask = do_pad_or_crop(pred.float(), input_img.shape[1:], padding_mode=0,
+                               mask_name=None)
     input_img.set_data(pred_mask)

     input_img = _do_resize(input_img, org_size, image_interpolation='nearest')

-    reoriented_array = _to_original_orientation(input_img.as_sitk(), ('').join(org_img.orientation))
+    reoriented_array = _to_original_orientation(input_img.as_sitk(),
+                                                ''.join(org_img.orientation))

     org_img.set_data(reoriented_array)

@@ -56,12 +71,10 @@ def inference(learn_inf, reorder, resample, fn:(Path,str)='', save_path:(str,Pat
     return org_img

 # %% ../nbs/06_vision_inference.ipynb 7
-def refine_binary_pred_mask(
-    pred_mask,
-    remove_size: (int, float) = None,
-    percentage: float = 0.2,
-    verbose: bool = False
-):
+def refine_binary_pred_mask(pred_mask,
+                            remove_size: (int, float) = None,
+                            percentage: float = 0.2,
+                            verbose: bool = False) -> np.ndarray:
     """Removes small objects from the predicted binary mask.

     Args:
@@ -74,6 +87,7 @@ def refine_binary_pred_mask(
     Returns:
         The processed mask with small objects removed.
     """
+
     labeled_mask, n_components = label(pred_mask)

     if verbose:
diff --git a/fastMONAI/vision_loss.py b/fastMONAI/vision_loss.py
index 197da04..4bb3884 100644
--- a/fastMONAI/vision_loss.py
+++ b/fastMONAI/vision_loss.py
@@ -12,40 +12,61 @@

 # %% ../nbs/04_vision_loss_functions.ipynb 3
 class CustomLoss:
-    '''Wrapper to get show_results to work.'''
+    """A custom loss wrapper class for loss functions to allow them to work with
+    the 'show_results' method in fastai.
+    """

     def __init__(self, loss_func):
+        """Constructs CustomLoss object.
+
+        Args:
+            loss_func: The loss function to be wrapped.
+        """
+
         self.loss_func = loss_func

     def __call__(self, pred, targ):
-        if isinstance(pred, MedBase): pred, targ = torch.Tensor(pred.cpu()), torch.Tensor(targ.cpu().float())
+        """Computes the loss for given predictions and targets.
+
+        Args:
+            pred: The predicted outputs.
+            targ: The ground truth targets.
+
+        Returns:
+            The computed loss.
+        """
+
+        if isinstance(pred, MedBase):
+            pred, targ = torch.Tensor(pred.cpu()), torch.Tensor(targ.cpu().float())
+
         return self.loss_func(pred, targ)

     def activation(self, x):
         return x

-    def decodes(self, x):
-        '''Converts model output to target format.
-
+    def decodes(self, x) -> torch.Tensor:
+        """Converts model output to target format.
+
         Args:
-            x: Activations for each class [B, C, W, H, D]
+            x: Activations for each class with dimensions [B, C, W, H, D].

         Returns:
-            torch.Tensor: Predicted mask.
-        '''
-
+            The predicted mask.
+        """
+
         n_classes = x.shape[1]
-        if n_classes == 1: x = pred_to_binary_mask(x)
-        else: x,_ = batch_pred_to_multiclass_mask(x)
+        if n_classes == 1:
+            x = pred_to_binary_mask(x)
+        else:
+            x, _ = batch_pred_to_multiclass_mask(x)

         return x
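For orientation between the two loss hunks, a minimal usage sketch of the wrapper above. It is illustrative only: `dls` and `model` stand in for a fastMONAI DataLoaders and a segmentation network built elsewhere, and `TverskyFocalLoss` is the class defined in the next hunk.

from fastai.learner import Learner

# Wrapping the raw loss lets fastai's show_results()/plot_top_losses() call
# activation() and decodes() to turn logits into displayable masks.
loss_func = CustomLoss(loss_func=TverskyFocalLoss(to_onehot_y=True, softmax=True))
learn = Learner(dls, model, loss_func=loss_func)  # dls/model are placeholders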
 # %% ../nbs/04_vision_loss_functions.ipynb 4
 class TverskyFocalLoss(_Loss):
     """
-    Compute both Dice loss and Focal Loss, and return the weighted sum of these two losses.
-    The details of Dice loss is shown in ``monai.losses.DiceLoss``.
-    The details of Focal Loss is shown in ``monai.losses.FocalLoss``.
+    Compute Tversky loss with a focus parameter, gamma, applied.
+    The details of Tversky loss are shown in ``monai.losses.TverskyLoss``.
     """

     def __init__(
@@ -54,45 +75,45 @@ def __init__(
         to_onehot_y: bool = False,
         sigmoid: bool = False,
         softmax: bool = False,
-        reduction: str = "mean",
         gamma: float = 2,
-        #focal_weight: (float, int, torch.Tensor) = None,
-        #lambda_dice: float = 1.0,
-        #lambda_focal: float = 1.0,
-        alpha = 0.5,
-        beta = 0.99
-    ) -> None:
-
+        alpha: float = 0.5,
+        beta: float = 0.99):
+        """
+        Args:
+            include_background: whether to calculate loss for the background class.
+            to_onehot_y: whether to convert `y` into one-hot format.
+            sigmoid: if True, apply a sigmoid function to the prediction.
+            softmax: if True, apply a softmax function to the prediction.
+            gamma: the focal parameter; it modulates the loss with regard to
+                how far the prediction is from the target.
+            alpha: the weight of false positives in the Tversky loss calculation.
+            beta: the weight of false negatives in the Tversky loss calculation.
+        """
+
         super().__init__()
-        self.tversky = TverskyLoss(to_onehot_y=to_onehot_y, include_background=include_background, sigmoid=sigmoid, softmax=softmax, alpha=alpha, beta=beta)
-        #self.focal = FocalLoss(to_onehot_y=to_onehot_y, include_background=include_background, gamma=gamma, weight=focal_weight, reduction=reduction)
-
-        #if lambda_dice < 0.0: raise ValueError("lambda_dice should be no less than 0.0.")
-        #if lambda_focal < 0.0: raise ValueError("lambda_focal should be no less than 0.0.")
-        #self.lambda_dice = lambda_dice
-        #self.lambda_focal = lambda_focal
-        self.to_onehot_y = to_onehot_y
+        self.tversky = TverskyLoss(
+            to_onehot_y=to_onehot_y,
+            include_background=include_background,
+            sigmoid=sigmoid,
+            softmax=softmax,
+            alpha=alpha,
+            beta=beta
+        )
         self.gamma = gamma
-        self.include_background = include_background

     def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
         """
         Args:
-            input: the shape should be BNH[WD]. The input should be the original logits
-                due to the restriction of ``monai.losses.FocalLoss``.
-            target: the shape should be BNH[WD] or B1H[WD].
+            input: the shape should be [B, C, W, H, D]. The input should be the original logits.
+            target: the shape should be [B, C, W, H, D].
+
         Raises:
             ValueError: When number of dimensions for input and target are different.
-            ValueError: When number of channels for target is neither 1 nor the same as input.
         """
         if len(input.shape) != len(target.shape):
-            raise ValueError("the number of dimensions for input and target should be the same.")
-
-        n_pred_ch = input.shape[1]
+            raise ValueError("The number of dimensions for input and target should be the same.")

         tversky_loss = self.tversky(input, target)
-        #focal_loss = self.focal(input, target)
-        total_loss: torch.Tensor = 1 - ((1 - tversky_loss)**self.gamma) #tversky_loss
-        #print(total_loss,total_loss.shape)
-        #tversky_loss + focal_loss
+        total_loss: torch.Tensor = 1 - ((1 - tversky_loss)**self.gamma)
+
         return total_loss
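The forward pass reduces to total_loss = 1 - (1 - tversky_loss)**gamma, so gamma only reshapes the plain Tversky term: at gamma=2 a Tversky loss of 0.3 maps to 1 - 0.7**2 = 0.51, while gamma=1 leaves it unchanged. A small smoke test, with illustrative shapes only:

import torch

loss_fn = TverskyFocalLoss(to_onehot_y=True, softmax=True, gamma=2)
logits = torch.randn(2, 2, 8, 8, 8)                    # [B, C, W, H, D] raw logits
target = torch.randint(0, 2, (2, 1, 8, 8, 8)).float()  # single-channel class indices
print(loss_fn(logits, target))                         # scalar tensor in [0, 1]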
diff --git a/fastMONAI/vision_metrics.py b/fastMONAI/vision_metrics.py
index 69ea891..d2ca7a2 100644
--- a/fastMONAI/vision_metrics.py
+++ b/fastMONAI/vision_metrics.py
@@ -11,50 +11,67 @@
 from .vision_data import pred_to_binary_mask, batch_pred_to_multiclass_mask

 # %% ../nbs/05_vision_metrics.ipynb 3
-def calculate_dsc(pred, targ):
-    ''' MONAI `compute_meandice`'''
+def calculate_dsc(pred: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:
+    """Dice score computed with MONAI's `compute_dice`."""

     return torch.Tensor([compute_dice(p[None], t[None]) for p, t in list(zip(pred,targ))])

 # %% ../nbs/05_vision_metrics.ipynb 4
-def calculate_haus(pred, targ):
-    ''' MONAI `compute_hausdorff_distance`'''
+def calculate_haus(pred: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:
+    """Hausdorff distance computed with MONAI's `compute_hausdorff_distance`."""

     return torch.Tensor([compute_hausdorff_distance(p[None], t[None]) for p, t in list(zip(pred,targ))])

 # %% ../nbs/05_vision_metrics.ipynb 5
-def binary_dice_score(act, # Activation tensor [B, C, W, H, D]
-                      targ # Target masks [B, C, W, H, D]
-                     ) -> torch.Tensor:
-    '''Calculate the mean Dice score for binary semantic segmentation tasks.'''
-
+def binary_dice_score(act: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:
+    """Calculate the mean Dice score for binary semantic segmentation tasks.
+
+    Args:
+        act: Activation tensor with dimensions [B, C, W, H, D].
+        targ: Target masks with dimensions [B, C, W, H, D].
+
+    Returns:
+        Mean Dice score.
+    """

     pred = pred_to_binary_mask(act)
     dsc = calculate_dsc(pred.cpu(), targ.cpu())

     return torch.mean(dsc)

 # %% ../nbs/05_vision_metrics.ipynb 6
-def multi_dice_score(act, # Activation values [B, C, W, H, D]
-                     targ # Target masks [B, C, W, H, D]
-                    ) -> torch.Tensor:
-    '''Calculate the mean Dice score for each class in multi-class semantic segmentation tasks.'''
+def multi_dice_score(act: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:
+    """Calculate the mean Dice score for each class in multi-class semantic
+    segmentation tasks.
+
+    Args:
+        act: Activation tensor with dimensions [B, C, W, H, D].
+        targ: Target masks with dimensions [B, C, W, H, D].
+
+    Returns:
+        Mean Dice score for each class.
+    """
     pred, n_classes = batch_pred_to_multiclass_mask(act)

     binary_dice_scores = []

     for c in range(1, n_classes):
-        c_pred, c_targ = torch.where(pred==c, 1, 0), torch.where(targ==c, 1, 0)
+        c_pred, c_targ = torch.where(pred == c, 1, 0), torch.where(targ == c, 1, 0)
         dsc = calculate_dsc(c_pred, c_targ)
-        binary_dice_scores.append(np.nanmean(dsc)) #TODO update torch to get torch.nanmean() to work
+        binary_dice_scores.append(np.nanmean(dsc))  # TODO: update torch to get torch.nanmean() to work

     return torch.Tensor(binary_dice_scores)

 # %% ../nbs/05_vision_metrics.ipynb 7
-def binary_hausdorff_distance(act, # Activation tensor [B, C, W, H, D]
-                              targ # Target masks [B, C, W, H, D]
-                             ) -> torch.Tensor:
-    '''Calculate the mean Hausdorff distance for binary semantic segmentation tasks.'''
+def binary_hausdorff_distance(act: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:
+    """Calculate the mean Hausdorff distance for binary semantic segmentation tasks.
+
+    Args:
+        act: Activation tensor with dimensions [B, C, W, H, D].
+        targ: Target masks with dimensions [B, C, W, H, D].
+
+    Returns:
+        Mean Hausdorff distance.
+    """

     pred = pred_to_binary_mask(act)

@@ -62,10 +79,16 @@ def binary_hausdorff_distance(act, # Activation tensor [B, C, W, H, D]
     return torch.mean(haus)

 # %% ../nbs/05_vision_metrics.ipynb 8
-def multi_hausdorff_distance(act, # Activation tensor [B, C, W, H, D]
-                             targ # Target masks [B, C, W, H, D]
-                            ) -> torch.Tensor :
-    '''Calculate the mean Hausdorff distance for each class in multi-class semantic segmentation tasks.'''
+def multi_hausdorff_distance(act: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:
+    """Calculate the mean Hausdorff distance for each class in multi-class semantic segmentation tasks.
+
+    Args:
+        act: Activation tensor with dimensions [B, C, W, H, D].
+        targ: Target masks with dimensions [B, C, W, H, D].
+
+    Returns:
+        Mean Hausdorff distance for each class.
+    """

     pred, n_classes = batch_pred_to_multiclass_mask(act)
     binary_haus = []
diff --git a/fastMONAI/vision_plot.py b/fastMONAI/vision_plot.py
index 8111266..e48b376 100644
--- a/fastMONAI/vision_plot.py
+++ b/fastMONAI/vision_plot.py
@@ -9,8 +9,7 @@
 # %% ../nbs/00_vision_plot.ipynb 3
 def _get_slice(image, channel: int, indices: (int, list), anatomical_plane: int,
                voxel_size: (int, list)):
-    """
-    A private method to get a 2D tensor and aspect ratio for plotting.
+    """A private method to get a 2D tensor and aspect ratio for plotting.
     This is modified code from the torchio function `plot_volume`.

     Args:
@@ -53,11 +52,9 @@ def _get_slice(image, channel: int, indices: (int, list), anatomical_plane: int,

 # %% ../nbs/00_vision_plot.ipynb 4
 @delegates(plt.Axes.imshow, keep=True, but=['shape', 'imlim'])
-def show_med_img(
-    im, ctx, channel: int, indices: (int, list), anatomical_plane: int,
-    voxel_size: (int, list), ax=None, figsize=None, title=None, **kwargs):
-    """
-    Show an image on `ax`. This is a modified code from the fastai function `show_image`.
+def show_med_img(im, ctx, channel: int, indices: (int, list), anatomical_plane: int,
+                 voxel_size: (int, list), ax=None, figsize=None, title=None, **kwargs):
+    """Show an image on `ax`. This is modified code from the fastai function `show_image`.

     Args:
         im: The input image.
@@ -74,18 +71,23 @@ def show_med_img(

     Returns:
         Axis with the plot.
     """
-    if hasattrs(im, ('data', 'cpu', 'permute')):
+    if hasattrs(im, ('data', 'cpu', 'permute')):  # Check if `im` has the necessary attributes
         im = im.data.cpu()
     im, aspect = _get_slice(
-        im, channel=channel, anatomical_plane=anatomical_plane,
-        voxel_size=voxel_size, indices=indices
+        im,
+        channel=channel,
+        anatomical_plane=anatomical_plane,
+        voxel_size=voxel_size,
+        indices=indices
     )

-    ax = ifnone(ax, ctx)
-    if ax is None:
-        _, ax = plt.subplots(figsize=figsize)  # ax is only None when .show() is used.
+    ax = ax if ax is not None else ctx
+
+    if ax is None:  # ax is only None when .show() is used.
+        _, ax = plt.subplots(figsize=figsize)

     ax.imshow(im, aspect=aspect, **kwargs)
+
     if title is not None:
         ax.set_title(title)
diff --git a/nbs/00_vision_plot.ipynb b/nbs/00_vision_plot.ipynb
index 984325c..4f7714f 100644
--- a/nbs/00_vision_plot.ipynb
+++ b/nbs/00_vision_plot.ipynb
@@ -40,8 +40,7 @@
    "source": [
     "#| export\n",
     "def _get_slice(image, channel: int, indices: (int, list), anatomical_plane: int, voxel_size: (int, list)):\n",
-    "    \"\"\"\n",
-    "    A private method to get a 2D tensor and aspect ratio for plotting.\n",
+    "    \"\"\"A private method to get a 2D tensor and aspect ratio for plotting.\n",
     "    This is modified code from the torchio function `plot_volume`.\n",
     "\n",
     "    Args:\n",
@@ -86,17 +85,15 @@
 {
  "cell_type": "code",
  "execution_count": null,
-   "id": "c9dc9d12-ade9-4e96-a2da-82a0d1d04fdc",
+   "id": "7955e15b-7580-4219-838e-93ff094e146a",
  "metadata": {},
  "outputs": [],
  "source": [
     "#| export\n",
     "@delegates(plt.Axes.imshow, keep=True, but=['shape', 'imlim'])\n",
-    "def show_med_img(\n",
-    "    im, ctx, channel: int, indices: (int, list), anatomical_plane: int,\n",
-    "    voxel_size: (int, list), ax=None, figsize=None, title=None, **kwargs):\n",
-    "    \"\"\"\n",
-    "    Show an image on `ax`. This is a modified code from the fastai function `show_image`.\n",
+    "def show_med_img(im, ctx, channel: int, indices: (int, list), anatomical_plane: int,\n",
+    "                 voxel_size: (int, list), ax=None, figsize=None, title=None, **kwargs):\n",
+    "    \"\"\"Show an image on `ax`. This is modified code from the fastai function `show_image`.\n",
     "\n",
     "    Args:\n",
     "        im: The input image.\n",
@@ -113,18 +110,23 @@
     "    Returns:\n",
     "        Axis with the plot.\n",
     "    \"\"\"\n",
-    "    if hasattrs(im, ('data', 'cpu', 'permute')):\n",
+    "    if hasattrs(im, ('data', 'cpu', 'permute')):  # Check if `im` has the necessary attributes\n",
     "        im = im.data.cpu()\n",
     "    im, aspect = _get_slice(\n",
-    "        im, channel=channel, anatomical_plane=anatomical_plane,\n",
-    "        voxel_size=voxel_size, indices=indices\n",
+    "        im,\n",
+    "        channel=channel,\n",
+    "        anatomical_plane=anatomical_plane,\n",
+    "        voxel_size=voxel_size,\n",
+    "        indices=indices\n",
     "    )\n",
     "\n",
-    "    ax = ifnone(ax, ctx)\n",
-    "    if ax is None:\n",
-    "        _, ax = plt.subplots(figsize=figsize)  # ax is only None when .show() is used.\n",
+    "    ax = ax if ax is not None else ctx\n",
+    "\n",
+    "    if ax is None:  # ax is only None when .show() is used.\n",
+    "        _, ax = plt.subplots(figsize=figsize)\n",
    "\n",
     "    ax.imshow(im, aspect=aspect, **kwargs)\n",
+    "\n",
     "    if title is not None:\n",
     "        ax.set_title(title)\n",
     "\n",
diff --git a/nbs/01_vision_core.ipynb b/nbs/01_vision_core.ipynb
index 9b9ff91..443d4e8 100644
--- a/nbs/01_vision_core.ipynb
+++ b/nbs/01_vision_core.ipynb
@@ -54,7 +54,8 @@
    "source": [
     "#| export\n",
     "def _preprocess(obj, reorder, resample):\n",
-    "    \"\"\"Preprocesses the given object.\n",
+    "    \"\"\"\n",
+    "    Preprocesses the given object.\n",
     "\n",
     "    Args:\n",
     "        obj: The object to preprocess.\n",
@@ -147,12 +148,8 @@
    "outputs": [],
    "source": [
     "#| export\n",
-    "def med_img_reader(\n",
-    "    file_path: (str, Path),\n",
-    "    dtype=torch.Tensor,\n",
-    "    reorder: bool = False,\n",
-    "    resample: list = None,\n",
-    "    only_tensor: bool = True\n",
+    "def med_img_reader(file_path: (str, Path), dtype=torch.Tensor, reorder: bool = False,\n",
+    "                   resample: list = None, only_tensor: bool = True\n",
     "):\n",
     "    \"\"\"Loads and preprocesses a medical image.\n",
     "\n",
@@ -198,9 +195,10 @@
    "source": [
     "#| export\n",
     "class MetaResolver(type(torch.Tensor), metaclass=BypassNewMeta):\n",
-    "    '''A class to bypass metaclass conflict:\n",
+    "    \"\"\"\n",
+    "    A class to bypass metaclass conflict:\n",
     "    https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/data/batch.html\n",
-    "    '''\n",
+    "    \"\"\"\n",
     "    pass"
   ]
  },
@@ -211,26 +209,29 @@
    "outputs": [],
    "source": [
     "#| export\n",
-    "class MedBase(torch.Tensor, metaclass=MetaResolver): \n",
-    "    '''A class that represents an image object. Metaclass casts x to this class if it is of type cls._bypass_type.'''\n",
-    "\n",
-    "    _bypass_type=torch.Tensor\n",
+    "class MedBase(torch.Tensor, metaclass=MetaResolver):\n",
+    "    \"\"\"A class that represents an image object.\n",
+    "    Metaclass casts `x` to this class if it is of type `cls._bypass_type`.\"\"\"\n",
+    "    \n",
+    "    _bypass_type = torch.Tensor\n",
     "    _show_args = {'cmap':'gray'}\n",
     "    resample, reorder = None, False\n",
     "    affine_matrix = None\n",
     "\n",
-    "\n",
     "    @classmethod\n",
-    "    def create(cls, fn: (Path, str, torch.Tensor), **kwargs):\n",
+    "    def create(cls, fn: (Path, str, torch.Tensor), **kwargs) -> torch.Tensor:\n",
     "        \"\"\"\n",
-    "        Open a medical image and cast to MedBase object. 
If it is a torch.Tensor, cast to MedBase object.\n", + " Opens a medical image and casts it to MedBase object.\n", + " If `fn` is a torch.Tensor, it's cast to MedBase object.\n", "\n", " Args:\n", - " fn: Image path or a 4D torch.Tensor.\n", - " kwargs: Additional parameters.\n", + " fn : (Path, str, torch.Tensor)\n", + " Image path or a 4D torch.Tensor.\n", + " kwargs : dict\n", + " Additional parameters for the medical image reader.\n", "\n", " Returns:\n", - " A 4D tensor as MedBase object.\n", + " torch.Tensor : A 4D tensor as a MedBase object.\n", " \"\"\"\n", " if isinstance(fn, torch.Tensor):\n", " return cls(fn)\n", @@ -240,18 +241,32 @@ " @classmethod\n", " def item_preprocessing(cls, resample: (list, int, tuple), reorder: bool):\n", " \"\"\"\n", - " Change the values for the class variables `resample` and `reorder`.\n", + " Changes the values for the class variables `resample` and `reorder`.\n", "\n", " Args:\n", - " resample: A list with voxel spacing.\n", - " reorder: Whether to reorder the data to be closest to canonical (RAS+) orientation.\n", + " resample : (list, int, tuple)\n", + " A list with voxel spacing.\n", + " reorder : bool\n", + " Whether to reorder the data to be closest to canonical (RAS+) orientation.\n", " \"\"\"\n", " cls.resample = resample\n", " cls.reorder = reorder\n", "\n", - " def show(self, ctx=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", + " def show(self, ctx=None, channel: int = 0, indices: int = None, anatomical_plane: int = 0, **kwargs):\n", " \"\"\"\n", - " Show Medimage using `merge(self._show_args, kwargs)`.\n", + " Displays the Medimage using `merge(self._show_args, kwargs)`.\n", + "\n", + " Args:\n", + " ctx : Any, optional\n", + " Context to use for the display. Defaults to None.\n", + " channel : int, optional\n", + " The channel of the image to be displayed. Defaults to 0.\n", + " indices : list or None, optional\n", + " Indices of the images to be displayed. Defaults to None.\n", + " anatomical_plane : int, optional\n", + " Anatomical plane of the image to be displayed. 
Defaults to 0.\n",
+    "        kwargs : dict, optional\n",
+    "            Additional parameters for the show function.\n",
     "\n",
     "        Returns:\n",
     "            Shown image.\n",
@@ -262,7 +277,8 @@
     "            **merge(self._show_args, kwargs)\n",
     "        )\n",
     "\n",
-    "    def __repr__(self):\n",
+    "    def __repr__(self) -> str:\n",
+    "        \"\"\"Returns the string representation of the MedBase instance.\"\"\"\n",
     "        return f'{self.__class__.__name__} mode={self.mode} size={\"x\".join([str(d) for d in self.size])}'"
    ]
   },
@@ -274,7 +290,7 @@
    "source": [
     "#| export\n",
     "class MedImage(MedBase):\n",
-    "    '''Subclass of MedBase that represents an image object.'''\n",
+    "    \"\"\"Subclass of MedBase that represents an image object.\"\"\"\n",
     "    pass"
    ]
   },
@@ -286,7 +302,7 @@
    "source": [
     "#| export\n",
     "class MedMask(MedBase):\n",
-    "    '''Subclass of MedBase that represents an mask object.'''\n",
+    "    \"\"\"Subclass of MedBase that represents a mask object.\"\"\"\n",
     "    _show_args = {'alpha':0.5, 'cmap':'tab20'}"
    ]
   },
@@ -305,17 +321,23 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "image/png": "<base64-encoded PNG cell output omitted>"
hzc1No9v5zvD37t3TT37yE62urpoSp8b16OhIe3t7Ojo6spj2mxTG4PhWhdMjjZHI1clRm5ubyufzxvwgzgSUwdUit0cSG1ofNXzECQgzAuUn07N0PL/2ZXxS/9w+/sLq+NiYDgUALffv39edO3c0Ho/14YcfKhqNWgExz0KqAfc2n8/PdSwPzh3P5V0+wJ7BYGC54NXVVRMI4vJ+v692u63pdKrd3V2Fw1ctISnvQnGB2pKsD/4uSOaH7wqDizpJXN5Op6ODgwNVKpW5s0qoH11cXFSlUjFCgKfPeX70dXznYNEC3fg5MDcSmbULpSsez9rr9bS2tqYf/vCHhna3220dHBzoyZMn+vDDD/Xs2TOz0sH48tsQ0u8MEKKu7t69eyqVSopEIsZRlDR3hCBuLday2Wya1qU2z3NdfdlR0F1l+GoJH3P60qVgjpHhy6lILVDeNJ1O9fjxYx0cHOh73/uexTvEZJLMsvFe0tVmpRSJUiie23sbfJb4Gze10+moUqlocXHRmE5UcgCGTadTbW1t6bXXXlOpVNL7779vfVhJU6G4grxhjwn4sjoYOXRU7Ha7VilCK1LKqRCsi4sL69h4cHAwV6/LdVFQnjiPJeQYBv7PfpFkzc5Ig62vr1vzNwDCXq+nZDKpH/zgB1pbW5N0RWJ48eKF9vf39fHHH+vTTz+9tj4z+O9vcnzrwhl0j3BxmARQS0qSyNNRxI1VzGazBuEDGMCP9YXZ3q31B7Wy8L6VBs/H7wErpJmw8ln+z2ZCm0Pha7fb+vjjj5XJZHR+fq6DgwMtLS1ZWkeake7D4bBRy3z1i/RZC+4VBoM5Wl9ft24G1BvCI11bW9Pm5qZyuZzu3btnQBTATyQSMZcRV7Tb7c4dmQihAe8Ba40S4IxTOiOgWLkmABG5adxVBIr1B5Enli0UClZc7tlCviwNeh2URASYU8wBlKDrfe973zOa58XFhfb29gyJhcDCGjC+TZdW+g7RWmIbGlj5Uh3+RptLsyPVLi8vtb6+rjt37swdJIRl8Cgmw9dj4up6ihoKQ5pZxeD3pc+CQ3yf9/FF1uQTnz9/bjEUbhfnTcZiMSMAAGjR5AtLE+TkeoXD5i6VSiqXy4bOPn361BDgXq+ne/fu6U//9E+VyWQM1Q6Hw5ZySiaTOjo6UqVSsfdEsK5z51AQ3F+aKRo62nnghGfG1aVTou98EIlE7N+STJAp1aIaiesSNxJiZDIZlUolu+fS0pIBO3yWuPz111/X66+/bum2Wq2m/f19PXr0SM+ePZvrA+SBq297fOvCyWKzkEdHR5aX8gwRYgdIBaQwEFY4m7B8vFD5TY1gIiD8PGgFvfAieFi1oGX1LjEIsO8gEKxZvLy8VD6ft2T3wsKCyuWyFUyzGVZWVrS2tqZarfaZViXXcXEhjk8mE3OdHz16pA8//NAsFrlV3FdO0cK68M6wcQCl4KB65UO8zPOQ5mKOgr1vg2Vcw+HQToCWZCfUIbSAghxCtLy8PMfm8qd2oVSoACIHSkd56izh+bJPVldX9cMf/tBajlarVe3t7enFixc6Pj6e684RjPu/7fGdMYRCoasDXp8+fapwOKx79+5Jkm0iGB5+w5NIBoDx7JwgvO3dZwAMDwLxe5++8blQru3BIjYdzwNi6HOm3kWPRmenrVEJQnd7n5MlXSTJ4mjftf66HKc0SweBcL/33nv6xS9+YemJSOSq922z2dR//a//1U77WltbUyKR0OPHj/Xs2TPt7u4aOZxDgQFNSFkwECQ643HcgzQ7QNdbWw8m0apGkp0fyrviQdCfmKPjpVn3Q0gfkDvOzs4sVufA3VQqpVarZb2TcdVhlr399tsql8tGajg5OdHTp0/17NkzdTode5fPY2l9W+M7sZz+73a7rUePHmkymahYLJolggVCSQ4xXTKZtDM4cFMQzCCYw8bwQsl1EFi/kRAqbzGDLp3n+0qz3KzX4J5/yuB7mUxGR0dHevbsmUqlkm183Gw2HyBOsILFW22s2eLios7Pz/XLX/5SZ2dnWl9ft054m5ubOj091dHR0VwR9eLiop49e6azszMNBgO9/vrrljf0TZrBBDxBAmYNGAApJZSafxf+n0gktLa2plgsZk2+/WeISaE5QoQHrAJskq5cVnLgNI/DygL+QPVE6KfTqd566y29/vrrRgk9Ozuzgn1/EjvzG9yz3/b4zkgIPo/X6XT0/PlzA3okqdlsWsEsbiKWStJctzrvaiJYbAo/4R6aR/gAm7g2G8ujlFhWKGqeKOHjY2kGzkiyFBAWGjDl8vJSrVZLe3t7yuVytklx5cvlsoEs3pULzh9xLoT1arVqz+K7A1CD6Hu04qL2+33rMoer7V17aXZiG94GiGeQ3O6ZQz68IB6kuojqDwj40lUlCK4/7SwBhvz1aTDNida4wtFoVI1Gw47k8C7/cDjU3bt39Tu/8ztKJpMWZz579kzPnz+fI39cN9ff1fjOUinSvOvAeRee1YPAUQuIRaVNhCdU+9ImYkwsGAvE7zzKyr28gLPxGQizpM9YXE/YppqGzRF0dVEAlEH5A3QgrmNtS6WSAWDXWXLvulN1ARmDZ/Z1rSCicJV5loWFBUM0iTtJZfj2IN7NB7X1PWW9wkKR0tQ7n8/PWVDOEd3Z2TEW0uXlpRH1OX0NHu1kMpnrtP69731PiURCJycnVvBMJzysPnE8jcC+973vmWCSe93f39fJyYkRNYKkj+/atf3OLCfuDMAJqRFcQ0nGHvJNlomFIpGrzmxBpgha1hcX+43EvX0iXZoxgnxsx/DsIp/z9MLsUyoIAbEOCoDnhLMKIjqZTKzHD2gvjCdodT5O9kiiz0li9ejCIMkKAihzowyM8MGnnEiXUOwszTfcZs08Ad7nJ4PrSz0leVkO9cXbYO17vZ5ZsUajYUQSQp1oNKpOp6MPP/xQ7XZbiURCW1tbdrLZ0dGRnSIACEY66OLiQt///ve1vr5uv6OB3MHBwVx6zsf2QZT6y+xr1ubfO75T4jt/VlZW9Nprr2ljY8OEqtfr6fDw0DQwsY8kq0YH1WOjUKIEJA9EPxwOjXnEwnnmizQPVvlNJ80S7/7ZvSXD0pL/oxMCcRVuHs8KdY5yroWFBUuqYz05euH8/NysEsLL83qgC8QV9xmqGggmbSMRTBQZ9DwahXnlw3tBRkAIfccFvwn5PQSATCZjQr2xsaFcLqd0Oq1CoaDR6Kr30y9/+cs5ErsnHmQyGb322msql8t68eKFHj16pE6no//+3/+77t27pz/6oz9SvV43lNU/L2T7crmsd955xw426na7Ojs70+HhoR0d6FM+/16h+jot7Xfm1noQJhaLKZ/Pa2NjwzY0m+vs7Mw09XQ6tQbB29vbn0lpIKD0KIWPi1sGg6fVahkHlRjUC7lPXeAWeyTSp1eCKCrX4zoALOT0KMKGdoby6Xa7Vrsoae5wHVzX4H0RfCiE0NnW19cth0xxMaR7PBOOxEP4C4WCkRf8qVrMIe+DlfElY4QRPB/tXobDoSqVijY3N/XGG2+YAhyPx0YaoRyMvCfrTp
3uxsaGMpmMksmk3njjDVUqFT169Ej5fF71el17e3tGYPcpFTyQd955R8Vi0ebx5OTEAKBgdQl786aM7xwQ8i4iYAM1mLlcTsfHx/adcDhsbQzr9frcCVFYOkAXL1DSrL0FxAViMhQB7rEHkHDByEd6qxIsl5JmvX3Q3rhtxJG4iliscrlsGwkmz3g8tuMASSV5t5Z3YT5wM6Gs0Tg6kUgYuukLDKiPlGRpk2KxqFKpZOASgumZVngPHrH2hQSSDHRaXl42UkM0GrX0RbPZ1Onp6VweG9om9EN6D925c8eYTJPJxOYDhlO/39cnn3yiSqViXgsuO+HKvXv39Nprr0manTC+v7+v/f191Wq1a+mJN2l8pyVjuGMIW6lUMuGKxWIqFAra3d01AQUk4tAk6QrlKxaLRkGTZEIOIISFxO2cTqdWd4jF8cIWrOQnPgO5DRIYPPnBWxCsiz8pDVeQyhDabdCuBcuKK0pO128eHxvzzNwDSwQNDsFFiUCdoy3k4uKi3njjDcXjcZ2cnHwmr4qC4n19/ti72UHiCKmJbDZrp5bD8iE9Fg5fdTesVCoaDodWAlcoFPTGG29oc3PTcsTgCKSX4PQSZxLf87zZbFZvvvmmUqmUKR06GXD4kVe27BvGTRDW71Q4pasJ6fV62t/fN4IBuSk6wEszYILYBDqcj8E49ZrhQSSPAPuUClxYBIBYDEvjm3wRx3K/l40gacCTGLywe8oaCoS4kz686XR6jkUTRBLxEkjMU0nCu2PJ+C5xOJbat4z0Hc9h2nAfT+LgnrwrAgsPt16vazQaWUf/J0+eqFgs6nvf+54pUNhIuKTRaNSOj6RAPRaLqd1umxdCgX0sFlOj0TA02yu9cDg8h2GgMJv/7yRr3wTar+PXKZhfF8r7nRdbs/DHx8eKx+Pa3d1VLBYzi5JOp3X//n2FQiEdHR2p0WhYLCTJgnxcStxInxMl5vMH+/BdrgVjBEtHA2NfE+lzotI8Z9dvUgZAjidB+MoLrDppIP6N4sEaoUyYM38PcqGZTMasBGVfeCa47pLmPAWElWMLoO55D8ArBN7F5xB5H5QMZWrZbFZbW1uaTqeqVqs6Ojqy4wh5N/LA6+vrVuR8dnamdDqtWCxm+IJvXYJbTrE9IQRzHI1Gdf/+fb322mvGzqL7Ow3WmJsgISaI0n/X4zsVTiZlNBpZ1TmnYxO8E/Nh4SDDexe12+1aA2NiOEqMaHrs4xIfZ/gYExeKYl5Pj8PSsik5EsC/C+9DygRkmGv7z7H5qeKfTOZPzgJplWTxIy4n18GVBlyhKRdFAiC/Ph0jzQQca+2PA+R5ycd6F9YDZcwTPydlhWteLBa1tbVlz0L/WSh5gD+pVMrOcKV9CrlWWmPyHDQY//TTT+04edqosC75fF737t1TOp2WJCs+2Nvbu/bohOv241cZwRz01zG+U0CIv9FYnU5HT548MbaINGtVghanWfLJyYnFH7TgB6Elv0Xejm7eUM6wkAiJR2GJDdmsviUnmxvXD0Hzv/PEBu8O+kE8izuHSyrJWmfiskmy0idiQIQL97fValmXd4gabMJkMqlUKmVKiDki50ncicvt2UpeQXngDlffE/On06kBTNlsVmtra8pkMur3+5bLnE6nevDggZLJpD755BMjucOKymQyqtVqhqqjWLgnSC5HIASJIQsLC9ZZj+c6OjrSp59+qv39/bk2I8FY8yalUBjfueUMumh0pAMYCIfDVoy7srJidZ4kq7Fyp6enVnXgi69xeb0rhzX0cZT/Oa4q1pYc6eXlpdLptFU90IkPZpC3pN6t9LlTrguYI83KreDj7u/vm+vnBdyjw1hy3gkXD5Dl/PxcKysrFrcSN0syJeRjX5+uYW2IiaVZnSsuuU85+fwi1pq6UoTS0xCpl1xbW5tzo31XQ9YH5Qk4R/c+kGHmeWFhwdqR4Ab3+33V63VVq1Xr1BdUlIzvmg103fjOASFvQUHaaCcBKEB+EI0fj8dVKBT07Nkz0/CkIzhxi5wd3E6sFdQ1gBE2uLcSvoEVVhXhSqVSikQi1u6fjUOy36dTQGVxIUFRcYup4sdCE2vV63U7lJbN75uDeWXi26SEQiGVy2VdXFzo7OxMKysr1swLV99zg6ENYpmwyngRuO6e2uZTUT595cG24XCoWq1mgA9dEj/99FN9/PHHajab+pM/+ROl02m9//77kmRd9Wkv0mq1bP48CZ/+ULRmgd6Xz+f18OFDOz1gMBjoxYsXhlPg5nuD4IXxpgmmJIW/6wdgsCHG47FOT091enpqSB0CQCsM33UN9w7LyEFICwsL2tzctKPZuT5xFhtqcXHRLAFEBAQWS9hsNhUKhYykTg+jVCplLCVpRuT2PVw93U2SCT+fp0IDIgbpBAQXBeJjVa5DLtLH5Dz38vKydQ/ADacouVAoKBKJGNDlUzXEbr7CxMeodHb3LCrelXfs9Xo6Pj7W8fGxHakxHA7193//96pWq3rnnXd0584d48TyfLjeVCSFQiGLJ6lJRQlKmptrkF5fz7m/v6/Dw8O5/kVeMG9yjlO6AZZTmrm3aLZut6tKpWJNkvkdOTyAnmQyaR3a0O4QvePxuN58800tLi7q8PDQzsfgMByfK/Tupy/29s9Eno0DaHO5nKSrzVwqlSTJ8nmgo2xcUjXEzZ7d4yl1KAqO7KtUKuZeYsF8uRtCiKUnt8mRgOl02o7jI37FOkIAgI7H/bHACCduLkoiyI1FyUEIKZVKajQakmZ0Q8Cf9fV1/eEf/qHu3bunWq2mR48eqd1ua2NjYw6FxlOQrlJonnrIetHLGPSdk9KZL047pyEc3kEQkb2J7izjRginNO9W0BYxmUxaW37QQIpmC4XCXJEuFg/U05cikUrpdDpzAuktAJUWHvjA2iLQCJ5PH6ysrNgxBHzeN+4K5tM8Z9enYYi5PMneu/pBIgKDTcv1sDbkhIfDoVEF+/2+zs/PDYzicCAfw6JQCCc8gotSQKnwWWJXam0vLy9VrVbNYhUKBf3Zn/2Z7t+/b0otl8tpd3fX5gxSBnHi5eWlVlZW5uaevkRegeDB0MYT9BpXFmDNC/5NIxu8bNwY4WQgIL5kCaAEi4Mw0iT4+PjYXFIs79nZmfWMlWS5U87y8GADuT+PhGJNJVnsB9uG3CoulOfqJhIJA67gCfsNARDEvWjohfLBuq+srNhm8zWq3uozF7yfpLmevhcXF9YzF/BsMpmY8PJMFEAH41lJc2V3kubmmYEio2wNYXvw4IHu3bunH/zgB9rY2NBoNDK+bzKZ1FtvvaVMJmPIcrfb1cnJiU5PT61jvaQ5IIvc7PLysqVtNjc3rbvBYDCwIwYpPWNfBWPNm2w1pRsmnAgGWtwDD8R1WBFAAc57RNDYlJVKxc60oBYUQIWNjqvnrYM0A6e8a+shfWJXKHe4UlgSrumBFN7Lo4UgvAgZLrufD68ksACAVFh8H28yR9RSRqNRSyFJsnpK4uXp9IrKyAHDbFYYUsyBB85QCCgLiqiJBSORiO7evat33nlHu7u7WlhY0OnpqVqtliKRiB3/R
7sUOswTm47HY+MK09UfoAklQyrMW03WnlPHOp2Oer2epJl3wPq+CuPGCCebF8vCRvZInd+4ksy1pNMayB6bkZ6u0+l0rrtfsEQIIMKjjzwH2tazggA/OJkLlFaSlVJBRAgCLEG2UHCjLCxcHeqLWwlrxz+HJxPgVlKJQ9qITcsJYr7W0RPwg4wf76nwnqyFJyDwTLjACwsLc4XVKysr6vf7+uijj1StVo1qyZz0+309efJE0hUwlkqltLW1ZTGwr8LhoCTOSPFtUKm44dkgtLRaLbXbbVOSPh/r1/4mjxsjnGxcLAMpExbz4uJirvrEb3IsEBsCJhB8UV8PKc1AkeAm98goVsPHoNIsPsRycT0sn69/9IqGHB6fAYH2ngECSBExrrzfUNIsvYPFBYnt9Xp2dMD6+vpcTpTnAJxpNBqW4kAJogiDqSaEBEuOFSWnSS0q8Xe329XBwYE14MK19iR9WoWQPqnX6zo6OrJ529nZmavlJSTx8TmeE4g5neoBgkB8XwVBvG7cCOH08Zg0a9XIWZO4Vrg8aFfQO19YzcZCODmM18dNbADvxiH0HowJ/g630ceg0iwxj9DjEvIupCs8EQHhZsMD6qysrCgSiViqCMuHe8bm5HsICE2vAKM4shBiOdbOI7TUcnqvwM8N6+Hd8yBtb3Hx6nRrmlbXajU9efJE7XZby8vLyufzhqITY/M9mmAPh0MdHBzYIcocaksBNRiCR2vhUVNeB0LLIVYQRK5LlwQR25s6boRwSvNngJDsx3Jixbz1Go3mu74hxL4rOeieb3AszbdIYXhriiD6+FSSudZstiA07zc0Gp9cIvxcX0OIO+zBKS+oMGUoceIZPFuIa0HSJ54k5+djaR/DEsMNBoO5wmuf3w2SDnyoAbCWSqUM9X3+/Lk++eQTO4YeSwpJAqEaDAbK5XK6c+eOlpeXdXh4qFgsprt371qMyhxPp1M1Gg1DnlHKkEsoEieNxKG6YAHsl+v2200fN0o4pRnhmriLPJpHWGlOXCgUrNaT5sNUjXC8uc+PkXP0NDju6VMJ/N8jovycTekbirHpYd/AbSU36l1khBBrhlfgARxAllQqZSVynkfKswGCEKORrE+n0xYOwA0mzptMJiY0xM2pVMrWwOdVfZ4XwfQKhFzzcDjUr371KysVo0gc5LRYLGplZcUOmpKkRqNhrTBhQy0vL5vFl2RAFce7s0bM4/LysjKZjFW0kOMmJmVcFxq8CuPGCKc0K8FKJpNKJBI2kV44fQzoWSy4tpPJZO5ULUAg707yt098e0FF6DwIwuYEMQUE8Yvt6XW+gsTfzysF7utRYbijWMFKpWIC6rm0fJd74aZKMovsmzFTBoc1ZK4Bi3heT9sjr+yVmQeNAGIODg60t7enWCymra0t5fN5NRoNa7rFidIcnkuh98XFhQqFgrm5KFJP1ODZPGKMW0+JHC4svWopWvDubDC/+SqMGyGcaGvcQjp4k+fzgoeFwDriXiaTybnN5ZPj3oX1oBDXljSH2PqqFdISMJWwkhCxvQLh/rlcbo5AwDN41HB5edkoev5Zh8PhnFIBRPIgmM9Bkhf1LnQ+n1c6nTbh4OAfUkHSVWf5lZUVywN6hcR8+ef1BHcfSpyfn+v09FSLi4t2Nmm5XNbx8bEajYZZPthHvAtu/2g0srjUp8sgUdCGJNh6c2NjQzs7O1ZdQ1zO30FrH4w7X4XxnbbG9K6G/79vFIzWRmCI82gvicvDmZDETKRdgqkDBIH4j35FaGncLlxJUhMoDw/kSDM3HOUhzY5Zh6QNmRuwBiHxGzWRSBh5v1AoKJ/Pq1qtWiMqn/7BAvM96l8Hg4E1jsaVByxh09JXiFQNz4NAMt/SfEdC3HtfqcP8DwYDFYtFra+vW6yby+VULpd1dnZmHFeeD88IlJlKEuJK5po19+/O/ymFo5yuUqlYbpMUF9+5bs+9CuNGWE5pVgfpaXMeCPIEAlxc3KClpSWVSiXrngBQQXqBONETwD2wApJJ4TOsHRYWwaW5NQMh9cgprByUBoXDWFreQ5JxhRFk4sdkMqmFhQVjHm1sbNhR6YAzPq/Hc4FY4z1wAJBvY8l3yTkSMzJHkuYsM/PApiZWxqL3+33F43Ftb29rfX3d5jwcDqtUKhnTC9ebeWCteQ8qcrgX7rYnbvD8WEriVgAqzkbxLDJPPnjVxndebC3N2jz6OANXhnRKr9fT2dmZbXyOKh8MBspkMrp7965qtZpZNhgvLBobFaEPplPoFQuyiPvn3U0qX/gd4A0gFJsumUwql8spkUjYUQmSDMBB+Hme8Xiso6MjO9SVZluDwUCFQsEEH3eScqx+v69Wq6WjoyMVCgV7HiwqCoJj9CRZEt+DO8G2Hbis/p1wsz2iTpUIbU3JNeJCJpNJlUoltdtt48pSDH92dma4wkcffaR8Pq9SqaRsNmseEKR1wC9JRpCIx+OmjDxrCAXAswYpe6/SuDGW07NlfKXGeDw2d4xFKJVKWltb069//WtrUlUsFk3zIiC094fwDkoJGulJDGxscoU+9mJzAEr4+E/6bM3nwsKCxX1HR0cmVJ53yjtzbQjjdG2AGYSbzj3h3eLeYe08ta/ZbCqRSFgOkDmlXy3xXTweV7fb1eHhocV0MHI82AXS7d1bgJdQKKTd3V0VCoU5wj/KLJPJ2EG3vjsfJ02Px2OdnZ1pb29PW1tbevPNN5XL5eZywMwDQA95XDwM0GPIJ8F89qsomNINEU6v4dDkCAgbASSyWCxa9+6TkxMr0gXaX19ft56we3t7Ojw8tIQ4JWjkQxF+31rRo6k+7ynJkuJ8N4jk8rwAEpC84fFyLQ/zI1zLy8sWr/mu7FiadrttbBpPqmDjetQXd7lcLms4HOr4+NjcfVxG+hWxqf37Bq2N3+TMGUIciUSUy+UUj8fn+Mv+usSnKIVsNmssHhDl8XhslSzRaNSIBH5f4JKn02llMpm5fDYuLRY++Nyv4rgRwinN3CVJtnFIWRAXgqACCgHooFGz2axSqZTFmcSrlJBRzCzNWD6+wRcLiZXyRASfCw2mQ/z3cAkREjYUFpCGVOQ7SbYDRPGetVrNist5T077kmSEds8sojKG+lKofO12W9Pp1ASoVqup1WpZz1j6/ODGcn2P0jL3MHOgUhJmnJ2dWfEzQgO442N4j/T2+32trKzYszNvlHoRd4KqM8fFYlG5XM6UHp4VLq1fm6BgvkrCemOEU5pZIDoEwPCBDseBqAiYL4MiVmWzn5+fz7F9oL8tLCzYhvaIK5/z2tYn/n2lvx+e6obQeXqbz6fyc67JZsSqIswICQ24aLGBUkAJSDMl4zsTSrK4dDweG8cYUIxmzL69qM/5+mf0qDVuLaVx5EbPzs6MgODjY/9ZmEJYQEnmogNKgTn4Pr2kc/xc++J0KoNA2n2uGWUQ3GOvyrgRwhmE7C8vL61dI0lz3FLcpcFgYFUHnU5He3t7tvhwUu/cuaPHjx+b9iX9AGKISxmkw3n0FVhf0mf+9pYXcGIymajVahnhnuqOICOJf6NQJFlDMiw+wAm9W7mW
NEN6scTEdKPR1Sla3W7X2rzgPhK7SrK6006no1qtZteCRcW8IzCAQp4AwftRAbK0tGRUPknm3qJ0UQAQC5jHxcVFra2tqd1u6/j42JqD+zCC52Z/SDJGmC8A8CQNP14li8m4EcLpB5NPAh1AIxQK2eGu4/HYGCY0Dm6329bFXLo63xKytQdh2Bhc0y+aT6+g/X0M5pk8noCAkAIEhUIh7e3tWSsNaHRYYyo5Li4u1Ov1rGUIeTrIF0dHR1ZZQTcDcpN4Ez4G9IXgzWZzrtCcKheKsnE5+/3+HGcZJcO8YaVwF3HtQ6GQHSvIfCwvL2tjY0P5fF7j8djOzpRk3gHPh5tO+HHnzh01Gg2LOUnveK4vAkiHDA6JItXmlcd17KxXbdw44ZSuNk6r1dLx8bHW1tYsBwYQg2UAGQXkIV9YqVT05MkTTSYT+xxCzWJ6d9TnxbBqWEyPyqKZPRHel5r1+32jqi0uLlpPIRg/WB8aRCMguLekjCqVirmzUNsKhYKKxaIikYhdl1gQIUwmk9b3FoFkY+O6ExLgKtOvKNjBnZyrV0K4uAgBVSEbGxvWOoYj46vVqlEGUYi0SMGlZ+5847F0Oq1qtapGo2FhAUJJgX08HjdlR7x5XS/d4HjVrOeNEc7gpA0GAx0cHBjrJBqN6vT0dK6TGu0iSXSzsTnJCoGkox2wO9+RNHd6GDEbf/wzYR2kWWohSG27uLjQ0dGR2u221tfX9fDhQ52cnJjlXlhYUKfTsRYa4XDY8pjSrAUItYi4moVCQTs7O0okEtZ6g98zsKaQ5qHnSZpre4lioiSLulmPkkuac/H5no8nYTK99dZbKpVK6vf7yufzikajqlarls9NJpNWv3l5eWnpD65JyAKzqdvtWg/iILUS19grVTr1Ee74KpxXfdwY4QwOXNejoyOtr69reXlZzWZTjUbDXEgO+aG3DwuFZid+4xwRX55FfAYiikWAJC5pjpfJPdmguJdsVg9WADhhTSVZo6pOp2NdAbAqfOb4+NhOW5auFEexWNRrr72mfD6vWq1m531419wDRf5Pv9+fUyKSjItMXIu7TP8frJVvT+K7pAPWEXpIsoOCu92ugTSVSsVO2I5Go1anCcrsObB0ruA5fRsSH3rwDggtSg3Px/NppdtUytc+PGWu1+vp+fPnymaz2t3dVSqVsobJkuyEaCrg4dh6wjvHN1AtP5lM7GfT6dTOhJxMJup0Omo2m4YsYlU8l9YjgdJnEU0UQLvdnmMO0XH8+PhY7Xbb8pW0tqStCumIdDqtzc1N3blzR7lcTs1mU8+fPzeOqkeDiTF5d1xWro0iwioSQ5LmQBmhcLyFxYOA/ucF4fj4WB999JEpP4SaXC6CTF9ZSOpYfMAc2o3evXvXgDkPRJHrhXLoBZi0mac0BveS9GqhtIwbJZw+mMeScZJxOp3Wzs6OnWVCxz3OuHzy5InB6biLFGsDKnmaWqvV0vLysnK5nHK5nEKhkBqNhg4ODsyKgFiS9Cad4tlCuFFB4oJnt+BWEiuCJFOvSosVhDWRSGhtbU1bW1vKZrOq1Wr6+OOP7XwY79pxH6h8vvs8lD5+Rp8hrBVxOkR2BiAbishX8niCwmAw0OHhoer1upaXl+0k8vPzc2tA7RUfsTAxNvNE25RqtWqAlUfvvVKA7AEYBivIc5aD5YX+eq/SuFHCKc0EE/fk8vJSBwcHttivv/661fHt7+8boMGiAdJQjYEGZ2HZaCsrKyqVSlpdXVW5XFY2m9XCwtURdvv7+zo6OtLJyYlqtZoODw91enpqJ0bTt0aa5QSDZHEfn/q0STwe1x/8wR/o3r17dsAOcSbI5s7Ojh48eKDpdGoHvh4dHc3VXEqaex/+78EsFAj5Tp+LJZ7FxQc5RfCh+yFEDISE1M5gMNCnn35q9Ejv3vIMKAPuiUKQZPcej8eq1WoWC3uyh6/NjMfjWl1dVTweNy+DUMXzs/1+8n+/SuPGCaf02bxnr9fTs2fPLEZ666239Lu/+7uKRqN2TCBHAHrkFCSP6yG0ENPz+bwymYzi8bi5unfv3tX3v/991Wo1HR0daX9/346Pq1arOj4+NiQ0l8vZgb8MX2kBIQDkttVqmRWGB0wMxsFMnhZHP56joyPbcFyfe0DUJ+0gaa5Tw3Q6NS4urr9n//CcoKKeGogi8GwqhEqSteV88eKFRqORfvrTn+rNN9/U3t6enTxOmgMrj5vL/YfD4dwxhR6h9fxe3i2dTqtQKGhlZUXNZnPuwF/mxcenr/K4scLpwYDJZGLnMvZ6PR0eHuru3bu6d++eYrGYtSqRZj1bPdGbthmkExKJhPL5vLX5Pz4+VqVS0crKirLZrMrlstbW1lQul3Xnzh397u/+riaTiZ48eaJ3331Xz58/V6vVUrVaVSaTUblctjgV5JNnp7yKjnq0i6zX62q321ap0W63rb1Iv9/Xs2fPrJ6Tze0T9ygv4jAsH10K8/m8stmstUxhE0OGp1JE+iwS7S0///Z/pJniIWY9PT3Vr371K/34xz+208hJx9B9j3kJgjYoHNxhaJvsA9aNv4nnuQeW1rvhn0fhe1XGjRROP7xbcnFxoefPn+v09FSlUkm7u7uWA8WNoh/N6uqqdnZ25g4aItbErSKPKMn66+A+I5yUrW1tben73/++fu/3fk/7+/v68MMP9f777+vw8FCPHz/WysqKCoWCpQ58KgIhKJVKqtfrOj09NWJCLpdTMpk0S09OlvgNviyCCEjiLQpoNWEAIJZnL9FBnTwkR81jdUCNvfUCMeUaXI84GUtYKpUUCoX06NEjJRIJff/737f6WiwZn51Or0rJEDbK26Srs1jpzCDNStdgFUH3k2Tr6BFaL/DB/SO9enHnjRZO7976DQPN7fDw0EqzSqWScrmcNXx6+PChdnd3rQOd75TXbDb1+PFjNRoN3blzR4VCwUCbdruter2ujz/+WO+9955ZxGw2q/X1dW1vb+vNN9/U3bt39bu/+7v64IMP9O6772pvb097e3u22VKplAErJNFJoOOG0hAZTigdGXBHqej38bIn44Nkks/l/4BQgEKSrIYT8GhhYcF+t7CwYFRI5l2audBsft4jEomYoCDYhUJBvV5PH3/8sSHNCB1uMYXkiURirs1pKBSys1j9UfKSjJLpWVEUxJPzfhltL8gAe5UEU7rhwil9NqD3bB3KyarVqk5OTpRKpZTL5ZTNZvXkyRNdXl5qa2tLKysrtkhLS0sqFos6Pz83cIEDcUE2ybFyyOtwONSTJ0/05MkTJZNJra6uanNzU/l83uKs999/X++//74qlYp1HGfDxONxbWxsKJvNajKZWK7W9yEiaQ9LCFcUQMRzf6UZv5d8ZK/Xm+PexuNxa+UC28iXl+Ft+PacINHSLH8apCniNpKmgXgOM+rg4EAfffSRKbTV1VXzUiRpdXVVS0tLajabcxU6rCkxN0AebjbVR5xonUgkjIQAxfBVtpLXjRsvnAxcFiyGt6YsXKvVskNwYKcgRFQ/oLlLpdLc0XO4tVwbIWdD43b6A3Ki0ajK5bJ2dnb0e7/
3e9rd3bU4kXKt4XCodDqt7e1tLS4uWnNkj5oSI+JScnyBz6ciwAiHJ+f7ShgEdDgcqlKpmGWjdQiWB9eZg3p9iR11pP5kM1BWLLknOpyentoxEvl8Xs1mU++9956dMl4sFg0M29zctJ6yxJMoGElzpXUgsbjrvuMBz+aRa2mGVzBeZQF9ZYST4elyQe2I60Q/nEqlYg2L6QuL0JVKJTu/0sdAJMpHo5HlSTkHJBq9OtGKrn+0ZHz8+LGWl5eVSqW0s7Ojzc1Nff/73zc3MhQKqVar6fnz5+r1ekblA7ldWFjQ+fm55Sqr1apZCA9ueLfT16GCalMIsLCwYPWNgCuAaljmy8tLdTod1et1+7+vmsFicS+fx+V+sIvq9bomk4lRJAeDgR4/fqznz59rfX1d+XxexWLRYlWvVH2HiEQiYa1PfLcKFBFpIklzgsscBPfJqz5eSeH8vIXwAkuFC8Tws7MzsyxYB9qC0PYCTiokboSTWGs6nVozY/KoZ2dnBqjQegMXMZ/P6+LiQgcHB4YwQgbIZrNKJpNmiUGiT09PjUzhAQ+svDTPM2UjX1xc2KlcvDuuIsKPMJB3RAFAUsBd9D2DuJ8HiJjrdDotaVYgj8UlZbO/v6/RaKRcLqfFxUWrxPHkDX92KnMqzbjGvCtrRuqFe3qX9lVnBfnxygmnHz7lElwcD9v7TU1jLO+u+a51gB2ZTMbO7EgkEspmswY4ZbNZQ2GxGAwAClqLkOKAt0sKgxzpcDhUtVpVtVpVp9Ox1iv+3RB23odn9e/MzygAkGSMIV8SxgZHKOPxuHq9nlk/hAEgCwvtGU/8DY0wm81aEQL3y2azKhQKuri4sPhyZ2dH8XjcDsfFK2i324YoSzMiB+4tHgBrtbKyMgeOBeNN3vVVH6+0cDJeFmN4EMmzjqR5Dq9fXNIt1WpVL168MAofudGdnR396Ec/0muvvaZoNGrQP8isr94fDAaq1WpGhqd2cnV1VXfu3FEymdTx8bH29/d1cnJiR9x5d9jH2NKsKZiv6vCEcQoAEBy61PFdDkjyBQDj8dgO70X4vSvplQRzhIDF43GVy2Xl8/m5Vi6FQkHLy8vW5Z1cZ6lUMuAICqEk82ZAzSkD9LE2SiiolIMsot8EwZR+Q4Tzi4zr0Dxp/pTjYOoA0AFi+tHRkZ49e6b9/X398R//sX7v935P5XJZzWbTUjVUxZyfn+v4+Fij0UgrKysGppA/jcfjVnf6/Plz6xmElcQS+mQ67qsXFAQIYc7lcqpUKppOp8pkMtbQmraR0pW3wGlmFA0sLS19pgjdFxD4Vi6SrE51Z2fH0leLi4vmVYCkQnTH/V9bW1M8HlcmkzHUmlpV3xcYcMx33AOd51RxBBLk3q/ldf9+1cZvjXBK11tV7/p6q+r/eECm3W7r/ffft4T5n/3Zn1kjZar8aQEZjUbtOAFADkgK9XrdqHl0TffP5fsSSbNDdQGpcCH5HL2VisXi3CllCBXvRXE67+WRT2kWW8K6CZbNIXjcF1YS3ddRUJ1OZ+5YiVAopHq9rlqtZmV8g8FArVbLnol+URAeILUjeL4/kO8f9bKYM7jmr9r4rRLO68Z1CeqghSXeYaMPh0Pt7e2ZVfnRj35k7iZuJAICSkpTZZLq0Oqo7eRenkQAqougY3Foz9FoNBSLxawLHlUoWFwYNlg5wKN+v2/uI64vbB7AIk96ByBCUfB/Cqur1aru3bunra0tTadTU0iAWpKM1wwRgwobehzR74g5JAQgjgaM4sBc6Jm+refnreOrOH7rhdMPH8cEFxtqn2foHB4e6h/+4R+0vLyshw8fmqUajUZqNBpzR9f5lirJZFKj0dUBPhwhwSb2hc2e3A5N0XfYQxCr1aqBMiC2vV7PSuIQ9PF4PIeKch3PwKH+E0HFSvuUDKmpUCikUqmk0WikFy9eSJI2Nzf14MEDFYtFPX36VPV6XfV63QggkUhE6XRaoVDIlEs0GrVyMvKsxMUIKFac52m329ZGxa+TR3dfdeEMveovcDtux2/qCP/bH7kdt+N2fBfjVjhvx+24oeNWOG/H7bih41Y4b8ftuKHjVjhvx+24oeNWOG/H7bih4/8H/JP3vnowsgQAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], "source": [ "ax = im.show(anatomical_plane=0)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/nbs/02_vision_data.ipynb b/nbs/02_vision_data.ipynb index 40ba067..d76646f 100644 --- a/nbs/02_vision_data.ipynb +++ b/nbs/02_vision_data.ipynb @@ -53,13 +53,20 @@ "outputs": [], "source": [ "#| export\n", - "def pred_to_multiclass_mask(pred:torch.Tensor # [C,W,H,D] activation tensor\n", - " ) -> torch.Tensor:\n", - " '''Apply Softmax function on the predicted tensor to rescale the values in the range [0, 1] and sum to 1.\n", - " Then apply argmax to get the indices of the maximum value of all elements in the predicted Tensor.\n", - " Returns: Predicted mask.\n", - " '''\n", + "def pred_to_multiclass_mask(pred: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Apply Softmax on the predicted tensor to rescale the values in the range [0, 1]\n", + " and sum to 1. Then apply argmax to get the indices of the maximum value of all \n", + " elements in the predicted Tensor.\n", + "\n", + " Args:\n", + " pred: [C,W,H,D] activation tensor.\n", + "\n", + " Returns: \n", + " Predicted mask.\n", + " \"\"\"\n", + " \n", " pred = pred.softmax(dim=0)\n", + "\n", " return pred.argmax(dim=0, keepdims=True)" ] }, @@ -70,12 +77,16 @@ "outputs": [], "source": [ "#| export\n", - "def batch_pred_to_multiclass_mask(pred:torch.Tensor # [B, C, W, H, D] batch of activations\n", - " ) -> (torch.Tensor, int):\n", - " '''Convert a batch of predicted activation tensors to masks.\n", - " Returns batch of predicted masks and number of classes.\n", - " '''\n", + "def batch_pred_to_multiclass_mask(pred: torch.Tensor) -> (torch.Tensor, int):\n", + " \"\"\"Convert a batch of predicted activation tensors to masks.\n", + " \n", + " Args:\n", + " pred: [B, C, W, H, D] batch of activations.\n", "\n", + " Returns:\n", + " Tuple of batch of predicted masks and number of classes.\n", + " \"\"\"\n", + " \n", " n_classes = pred.shape[1]\n", " pred = [pred_to_multiclass_mask(p) for p in pred]\n", "\n", @@ -89,16 +100,21 @@ "outputs": [], "source": [ "#| export\n", - "def pred_to_binary_mask(pred # [B, C, W, H, D] or [C, W, H, D] activation tensor\n", - " ) -> torch.Tensor:\n", - " '''Apply Sigmoid function that squishes activations into a range between 0 and 1.\n", - " Then we classify all values greater than or equal to 0.5 to 1, and the values below 0.5 to 0.\n", + "def pred_to_binary_mask(pred: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Apply Sigmoid function that squishes activations into a range between 0 and 1.\n", + " Then we classify all values greater than or equal to 0.5 to 1, \n", + " and the values below 0.5 to 0.\n", "\n", - " Returns predicted binary mask(s).\n", - " '''\n", + " Args:\n", + " pred: [B, C, W, H, D] or [C, W, H, D] activation tensor\n", "\n", + " Returns:\n", + " Predicted binary mask(s).\n", + " \"\"\"\n", + " \n", " pred = torch.sigmoid(pred)\n", - " return torch.where(pred>=0.5, 1, 0)" + "\n", + " return torch.where(pred >= 0.5, 1, 0)" ] }, { @@ -116,45 +132,16 @@ "source": [ "#| export\n", "class MedDataBlock(DataBlock):\n", - " '''Container to quickly build dataloaders.'''\n", + " \"\"\"Container to quickly build dataloaders.\"\"\"\n", + " #TODO add get_x\n", + " def __init__(self, blocks: list = None, dl_type: TfmdDL = None, getters: list = None,\n", + " n_inp: int = None, item_tfms: list = None, batch_tfms: list = 
None,\n", + " reorder: bool = False, resample: (int, list) = None, **kwargs):\n", "\n", - " def __init__(self, blocks:list=None,dl_type:TfmdDL=None, getters:list=None, n_inp:int=None, item_tfms:list=None,\n", - " batch_tfms:list=None, reorder:bool=False, resample:(int, list)=None, **kwargs):\n", + " super().__init__(blocks, dl_type, getters, n_inp, item_tfms,\n", + " batch_tfms, **kwargs)\n", "\n", - " super().__init__(blocks, dl_type, getters, n_inp, item_tfms, batch_tfms, **kwargs)\n", - " MedBase.item_preprocessing(resample,reorder)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "### MedDataBlock\n", - "\n", - "> MedDataBlock (blocks:list=None, dl_type:fastai.data.core.TfmdDL=None,\n", - "> getters:list=None, n_inp:int=None, item_tfms:list=None,\n", - "> batch_tfms:list=None, reorder:bool=False,\n", - "> resample:(,)=None, **kwargs)\n", - "\n", - "Container to quickly build dataloaders." - ], - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(MedDataBlock, title_level=2)" + " MedBase.item_preprocessing(resample, reorder)" ] }, { @@ -172,6 +159,7 @@ "source": [ "#| export\n", "def MedMaskBlock():\n", + " \"\"\"Create a TransformBlock for medical masks.\"\"\"\n", " return TransformBlock(type_tfms=MedMask.create)" ] }, @@ -190,117 +178,35 @@ "source": [ "#| export\n", "class MedImageDataLoaders(DataLoaders):\n", - " '''Higher-level `MedDataBlock` API.'''\n", - "\n", + " \"\"\"Higher-level `MedDataBlock` API.\"\"\"\n", + " \n", " @classmethod\n", " @delegates(DataLoaders.from_dblock)\n", - " def from_df(cls, df, valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,\n", - " y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, reorder=False, resample=None, **kwargs):\n", - " '''Create from DataFrame.'''\n", - "\n", + " def from_df(cls, df, valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='',\n", + " label_col=1, label_delim=None, y_block=None, valid_col=None,\n", + " item_tfms=None, batch_tfms=None, reorder=False, resample=None, **kwargs):\n", + " \"\"\"Create from DataFrame.\"\"\"\n", + " \n", " if y_block is None:\n", " is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None\n", " y_block = MultiCategoryBlock if is_multi else CategoryBlock\n", - " splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)\n", "\n", + " splitter = (RandomSplitter(valid_pct, seed=seed) \n", + " if valid_col is None else ColSplitter(valid_col))\n", "\n", - " dblock = MedDataBlock(blocks=(ImageBlock(cls=MedImage), y_block), get_x=ColReader(fn_col, suff=suff),\n", - " get_y=ColReader(label_col, label_delim=label_delim),\n", - " splitter=splitter,\n", - " item_tfms=item_tfms,\n", - " reorder=reorder,\n", - " resample=resample)\n", + " dblock = MedDataBlock(\n", + " blocks=(ImageBlock(cls=MedImage), y_block),\n", + " get_x=ColReader(fn_col, suff=suff),\n", + " get_y=ColReader(label_col, label_delim=label_delim),\n", + " splitter=splitter,\n", + " item_tfms=item_tfms,\n", + " reorder=reorder,\n", + " resample=resample\n", + " )\n", "\n", " return cls.from_dblock(dblock, df, **kwargs)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "### MedImageDataLoaders\n", - "\n", - "> MedImageDataLoaders (*loaders, 
path:'str|Path'='.', device=None)\n", - "\n", - "Higher-level `MedDataBlock` API." - ], - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(MedImageDataLoaders, title_level=2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "### MedImageDataLoaders.from_df\n", - "\n", - "> MedImageDataLoaders.from_df (df, valid_pct=0.2, seed=None, fn_col=0,\n", - "> folder=None, suff='', label_col=1,\n", - "> label_delim=None, y_block=None,\n", - "> valid_col=None, item_tfms=None,\n", - "> batch_tfms=None, reorder=False,\n", - "> resample=None, path:'str|Path'='.',\n", - "> bs:'int'=64, val_bs:'int'=None,\n", - "> shuffle:'bool'=True, device=None)\n", - "\n", - "Create from DataFrame.\n", - "\n", - "| | **Type** | **Default** | **Details** |\n", - "| -- | -------- | ----------- | ----------- |\n", - "| df | | | |\n", - "| valid_pct | float | 0.2 | |\n", - "| seed | NoneType | None | |\n", - "| fn_col | int | 0 | |\n", - "| folder | NoneType | None | |\n", - "| suff | str | | |\n", - "| label_col | int | 1 | |\n", - "| label_delim | NoneType | None | |\n", - "| y_block | NoneType | None | |\n", - "| valid_col | NoneType | None | |\n", - "| item_tfms | NoneType | None | |\n", - "| batch_tfms | NoneType | None | |\n", - "| reorder | bool | False | |\n", - "| resample | NoneType | None | |\n", - "| path | str \\| Path | . | Path to put in `DataLoaders` passed to `DataLoaders.from_dblock` |\n", - "| bs | int | 64 | Size of batch passed to `DataLoaders.from_dblock` |\n", - "| val_bs | int | None | Size of batch for validation `DataLoader` passed to `DataLoaders.from_dblock` |\n", - "| shuffle | bool | True | Whether to shuffle data passed to `DataLoaders.from_dblock` |\n", - "| device | NoneType | None | Device to put `DataLoaders` passed to `DataLoaders.from_dblock` |" - ], - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(MedImageDataLoaders.from_df, title_level=3)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -323,15 +229,24 @@ "source": [ "#| export\n", "@typedispatch\n", - "def show_batch(x:MedImage, y, samples, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", - " '''Showing a batch of samples for classification and regression tasks.'''\n", - "\n", - " if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)\n", + "def show_batch(x: MedImage, y, samples, ctxs=None, max_n=6, nrows=None, \n", + " ncols=None, figsize=None, channel: int = 0, indices=None, \n", + " anatomical_plane: int = 0, **kwargs):\n", + " \"\"\"Showing a batch of samples for classification and regression tasks.\"\"\"\n", + " \n", + " if ctxs is None: \n", + " ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)\n", + " \n", " n = 1 if y is None else 2\n", + " \n", " for i in range(n):\n", - " ctxs = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]\n", + " ctxs = [\n", + " b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) \n", + " for b, c, _ in zip(samples.itemgot(i), ctxs, range(max_n))\n", + " ]\n", "\n", " plt.tight_layout()\n", + " \n", " return ctxs" 
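As a quick sanity check of the mask-conversion helpers added earlier in this 02_vision_data diff (`pred_to_multiclass_mask`, `batch_pred_to_multiclass_mask`, `pred_to_binary_mask`), here is a minimal sketch on toy activation tensors. It assumes only that the patched `fastMONAI.vision_data` module is importable; no real image data is involved.

    import torch
    from fastMONAI.vision_data import (pred_to_multiclass_mask,
                                       batch_pred_to_multiclass_mask,
                                       pred_to_binary_mask)

    # Toy 3-class activation volume: [C, W, H, D] = [3, 4, 4, 2].
    acts = torch.randn(3, 4, 4, 2)
    mask = pred_to_multiclass_mask(acts)      # softmax over C, then argmax
    print(mask.shape)                         # torch.Size([1, 4, 4, 2]); values in {0, 1, 2}

    # The batch version stacks the per-item masks and reports the number of classes.
    batch = torch.randn(5, 3, 4, 4, 2)        # [B, C, W, H, D]
    masks, n_classes = batch_pred_to_multiclass_mask(batch)
    print(masks.shape, n_classes)             # torch.Size([5, 1, 4, 4, 2]) 3

    # Binary case: sigmoid, then threshold at 0.5.
    print(pred_to_binary_mask(torch.randn(1, 4, 4, 2)).unique())   # tensor([0, 1])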
] }, @@ -343,21 +258,25 @@ "source": [ "#| export\n", "@typedispatch\n", - "def show_batch(x:MedImage, y:MedMask, samples, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", - " '''Showing a batch of decoded segmentation samples.'''\n", + "def show_batch(x: MedImage, y: MedMask, samples, ctxs=None, max_n=6, nrows: int = None,\n", + " ncols: int = None, figsize=None, channel: int = 0, indices: int = None,\n", + " anatomical_plane: int = 0, **kwargs):\n", + " \"\"\"Showing a batch of decoded segmentation samples.\"\"\"\n", "\n", " nrows, ncols = min(len(samples), max_n), x.shape[1] + 1\n", " imgs = []\n", "\n", - " fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)\n", + " fig, axs = subplots(nrows, ncols, figsize=figsize, **kwargs)\n", " axs = axs.flatten()\n", "\n", - " for img, mask in list(zip(x,y)):\n", + " for img, mask in zip(x, y):\n", " im_channels = [MedImage(c_img[None]) for c_img in img]\n", " im_channels.append(MedMask(mask))\n", " imgs.extend(im_channels)\n", "\n", - " ctxs = [im.show(ax=ax, indices=indices, anatomical_plane=anatomical_plane) for im, ax in zip(imgs, axs)]\n", + " ctxs = [im.show(ax=ax, indices=indices, anatomical_plane=anatomical_plane)\n", + " for im, ax in zip(imgs, axs)]\n", + "\n", " plt.tight_layout()\n", "\n", " return ctxs" @@ -376,17 +295,30 @@ "metadata": {}, "outputs": [], "source": [ - "#| export \n", + "#| export\n", "@typedispatch\n", - "def show_results(x:MedImage, y:torch.Tensor, samples, outs, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", - " '''Showing samples and their corresponding predictions for regression tasks.'''\n", + "def show_results(x: MedImage, y: torch.Tensor, samples, outs, ctxs=None, max_n: int = 6,\n", + " nrows: int = None, ncols: int = None, figsize=None, channel: int = 0,\n", + " indices: int = None, anatomical_plane: int = 0, **kwargs):\n", + " \"\"\"Showing samples and their corresponding predictions for regression tasks.\"\"\"\n", "\n", - " if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)\n", + " if ctxs is None:\n", + " ctxs = get_grid(min(len(samples), max_n), nrows=nrows,\n", + " ncols=ncols, figsize=figsize)\n", "\n", " for i in range(len(samples[0])):\n", - " ctxs = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]\n", + " ctxs = [\n", + " b.show(ctx=c, channel=channel, indices=indices,\n", + " anatomical_plane=anatomical_plane, **kwargs)\n", + " for b, c, _ in zip(samples.itemgot(i), ctxs, range(max_n))\n", + " ]\n", + "\n", " for i in range(len(outs[0])):\n", - " ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]\n", + " ctxs = [\n", + " b.show(ctx=c, **kwargs)\n", + " for b, c, _ in zip(outs.itemgot(i), ctxs, range(max_n))\n", + " ]\n", + "\n", " return ctxs" ] }, @@ -398,13 +330,23 @@ "source": [ "#| export\n", "@typedispatch\n", - "def show_results(x:MedImage, y:TensorCategory, samples, outs, ctxs=None, max_n=6, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", - " '''Showing samples and their corresponding predictions for classification tasks.'''\n", + "def show_results(x: MedImage, y: TensorCategory, samples, outs, ctxs=None, \n", + " max_n: int = 6, nrows: int = None, ncols: int = None, figsize=None, channel: int = 0, \n", + " 
indices: int = None, anatomical_plane: int = 0, **kwargs):\n", + " \"\"\"Showing samples and their corresponding predictions for classification tasks.\"\"\"\n", "\n", - " if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)\n", + " if ctxs is None: \n", + " ctxs = get_grid(min(len(samples), max_n), nrows=nrows, \n", + " ncols=ncols, figsize=figsize)\n", + " \n", " for i in range(2):\n", - " ctxs = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]\n", - " ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs) for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]\n", + " ctxs = [b.show(ctx=c, channel=channel, indices=indices, \n", + " anatomical_plane=anatomical_plane, **kwargs) \n", + " for b, c, _ in zip(samples.itemgot(i), ctxs, range(max_n))]\n", + "\n", + " ctxs = [r.show(ctx=c, color='green' if b == r else 'red', **kwargs) \n", + " for b, r, c, _ in zip(samples.itemgot(1), outs.itemgot(0), ctxs, range(max_n))]\n", + "\n", " return ctxs" ] }, @@ -416,14 +358,25 @@ "source": [ "#| export\n", "@typedispatch\n", - "def show_results(x:MedImage, y:MedMask, samples, outs, ctxs=None, max_n=6, nrows=None, ncols=1, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", - " ''' Showing decoded samples and their corresponding predictions for segmentation tasks.'''\n", + "def show_results(x: MedImage, y: MedMask, samples, outs, ctxs=None, max_n: int = 6, \n", + " nrows: int = None, ncols: int = 1, figsize=None, channel: int = 0, \n", + " indices: int = None, anatomical_plane: int = 0, **kwargs):\n", + " \"\"\"Showing decoded samples and their corresponding predictions for segmentation tasks.\"\"\"\n", + "\n", + " if ctxs is None: \n", + " ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, \n", + " figsize=figsize, double=True, title='Target/Prediction')\n", "\n", - " if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True, title='Target/Prediction')\n", " for i in range(2):\n", - " ctxs[::2] = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]\n", - " for o in [samples,outs]:\n", - " ctxs[1::2] = [b.show(ctx=c, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))]\n", + " ctxs[::2] = [b.show(ctx=c, channel=channel, indices=indices, \n", + " anatomical_plane=anatomical_plane, **kwargs) \n", + " for b, c, _ in zip(samples.itemgot(i), ctxs[::2], range(2 * max_n))]\n", + "\n", + " for o in [samples, outs]:\n", + " ctxs[1::2] = [b.show(ctx=c, channel=channel, indices=indices, \n", + " anatomical_plane=anatomical_plane, **kwargs) \n", + " for b, c, _ in zip(o.itemgot(0), ctxs[1::2], range(2 * max_n))]\n", + "\n", " return ctxs" ] }, @@ -442,15 +395,21 @@ "source": [ "#| export\n", "@typedispatch\n", - "def plot_top_losses(x: MedImage, y, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", - " '''Show images in top_losses along with their prediction, actual, loss, and probability of actual class.'''\n", + "def plot_top_losses(x: MedImage, y, samples, outs, raws, losses, nrows: int = None, \n", + " ncols: int = None, figsize=None, channel: int = 0, indices: int = None, \n", + " 
anatomical_plane: int = 0, **kwargs):\n", + " \"\"\"Show images in top_losses along with their prediction, actual, loss, and probability of actual class.\"\"\"\n", "\n", - " title = 'Prediction/Actual/Loss' if type(y) == torch.Tensor else 'Prediction/Actual/Loss/Probability'\n", + " title = 'Prediction/Actual/Loss' if isinstance(y, torch.Tensor) else 'Prediction/Actual/Loss/Probability'\n", " axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize, title=title)\n", - " for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):\n", + "\n", + " for ax, s, o, r, l in zip(axs, samples, outs, raws, losses):\n", " s[0].show(ctx=ax, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs)\n", - " if type(y) == torch.Tensor: ax.set_title(f'{r.max().item():.2f}/{s[1]} / {l.item():.2f}')\n", - " else: ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')" + "\n", + " if isinstance(y, torch.Tensor): \n", + " ax.set_title(f'{r.max().item():.2f}/{s[1]} / {l.item():.2f}')\n", + " else: \n", + " ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')" ] }, { @@ -461,14 +420,26 @@ "source": [ "#| export\n", "@typedispatch\n", - "def plot_top_losses(x: MedImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, channel=0, indices=None, anatomical_plane=0, **kwargs):\n", - " #TODO: not tested yet\n", + "def plot_top_losses(x: MedImage, y: TensorMultiCategory, samples, outs, raws, \n", + " losses, nrows: int = None, ncols: int = None, figsize=None, \n", + " channel: int = 0, indices: int = None, \n", + " anatomical_plane: int = 0, **kwargs):\n", + " # TODO: not tested yet\n", " axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize)\n", - " for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs)\n", + "\n", + " for i, (ax, s) in enumerate(zip(axs, samples)):\n", + " s[0].show(ctx=ax, title=f'Image {i}', channel=channel, \n", + " indices=indices, anatomical_plane=anatomical_plane, **kwargs)\n", + "\n", " rows = get_empty_df(len(samples))\n", - " outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))\n", - " for i,l in enumerate([\"target\", \"predicted\", \"probabilities\", \"loss\"]):\n", - " rows = [b.show(ctx=r, label=l, channel=channel, indices=indices, anatomical_plane=anatomical_plane, **kwargs) for b,r in zip(outs.itemgot(i),rows)]\n", + " outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) \n", + " for s, o, r, l in zip(samples, outs, raws, losses))\n", + "\n", + " for i, l in enumerate([\"target\", \"predicted\", \"probabilities\", \"loss\"]):\n", + " rows = [b.show(ctx=r, label=l, channel=channel, indices=indices, \n", + " anatomical_plane=anatomical_plane, **kwargs) \n", + " for b, r in zip(outs.itemgot(i), rows)]\n", + "\n", " display_df(pd.DataFrame(rows))" ] } diff --git a/nbs/03_vision_augment.ipynb b/nbs/03_vision_augment.ipynb index dc60a59..8d52685 100644 --- a/nbs/03_vision_augment.ipynb +++ b/nbs/03_vision_augment.ipynb @@ -54,69 +54,43 @@ "source": [ "#| export\n", "class CustomDictTransform(ItemTransform):\n", - " '''Wrapper to perform an identical transformation on both image and target (if it is a mask) during training.'''\n", + " \"\"\"A class that serves as a wrapper to perform an identical transformation on both \n", + " the image and the target (if it's a mask).\n", + " \"\"\"\n", " \n", - " 
split_idx = 0\n", - " def __init__(self, aug): self.aug = aug\n", + " split_idx = 0 # Only perform transformations on training data. Use TTA() for transformations on validation data.\n", + "\n", + " def __init__(self, aug):\n", + " \"\"\"Constructs CustomDictTransform object.\n", + "\n", + " Args:\n", + " aug (Callable): Function to apply augmentation on the image.\n", + " \"\"\"\n", + " self.aug = aug\n", "\n", " def encodes(self, x):\n", - " '''Apply transformation to an image, and the same random transformation to the target if it is a mask.\n", + " \"\"\"\n", + " Applies the stored transformation to an image, and the same random transformation \n", + " to the target if it is a mask. If the target is not a mask, it returns the target as is.\n", "\n", " Args:\n", - " x: Contains image and target.\n", + " x (Tuple[MedImage, Union[MedMask, TensorCategory]]): A tuple containing the \n", + " image and the target.\n", "\n", " Returns:\n", - " MedImage: Transformed image data.\n", - " (MedMask, TensorCategory, ...todo): If the target is a mask, then return a transformed mask data. Otherwise, return target value.\n", - " '''\n", - "\n", + " Tuple[MedImage, Union[MedMask, TensorCategory]]: The transformed image and target. \n", + " If the target is a mask, it's transformed identically to the image. If the target \n", + " is not a mask, the original target is returned.\n", + " \"\"\"\n", " img, y_true = x\n", "\n", " if isinstance(y_true, (MedMask)):\n", - " aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img, affine=MedImage.affine_matrix), mask=tio.LabelMap(tensor=y_true, affine=MedImage.affine_matrix)))\n", + " aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img, affine=MedImage.affine_matrix), \n", + " mask=tio.LabelMap(tensor=y_true, affine=MedImage.affine_matrix)))\n", " return MedImage.create(aug['img'].data), MedMask.create(aug['mask'].data)\n", - " else:\n", - " aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img)))\n", - " return MedImage.create(aug['img'].data), y_true" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L14){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### CustomDictTransform\n", - "\n", - "> CustomDictTransform (aug)\n", - "\n", - "Wrapper to perform an identical transformation on both image and target (if it is a mask) during training." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L14){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### CustomDictTransform\n", - "\n", - "> CustomDictTransform (aug)\n", - "\n", - "Wrapper to perform an identical transformation on both image and target (if it is a mask) during training." 
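The essential trick in `CustomDictTransform.encodes` is bundling image and mask into one `tio.Subject`, so TorchIO draws a single set of random parameters and applies it to both. A minimal TorchIO-only sketch of that idea, with `tio.RandomAffine` standing in for `self.aug` and synthetic tensors:

    import torch
    import torchio as tio

    img = torch.rand(1, 32, 32, 32)                    # [C, W, H, D] scalar image
    mask = (torch.rand(1, 32, 32, 32) > 0.5).float()   # synthetic label map

    subject = tio.Subject(img=tio.ScalarImage(tensor=img),
                          mask=tio.LabelMap(tensor=mask))

    # One random draw is applied to the whole Subject, so the mask is warped with
    # exactly the same parameters as the image (a LabelMap is resampled with
    # nearest-neighbour interpolation automatically).
    aug = tio.RandomAffine()(subject)
    print(aug['img'].data.shape, aug['mask'].data.unique())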
- ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(CustomDictTransform, title_level=3)" + "\n", + " aug = self.aug(tio.Subject(img=tio.ScalarImage(tensor=img)))\n", + " return MedImage.create(aug['img'].data), y_true\n" ] }, { @@ -134,7 +108,7 @@ "source": [ "#| export\n", "def do_pad_or_crop(o, target_shape, padding_mode, mask_name, dtype=torch.Tensor):\n", - "\n", + " #TODO:refactorize\n", " pad_or_crop = tio.CropOrPad(target_shape=target_shape, padding_mode=padding_mode, mask_name=mask_name)\n", " return dtype(pad_or_crop(o))" ] @@ -147,54 +121,19 @@ "source": [ "#| export \n", "class PadOrCrop(DisplayedTransform):\n", - " '''Resize image using TorchIO `CropOrPad`.'''\n", + " \"\"\"Resize image using TorchIO `CropOrPad`.\"\"\"\n", + " \n", + " order = 0\n", "\n", - " order=0\n", " def __init__(self, size, padding_mode=0, mask_name=None):\n", - " if not is_listy(size): size=[size,size,size]\n", - " self.size, self.padding_mode, self.mask_name = size, padding_mode, mask_name\n", + " if not is_listy(size): \n", + " size = [size, size, size]\n", + " self.pad_or_crop = tio.CropOrPad(target_shape=size,\n", + " padding_mode=padding_mode, \n", + " mask_name=mask_name)\n", "\n", - " def encodes(self, o:(MedImage, MedMask)):\n", - " return do_pad_or_crop(o,target_shape=self.size, padding_mode=self.padding_mode, mask_name=self.mask_name, dtype=type(o))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L47){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### PadOrCrop\n", - "\n", - "> PadOrCrop (size, padding_mode=0, mask_name=None)\n", - "\n", - "Resize image using TorchIO `CropOrPad`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L47){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### PadOrCrop\n", - "\n", - "> PadOrCrop (size, padding_mode=0, mask_name=None)\n", - "\n", - "Resize image using TorchIO `CropOrPad`." 
- ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(PadOrCrop, title_level=3)" + " def encodes(self, o: (MedImage, MedMask)):\n", + " return type(o)(self.pad_or_crop(o))" ] }, { @@ -203,76 +142,27 @@ "metadata": {}, "outputs": [], "source": [ - "#| export\n", - "def _do_z_normalization(o, masking_method, channel_wise):\n", + "# | export\n", + "class ZNormalization(DisplayedTransform):\n", + " \"\"\"Apply TorchIO `ZNormalization`.\"\"\"\n", "\n", - " z_normalization = tio.ZNormalization(masking_method=masking_method)\n", - " normalized_tensor = torch.zeros(o.shape)\n", + " order = 0\n", "\n", - " if channel_wise:\n", - " for idx, c in enumerate(o): \n", - " normalized_tensor[idx] = z_normalization(c[None])[0]\n", - " \n", - " else: normalized_tensor = z_normalization(o)\n", + " def __init__(self, masking_method=None, channel_wise=True):\n", + " self.z_normalization = tio.ZNormalization(masking_method=masking_method)\n", + " self.channel_wise = channel_wise\n", "\n", - " return normalized_tensor" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "class ZNormalization(DisplayedTransform):\n", - " '''Apply TorchIO `ZNormalization`.'''\n", + " def encodes(self, o: MedImage):\n", + " return MedImage.create(self._do_z_normalization(o))\n", "\n", - " order=0\n", - " def __init__(self, masking_method=None, channel_wise=True):\n", - " self.masking_method, self.channel_wise = masking_method, channel_wise\n", + " def encodes(self, o: MedMask):\n", + " return o\n", "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_z_normalization(o, self.masking_method, self.channel_wise))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L73){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### ZNormalization\n", - "\n", - "> ZNormalization (masking_method=None, channel_wise=True)\n", - "\n", - "Apply TorchIO `ZNormalization`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L73){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### ZNormalization\n", - "\n", - "> ZNormalization (masking_method=None, channel_wise=True)\n", - "\n", - "Apply TorchIO `ZNormalization`." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(ZNormalization, title_level=3)" + " def _do_z_normalization(self, o):\n", + " if self.channel_wise:\n", + " return torch.stack([self.z_normalization(c[None])[0] for c in o])\n", + " else: \n", + " return self.z_normalization(o)" ] }, { @@ -294,45 +184,6 @@ " return MedMask.create(o)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L84){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### BraTSMaskConverter\n", - "\n", - "> BraTSMaskConverter (enc=None, dec=None, split_idx=None, order=None)\n", - "\n", - "Convert BraTS masks." 
- ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L84){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### BraTSMaskConverter\n", - "\n", - "> BraTSMaskConverter (enc=None, dec=None, split_idx=None, order=None)\n", - "\n", - "Convert BraTS masks." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(BraTSMaskConverter, title_level=3)" - ] - }, { "cell_type": "code", "execution_count": null, @@ -345,35 +196,14 @@ "\n", " order=1\n", "\n", - " def encodes(self, o:(MedImage)): return o\n", + " def encodes(self, o: MedImage): \n", + " return o\n", "\n", - " def encodes(self, o:(MedMask)):\n", + " def encodes(self, o: MedMask):\n", " o = torch.where(o>0, 1., 0)\n", " return MedMask.create(o)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BraTSMaskConverter, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def _do_rand_ghosting(o, intensity, p):\n", - " \n", - " add_ghosts = tio.RandomGhosting(intensity=intensity, p=p)\n", - " return add_ghosts(o)" - ] - }, { "cell_type": "code", "execution_count": null, @@ -382,67 +212,18 @@ "source": [ "#| export\n", "class RandomGhosting(DisplayedTransform):\n", - " '''Apply TorchIO `RandomGhosting`.'''\n", - "\n", - " split_idx,order=0,1\n", + " \"\"\"Apply TorchIO `RandomGhosting`.\"\"\"\n", + " \n", + " split_idx, order = 0, 1\n", "\n", - " def __init__(self, intensity =(0.5, 1), p=0.5):\n", - " self.intensity, self.p = intensity, p\n", + " def __init__(self, intensity=(0.5, 1), p=0.5):\n", + " self.add_ghosts = tio.RandomGhosting(intensity=intensity, p=p)\n", "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_ghosting(o, self.intensity, self.p))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L102){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomGhosting\n", - "\n", - "> RandomGhosting (intensity=(0.5, 1), p=0.5)\n", - "\n", - "Apply TorchIO `RandomGhosting`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L102){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomGhosting\n", - "\n", - "> RandomGhosting (intensity=(0.5, 1), p=0.5)\n", - "\n", - "Apply TorchIO `RandomGhosting`." 
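Several of these transforms define two `encodes` methods, one for `MedImage` and one for `MedMask`; fastcore's `Transform` dispatches on the type annotation, so masks can pass through intensity augmentations untouched. A minimal fastcore-only sketch of that mechanism, unrelated to imaging:

    from fastcore.transform import Transform

    class Toy(Transform):
        "Scales ints, passes strings through untouched."
        def encodes(self, x: int): return x * 10
        def encodes(self, x: str): return x

    t = Toy()
    print(t(3), t('mask'))   # 30 mask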
- ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomGhosting, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def _do_rand_spike(o, num_spikes, intensity, p):\n", + " def encodes(self, o: MedImage):\n", + " return MedImage.create(self.add_ghosts(o))\n", "\n", - " add_spikes = tio.RandomSpike(num_spikes=num_spikes, intensity=intensity, p=p)\n", - " return add_spikes(o) #return torch tensor" + " def encodes(self, o: MedMask):\n", + " return o" ] }, { @@ -458,62 +239,13 @@ " split_idx,order=0,1\n", "\n", " def __init__(self, num_spikes=1, intensity=(1, 3), p=0.5):\n", - " self.num_spikes, self.intensity, self.p = num_spikes, intensity, p\n", + " self.add_spikes = tio.RandomSpike(num_spikes=num_spikes, intensity=intensity, p=p)\n", "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_spike(o, self.num_spikes, self.intensity, self.p))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L120){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomSpike\n", - "\n", - "> RandomSpike (num_spikes=1, intensity=(1, 3), p=0.5)\n", - "\n", - "Apply TorchIO `RandomSpike`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L120){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomSpike\n", - "\n", - "> RandomSpike (num_spikes=1, intensity=(1, 3), p=0.5)\n", - "\n", - "Apply TorchIO `RandomSpike`." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomSpike, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def _do_rand_noise(o, mean, std, p):\n", - "\n", - " add_noise = tio.RandomNoise(mean=mean, std=std, p=p)\n", - " return add_noise(o) #return torch tensor" + " def encodes(self, o:MedImage): \n", + " return MedImage.create(self.add_spikes(o))\n", + " \n", + " def encodes(self, o:MedMask):\n", + " return o" ] }, { @@ -529,62 +261,13 @@ " split_idx,order=0,1\n", "\n", " def __init__(self, mean=0, std=(0, 0.25), p=0.5):\n", - " self.mean, self.std, self.p = mean, std, p\n", + " self.add_noise = tio.RandomNoise(mean=mean, std=std, p=p)\n", "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_noise(o, mean=self.mean, std=self.std, p=self.p))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L138){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomNoise\n", - "\n", - "> RandomNoise (mean=0, std=(0, 0.25), p=0.5)\n", - "\n", - "Apply TorchIO `RandomNoise`." 
- ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L138){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomNoise\n", - "\n", - "> RandomNoise (mean=0, std=(0, 0.25), p=0.5)\n", - "\n", - "Apply TorchIO `RandomNoise`." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomNoise, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def _do_rand_biasfield(o, coefficients, order, p):\n", - "\n", - " add_biasfield = tio.RandomBiasField(coefficients=coefficients, order=order, p=p)\n", - " return add_biasfield(o) #return torch tensor" + " def encodes(self, o: MedImage): \n", + " return MedImage.create(self.add_noise(o))\n", + " \n", + " def encodes(self, o: MedMask):\n", + " return o" ] }, { @@ -600,62 +283,13 @@ " split_idx,order=0,1\n", "\n", " def __init__(self, coefficients=0.5, order=3, p=0.5):\n", - " self.coefficients, self.order, self.p = coefficients, order, p\n", - "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_biasfield(o, coefficients=self.coefficients, order=self.order, p=self.p))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L156){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomBiasField\n", - "\n", - "> RandomBiasField (coefficients=0.5, order=3, p=0.5)\n", - "\n", - "Apply TorchIO `RandomBiasField`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L156){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomBiasField\n", - "\n", - "> RandomBiasField (coefficients=0.5, order=3, p=0.5)\n", - "\n", - "Apply TorchIO `RandomBiasField`." 
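Because each wrapper owns an independent TorchIO transform, they compose naturally; the plain-TorchIO equivalent of stacking several of them is just `tio.Compose`. A sketch using the same default parameters as the classes in this diff:

    import torch
    import torchio as tio

    augment = tio.Compose([
        tio.RandomBiasField(coefficients=0.5, order=3, p=0.5),
        tio.RandomBlur(std=(0, 2), p=0.5),
        tio.RandomGamma(log_gamma=(-0.3, 0.3), p=0.5),
    ])
    print(augment(torch.rand(1, 32, 32, 32)).shape)   # torch.Size([1, 32, 32, 32])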
- ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomBiasField, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def _do_rand_blur(o, std, p):\n", + " self.add_biasfield = tio.RandomBiasField(coefficients=coefficients, order=order, p=p)\n", "\n", - " add_blur = tio.RandomBlur(std=std, p=p)\n", - " return add_blur(o) " + " def encodes(self, o: MedImage): \n", + " return MedImage.create(self.add_biasfield(o))\n", + " \n", + " def encodes(self, o: MedMask):\n", + " return o" ] }, { @@ -671,62 +305,13 @@ " split_idx,order=0,1\n", "\n", " def __init__(self, std=(0, 2), p=0.5):\n", - " self.std, self.p = std, p\n", - "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_blur(o, std=self.std, p=self.p))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L174){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomBlur\n", - "\n", - "> RandomBlur (std=(0, 2), p=0.5)\n", - "\n", - "Apply TorchIO `RandomBiasField`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L174){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomBlur\n", - "\n", - "> RandomBlur (std=(0, 2), p=0.5)\n", - "\n", - "Apply TorchIO `RandomBiasField`." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomBlur, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def _do_rand_gamma(o, log_gamma, p):\n", - "\n", - " add_gamma = tio.RandomGamma(log_gamma=log_gamma, p=p)\n", - " return add_gamma(o) " + " self.add_blur = tio.RandomBlur(std=std, p=p)\n", + " \n", + " def encodes(self, o: MedImage): \n", + " return MedImage.create(self.add_blur(o))\n", + " \n", + " def encodes(self, o: MedMask):\n", + " return o" ] }, { @@ -743,62 +328,13 @@ " split_idx,order=0,1\n", "\n", " def __init__(self, log_gamma=(-0.3, 0.3), p=0.5):\n", - " self.log_gamma, self.p = log_gamma, p\n", + " self.add_gamma = tio.RandomGamma(log_gamma=log_gamma, p=p)\n", "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_gamma(o, log_gamma=self.log_gamma, p=self.p))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L192){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomGamma\n", - "\n", - "> RandomGamma (log_gamma=(-0.3, 0.3), p=0.5)\n", - "\n", - "Apply TorchIO `RandomGamma`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L192){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomGamma\n", - "\n", - "> RandomGamma (log_gamma=(-0.3, 0.3), p=0.5)\n", - "\n", - "Apply TorchIO `RandomGamma`." 
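Editor's note: the intensity transforms refactored above all follow one pattern — the TorchIO transform is built once in `__init__` rather than on every call, and `encodes` is type-dispatched so only `MedImage` is altered while `MedMask` passes through. A minimal sketch of adding another intensity transform in the same style (the `RandomSwap` wrapper below is an illustration under these assumptions, not part of this commit):

```python
# Illustrative sketch only: a new intensity-transform wrapper written in the
# same style as the classes refactored above. Assumes fastMONAI's MedImage /
# MedMask types; tio.RandomSwap is an arbitrary TorchIO transform choice.
from fastcore.transform import DisplayedTransform
import torchio as tio

from fastMONAI.vision_core import MedImage, MedMask


class RandomSwap(DisplayedTransform):
    """Apply TorchIO `RandomSwap` (sketch)."""

    split_idx, order = 0, 1

    def __init__(self, patch_size=15, num_iterations=100, p=0.5):
        # Build the TorchIO transform once, not on every encodes call.
        self.add_swap = tio.RandomSwap(
            patch_size=patch_size, num_iterations=num_iterations, p=p)

    def encodes(self, o: MedImage):
        return MedImage.create(self.add_swap(o))

    def encodes(self, o: MedMask):
        # Masks are returned unchanged by intensity transforms.
        return o
```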
- ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomGamma, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def _do_rand_motion(o, degrees, translation, num_transforms, image_interpolation, p):\n", - "\n", - " add_motion = tio.RandomMotion(degrees=degrees, translation=translation, num_transforms=num_transforms, image_interpolation=image_interpolation, p=p)\n", - " return add_motion(o) #return torch tensor" + " def encodes(self, o: MedImage): \n", + " return MedImage.create(self.add_gamma(o))\n", + " \n", + " def encodes(self, o: MedMask):\n", + " return o" ] }, { @@ -809,56 +345,31 @@ "source": [ "#| export\n", "class RandomMotion(DisplayedTransform):\n", - " '''Apply TorchIO `RandomMotion`.'''\n", + " \"\"\"Apply TorchIO `RandomMotion`.\"\"\"\n", "\n", - " split_idx,order=0,1\n", + " split_idx, order = 0, 1\n", "\n", - " def __init__(self, degrees=10, translation=10, num_transforms=2, image_interpolation='linear', p=0.5):\n", - " self.degrees,self.translation, self.num_transforms, self.image_interpolation, self.p = degrees,translation, num_transforms, image_interpolation, p\n", + " def __init__(\n", + " self, \n", + " degrees=10, \n", + " translation=10, \n", + " num_transforms=2, \n", + " image_interpolation='linear', \n", + " p=0.5\n", + " ):\n", + " self.add_motion = tio.RandomMotion(\n", + " degrees=degrees, \n", + " translation=translation, \n", + " num_transforms=num_transforms, \n", + " image_interpolation=image_interpolation, \n", + " p=p\n", + " )\n", "\n", - " def encodes(self, o:(MedImage)): return MedImage.create(_do_rand_motion(o, degrees=self.degrees,translation=self.translation, num_transforms=self.num_transforms, image_interpolation=self.image_interpolation, p=self.p))\n", - " def encodes(self, o:(MedMask)):return o" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L211){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomMotion\n", - "\n", - "> RandomMotion (degrees=10, translation=10, num_transforms=2,\n", - "> image_interpolation='linear', p=0.5)\n", - "\n", - "Apply TorchIO `RandomMotion`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L211){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomMotion\n", - "\n", - "> RandomMotion (degrees=10, translation=10, num_transforms=2,\n", - "> image_interpolation='linear', p=0.5)\n", - "\n", - "Apply TorchIO `RandomMotion`." 
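Editor's note: a short usage sketch for these intensity transforms. The file paths are hypothetical, and because the classes set `split_idx = 0` (training split only), a direct call must pass `split_idx=0` to fire:

```python
# Usage sketch with hypothetical file paths; p=1.0 makes the transform always
# fire, which makes the MedImage/MedMask dispatch easy to verify interactively.
from fastMONAI.vision_augmentation import RandomNoise
from fastMONAI.vision_core import MedImage, MedMask

img = MedImage.create('t2_image.nii.gz')   # hypothetical NIfTI volume
mask = MedMask.create('t2_mask.nii.gz')    # hypothetical segmentation mask

tfm = RandomNoise(mean=0, std=(0, 0.1), p=1.0)
noisy_img = tfm(img, split_idx=0)    # MedImage with Gaussian noise added
same_mask = tfm(mask, split_idx=0)   # MedMask is returned untouched
```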
- ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomMotion, title_level=3)" + " def encodes(self, o: MedImage):\n", + " return MedImage.create(self.add_motion(o))\n", + "\n", + " def encodes(self, o: MedMask):\n", + " return o" ] }, { @@ -874,53 +385,18 @@ "metadata": {}, "outputs": [], "source": [ - "#| export\n", + "# | export\n", "class RandomElasticDeformation(CustomDictTransform):\n", - " '''Apply TorchIO `RandomElasticDeformation`.'''\n", + " \"\"\"Apply TorchIO `RandomElasticDeformation`.\"\"\"\n", "\n", - " def __init__(self,num_control_points=7, max_displacement=7.5, image_interpolation='linear', p=0.5): \n", - " super().__init__(tio.RandomElasticDeformation(num_control_points=num_control_points, max_displacement=max_displacement, image_interpolation=image_interpolation, p=p))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L223){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomElasticDeformation\n", - "\n", - "> RandomElasticDeformation (num_control_points=7, max_displacement=7.5,\n", - "> image_interpolation='linear', p=0.5)\n", - "\n", - "Apply TorchIO `RandomElasticDeformation`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L223){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomElasticDeformation\n", - "\n", - "> RandomElasticDeformation (num_control_points=7, max_displacement=7.5,\n", - "> image_interpolation='linear', p=0.5)\n", - "\n", - "Apply TorchIO `RandomElasticDeformation`." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomElasticDeformation, title_level=3,)" + " def __init__(self, num_control_points=7, max_displacement=7.5,\n", + " image_interpolation='linear', p=0.5):\n", + " \n", + " super().__init__(tio.RandomElasticDeformation(\n", + " num_control_points=num_control_points,\n", + " max_displacement=max_displacement,\n", + " image_interpolation=image_interpolation,\n", + " p=p))" ] }, { @@ -929,53 +405,21 @@ "metadata": {}, "outputs": [], "source": [ - "#| export \n", + "# | export\n", "class RandomAffine(CustomDictTransform):\n", - " '''Apply TorchIO `RandomAffine`.'''\n", + " \"\"\"Apply TorchIO `RandomAffine`.\"\"\"\n", "\n", - " def __init__(self, scales=0, degrees=10, translation=0, isotropic=False, image_interpolation='linear', default_pad_value=0., p=0.5): \n", - " super().__init__(tio.RandomAffine(scales=scales, degrees=degrees, translation=translation, isotropic=isotropic, image_interpolation=image_interpolation, default_pad_value=default_pad_value, p=p))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L230){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomAffine\n", - "\n", - "> RandomAffine (scales=0, degrees=10, translation=0, isotropic=False,\n", - "> image_interpolation='linear', default_pad_value=0.0, p=0.5)\n", - "\n", - "Apply TorchIO `RandomAffine`." 
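Editor's note: unlike the intensity transforms, the spatial transforms above subclass `CustomDictTransform`, because image and mask must be warped with the same sampled parameters. Conceptually this corresponds to packing both into one TorchIO `Subject` before the transform runs — a sketch of the underlying idea, not fastMONAI's exact internals:

```python
# Sketch of the idea behind CustomDictTransform: pack image and mask into one
# TorchIO Subject so a spatial transform samples its parameters once and
# applies them to both. The tensors below are synthetic stand-ins.
import torch
import torchio as tio

img_tensor = torch.rand(1, 32, 32, 32)        # [C, W, H, D]
mask_tensor = (img_tensor > 0.5).long()

subject = tio.Subject(
    img=tio.ScalarImage(tensor=img_tensor),
    mask=tio.LabelMap(tensor=mask_tensor),    # LabelMap => nearest-neighbour resampling
)
warped = tio.RandomElasticDeformation()(subject)
# warped['img'] and warped['mask'] were deformed with identical parameters.
```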
- ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L230){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomAffine\n", - "\n", - "> RandomAffine (scales=0, degrees=10, translation=0, isotropic=False,\n", - "> image_interpolation='linear', default_pad_value=0.0, p=0.5)\n", - "\n", - "Apply TorchIO `RandomAffine`." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomAffine, title_level=3)" + " def __init__(self, scales=0, degrees=10, translation=0, isotropic=False,\n", + " image_interpolation='linear', default_pad_value=0., p=0.5):\n", + " \n", + " super().__init__(tio.RandomAffine(\n", + " scales=scales,\n", + " degrees=degrees,\n", + " translation=translation,\n", + " isotropic=isotropic,\n", + " image_interpolation=image_interpolation,\n", + " default_pad_value=default_pad_value,\n", + " p=p))" ] }, { @@ -984,53 +428,14 @@ "metadata": {}, "outputs": [], "source": [ - "#| export \n", + "# | export\n", "class RandomFlip(CustomDictTransform):\n", - " '''Apply TorchIO `RandomFlip`.'''\n", + " \"\"\"Apply TorchIO `RandomFlip`.\"\"\"\n", "\n", " def __init__(self, axes='LR', p=0.5):\n", " super().__init__(tio.RandomFlip(axes=axes, flip_probability=p))" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L237){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomFlip\n", - "\n", - "> RandomFlip (axes='LR', p=0.5)\n", - "\n", - "Apply TorchIO `RandomFlip`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L237){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### RandomFlip\n", - "\n", - "> RandomFlip (axes='LR', p=0.5)\n", - "\n", - "Apply TorchIO `RandomFlip`." - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(RandomFlip, title_level=3)" - ] - }, { "cell_type": "code", "execution_count": null, @@ -1039,51 +444,12 @@ "source": [ "#| export\n", "class OneOf(CustomDictTransform):\n", - " '''Apply only one of the given transforms using TorchIO `OneOf`.'''\n", + " \"\"\"Apply only one of the given transforms using TorchIO `OneOf`.\"\"\"\n", "\n", " def __init__(self, transform_dict, p=1):\n", " super().__init__(tio.OneOf(transform_dict, p=p))" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L244){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### OneOf\n", - "\n", - "> OneOf (transform_dict, p=1)\n", - "\n", - "Apply only one of the given transforms using TorchIO `OneOf`." - ], - "text/plain": [ - "---\n", - "\n", - "[source](https://github.com/MMIV-ML/fastMONAI/blob/master/fastMONAI/vision_augmentation.py#L244){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", - "\n", - "### OneOf\n", - "\n", - "> OneOf (transform_dict, p=1)\n", - "\n", - "Apply only one of the given transforms using TorchIO `OneOf`." 
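Editor's note: `OneOf` applies exactly one of the given transforms per item. A hedged composition sketch — the entries of `transform_dict` are raw TorchIO transforms, since `OneOf` forwards the dict to `tio.OneOf`; passing the resulting list as `item_tfms` to a dataloader follows the fastMONAI tutorials and is assumed here:

```python
# Composition sketch: flip half the items, then pick exactly one spatial
# transform per item, weighted 4:1. Dataloader wiring is assumed, not shown.
import torchio as tio
from fastMONAI.vision_augmentation import OneOf, RandomFlip

item_tfms = [
    RandomFlip(axes='LR', p=0.5),
    OneOf({tio.RandomAffine(degrees=10): 0.8,
           tio.RandomElasticDeformation(): 0.2}),
]
```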
- ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(OneOf, title_level=3)" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/nbs/04_vision_loss_functions.ipynb b/nbs/04_vision_loss_functions.ipynb index 69cc6d9..b70a7dc 100644 --- a/nbs/04_vision_loss_functions.ipynb +++ b/nbs/04_vision_loss_functions.ipynb @@ -37,37 +37,59 @@ { "cell_type": "code", "execution_count": null, - "id": "b589b6c4-b620-428c-abcf-bcf4e7aa3a80", + "id": "e0c0c220-aaeb-46c6-8d18-f72cd9da0555", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class CustomLoss:\n", - " '''Wrapper to get show_results to work.'''\n", + " \"\"\"A custom loss wrapper class for loss functions to allow them to work with\n", + " the 'show_results' method in fastai. \n", + " \"\"\"\n", "\n", " def __init__(self, loss_func):\n", + " \"\"\"Constructs CustomLoss object.\n", + " \n", + " Args:\n", + " loss_func: The loss function to be wrapped.\n", + " \"\"\"\n", + " \n", " self.loss_func = loss_func\n", "\n", " def __call__(self, pred, targ):\n", - " if isinstance(pred, MedBase): pred, targ = torch.Tensor(pred.cpu()), torch.Tensor(targ.cpu().float())\n", + " \"\"\"Computes the loss for given predictions and targets.\n", + "\n", + " Args:\n", + " pred: The predicted outputs.\n", + " targ: The ground truth targets.\n", + "\n", + " Returns:\n", + " The computed loss.\n", + " \"\"\"\n", + " \n", + " if isinstance(pred, MedBase):\n", + " pred, targ = torch.Tensor(pred.cpu()), torch.Tensor(targ.cpu().float())\n", + " \n", " return self.loss_func(pred, targ)\n", "\n", " def activation(self, x):\n", " return x\n", " \n", - " def decodes(self, x):\n", - " '''Converts model output to target format.\n", - "\n", + " def decodes(self, x) -> torch.Tensor:\n", + " \"\"\"Converts model output to target format.\n", + " \n", " Args:\n", - " x: Activations for each class [B, C, W, H, D]\n", + " x: Activations for each class with dimensions [B, C, W, H, D].\n", "\n", " Returns:\n", - " torch.Tensor: Predicted mask.\n", - " '''\n", - "\n", + " The predicted mask.\n", + " \"\"\"\n", + " \n", " n_classes = x.shape[1]\n", - " if n_classes == 1: x = pred_to_binary_mask(x)\n", - " else: x,_ = batch_pred_to_multiclass_mask(x)\n", + " if n_classes == 1: \n", + " x = pred_to_binary_mask(x)\n", + " else: \n", + " x,_ = batch_pred_to_multiclass_mask(x)\n", "\n", " return x" ] @@ -75,16 +97,15 @@ { "cell_type": "code", "execution_count": null, - "id": "c00d0530-ad8b-46fd-a38a-09fba5dd6f9a", + "id": "5052c7bc-3d9a-4e34-8b64-bceaf2fdc7b6", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class TverskyFocalLoss(_Loss):\n", " \"\"\"\n", - " Compute both Dice loss and Focal Loss, and return the weighted sum of these two losses.\n", - " The details of Dice loss is shown in ``monai.losses.DiceLoss``.\n", - " The details of Focal Loss is shown in ``monai.losses.FocalLoss``.\n", + " Compute Tversky loss with a focus parameter, gamma, applied.\n", + " The details of Tversky loss is shown in ``monai.losses.TverskyLoss``.\n", " \"\"\"\n", "\n", " def __init__(\n", @@ -93,47 +114,47 @@ " to_onehot_y: bool = False,\n", " sigmoid: bool = False,\n", " softmax: bool = False,\n", - " reduction: str = \"mean\",\n", " gamma: float = 2,\n", - " #focal_weight: (float, int, torch.Tensor) = None,\n", - " #lambda_dice: float = 1.0,\n", - " #lambda_focal: float = 1.0,\n", - " alpha = 0.5, \n", - " beta = 0.99\n", - " ) -> None:\n", - "\n", + " alpha: float = 0.5, \n", + " beta: float = 
0.99):\n",
+    "        \"\"\"\n",
+    "        Args:\n",
+    "            include_background: whether to compute the loss for the background class.\n",
+    "            to_onehot_y: whether to convert `y` into one-hot format.\n",
+    "            sigmoid: if True, apply a sigmoid function to the prediction.\n",
+    "            softmax: if True, apply a softmax function to the prediction.\n",
+    "            gamma: the focal parameter; it modulates the loss according to \n",
+    "                how far the prediction is from the target.\n",
+    "            alpha: the weight of false positives in the Tversky loss calculation.\n",
+    "            beta: the weight of false negatives in the Tversky loss calculation.\n",
+    "        \"\"\"\n",
+    "        \n",
     "        super().__init__()\n",
-    "        self.tversky = TverskyLoss(to_onehot_y=to_onehot_y, include_background=include_background, sigmoid=sigmoid, softmax=softmax, alpha=alpha, beta=beta)\n",
-    "        #self.focal = FocalLoss(to_onehot_y=to_onehot_y, include_background=include_background, gamma=gamma, weight=focal_weight, reduction=reduction)\n",
-    "        \n",
-    "        #if lambda_dice < 0.0: raise ValueError(\"lambda_dice should be no less than 0.0.\")\n",
-    "        #if lambda_focal < 0.0: raise ValueError(\"lambda_focal should be no less than 0.0.\")\n",
-    "        #self.lambda_dice = lambda_dice\n",
-    "        #self.lambda_focal = lambda_focal\n",
-    "        self.to_onehot_y = to_onehot_y\n",
+    "        self.tversky = TverskyLoss(\n",
+    "            to_onehot_y=to_onehot_y, \n",
+    "            include_background=include_background, \n",
+    "            sigmoid=sigmoid, \n",
+    "            softmax=softmax, \n",
+    "            alpha=alpha, \n",
+    "            beta=beta\n",
+    "        )\n",
     "        self.gamma = gamma\n",
-    "        self.include_background = include_background\n",
     "\n",
     "    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n",
     "        \"\"\"\n",
     "        Args:\n",
-    "            input: the shape should be BNH[WD]. The input should be the original logits\n",
-    "                due to the restriction of ``monai.losses.FocalLoss``.\n",
-    "            target: the shape should be BNH[WD] or B1H[WD].\n",
+    "            input: the shape should be [B, C, W, H, D]. 
The input should be the original logits.\n", + " target: the shape should be[B, C, W, H, D].\n", + "\n", " Raises:\n", " ValueError: When number of dimensions for input and target are different.\n", - " ValueError: When number of channels for target is neither 1 nor the same as input.\n", " \"\"\"\n", " if len(input.shape) != len(target.shape):\n", - " raise ValueError(\"the number of dimensions for input and target should be the same.\")\n", - "\n", - " n_pred_ch = input.shape[1]\n", + " raise ValueError(\"The number of dimensions for input and target should be the same.\")\n", "\n", " tversky_loss = self.tversky(input, target)\n", - " #focal_loss = self.focal(input, target)\n", - " total_loss: torch.Tensor = 1 - ((1 - tversky_loss)**self.gamma) #tversky_loss\n", - " #print(total_loss,total_loss.shape)\n", - " #tversky_loss + focal_loss\n", + " total_loss: torch.Tensor = 1 - ((1 - tversky_loss)**self.gamma)\n", + "\n", " return total_loss" ] } diff --git a/nbs/05_vision_metrics.ipynb b/nbs/05_vision_metrics.ipynb index 3b6e843..5ea3bbc 100644 --- a/nbs/05_vision_metrics.ipynb +++ b/nbs/05_vision_metrics.ipynb @@ -41,8 +41,8 @@ "outputs": [], "source": [ "#| export\n", - "def calculate_dsc(pred, targ):\n", - " ''' MONAI `compute_meandice`'''\n", + "def calculate_dsc(pred: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"MONAI `compute_meandice`\"\"\"\n", "\n", " return torch.Tensor([compute_dice(p[None], t[None]) for p, t in list(zip(pred,targ))])" ] @@ -55,8 +55,8 @@ "outputs": [], "source": [ "#| export\n", - "def calculate_haus(pred, targ):\n", - " ''' MONAI `compute_hausdorff_distance`'''\n", + "def calculate_haus(pred: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"MONAI `compute_hausdorff_distance`\"\"\"\n", "\n", " return torch.Tensor([compute_hausdorff_distance(p[None], t[None]) for p, t in list(zip(pred,targ))])" ] @@ -64,16 +64,21 @@ { "cell_type": "code", "execution_count": null, - "id": "a6ab11e4-1b52-4c53-841f-e0ebbf40e2a7", + "id": "430d64f8-8cd9-4a88-ad20-5f73bebbf12f", "metadata": {}, "outputs": [], "source": [ "#| export\n", - "def binary_dice_score(act, # Activation tensor [B, C, W, H, D]\n", - " targ # Target masks [B, C, W, H, D]\n", - " ) -> torch.Tensor:\n", - " '''Calculate the mean Dice score for binary semantic segmentation tasks.'''\n", - "\n", + "def binary_dice_score(act: torch.tensor, targ: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Calculates the mean Dice score for binary semantic segmentation tasks.\n", + " \n", + " Args:\n", + " act: Activation tensor with dimensions [B, C, W, H, D].\n", + " targ: Target masks with dimensions [B, C, W, H, D].\n", + "\n", + " Returns:\n", + " Mean Dice score.\n", + " \"\"\"\n", " pred = pred_to_binary_mask(act)\n", " dsc = calculate_dsc(pred.cpu(), targ.cpu())\n", "\n", @@ -83,24 +88,29 @@ { "cell_type": "code", "execution_count": null, - "id": "38308293-6ebf-4cbe-b8d3-95bba2ed650e", + "id": "48ba4382-eeb0-46d7-8f84-515313c7c27c", "metadata": {}, "outputs": [], "source": [ "#| export\n", - "def multi_dice_score(act, # Activation values [B, C, W, H, D]\n", - " targ # Target masks [B, C, W, H, D]\n", - " ) -> torch.Tensor:\n", - " '''Calculate the mean Dice score for each class in multi-class semantic segmentation tasks.'''\n", + "def multi_dice_score(act: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Calculate the mean Dice score for each class in multi-class semantic \n", + " segmentation tasks.\n", "\n", + " Args:\n", + " act: Activation tensor with dimensions [B, C, W, 
H, D].\n", + " targ: Target masks with dimensions [B, C, W, H, D].\n", "\n", + " Returns:\n", + " Mean Dice score for each class.\n", + " \"\"\"\n", " pred, n_classes = batch_pred_to_multiclass_mask(act)\n", " binary_dice_scores = []\n", "\n", " for c in range(1, n_classes):\n", - " c_pred, c_targ = torch.where(pred==c, 1, 0), torch.where(targ==c, 1, 0)\n", + " c_pred, c_targ = torch.where(pred == c, 1, 0), torch.where(targ == c, 1, 0)\n", " dsc = calculate_dsc(c_pred, c_targ)\n", - " binary_dice_scores.append(np.nanmean(dsc)) #TODO update torch to get torch.nanmean() to work\n", + " binary_dice_scores.append(np.nanmean(dsc)) # #TODO update torch to get torch.nanmean() to work\n", "\n", " return torch.Tensor(binary_dice_scores)" ] @@ -113,10 +123,17 @@ "outputs": [], "source": [ "#| export\n", - "def binary_hausdorff_distance(act, # Activation tensor [B, C, W, H, D]\n", - " targ # Target masks [B, C, W, H, D]\n", - " ) -> torch.Tensor:\n", - " '''Calculate the mean Hausdorff distance for binary semantic segmentation tasks.'''\n", + "def binary_hausdorff_distance(act: torch.Tensor, targ: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Calculate the mean Hausdorff distance for binary semantic segmentation tasks.\n", + " \n", + " Args:\n", + " act: Activation tensor with dimensions [B, C, W, H, D].\n", + " targ: Target masks with dimensions [B, C, W, H, D].\n", + "\n", + " Returns:\n", + " Mean Hausdorff distance.\n", + " \"\"\"\n", + " \n", "\n", " pred = pred_to_binary_mask(act)\n", "\n", @@ -132,10 +149,16 @@ "outputs": [], "source": [ "#| export\n", - "def multi_hausdorff_distance(act, # Activation tensor [B, C, W, H, D]\n", - " targ # Target masks [B, C, W, H, D]\n", - " ) -> torch.Tensor :\n", - " '''Calculate the mean Hausdorff distance for each class in multi-class semantic segmentation tasks.'''\n", + "def multi_hausdorff_distance(act: torch.Tensor, targ: torch.Tensor) -> torch.Tensor :\n", + " \"\"\"Calculate the mean Hausdorff distance for each class in multi-class semantic segmentation tasks.\n", + " \n", + " Args:\n", + " act: Activation tensor with dimensions [B, C, W, H, D].\n", + " targ: Target masks with dimensions [B, C, W, H, D].\n", + "\n", + " Returns:\n", + " Mean Hausdorff distance for each class.\n", + " \"\"\"\n", "\n", " pred, n_classes = batch_pred_to_multiclass_mask(act)\n", " binary_haus = []\n", diff --git a/nbs/06_vision_inference.ipynb b/nbs/06_vision_inference.ipynb index f25796d..6691d00 100644 --- a/nbs/06_vision_inference.ipynb +++ b/nbs/06_vision_inference.ipynb @@ -58,15 +58,23 @@ { "cell_type": "code", "execution_count": null, - "id": "0ce88606-6bfc-4d97-9e1e-235df1df57cd", + "id": "75a1169f-7385-4c48-9a24-51994c80732c", "metadata": {}, "outputs": [], "source": [ "#| export\n", - "def _do_resize(o, target_shape, image_interpolation='linear', label_interpolation='nearest'):\n", - " '''Resample images so the output shape matches the given target shape.'''\n", + "def _do_resize(o, target_shape, image_interpolation='linear', \n", + " label_interpolation='nearest'):\n", + " \"\"\"\n", + " Resample images so the output shape matches the given target shape.\n", + " \"\"\"\n", "\n", - " resize = Resize(target_shape, image_interpolation=image_interpolation, label_interpolation=label_interpolation)\n", + " resize = Resize(\n", + " target_shape, \n", + " image_interpolation=image_interpolation, \n", + " label_interpolation=label_interpolation\n", + " )\n", + " \n", " return resize(o)" ] }, @@ -78,20 +86,27 @@ "outputs": [], "source": [ "#| export\n", - "def 
inference(learn_inf, reorder, resample, fn:(Path,str)='', save_path:(str,Path)=None, org_img=None, input_img=None, org_size=None): \n", - " '''Predict on new data using exported model''' \n", + "def inference(learn_inf, reorder, resample, fn: (str, Path) = '',\n", + " save_path: (str, Path) = None, org_img=None, input_img=None,\n", + " org_size=None): \n", + " \"\"\"Predict on new data using exported model.\"\"\" \n", + " \n", " if None in [org_img, input_img, org_size]: \n", - " org_img, input_img, org_size = med_img_reader(fn, reorder, resample, only_tensor=False)\n", - " else: org_img, input_img = copy(org_img), copy(input_img)\n", + " org_img, input_img, org_size = med_img_reader(fn, reorder, resample, \n", + " only_tensor=False)\n", + " else: \n", + " org_img, input_img = copy(org_img), copy(input_img)\n", " \n", - " pred, *_ = learn_inf.predict(input_img.data);\n", + " pred, *_ = learn_inf.predict(input_img.data)\n", " \n", - " pred_mask = do_pad_or_crop(pred.float(), input_img.shape[1:], padding_mode=0, mask_name=None)\n", + " pred_mask = do_pad_or_crop(pred.float(), input_img.shape[1:], padding_mode=0, \n", + " mask_name=None)\n", " input_img.set_data(pred_mask)\n", " \n", " input_img = _do_resize(input_img, org_size, image_interpolation='nearest')\n", " \n", - " reoriented_array = _to_original_orientation(input_img.as_sitk(), ('').join(org_img.orientation))\n", + " reoriented_array = _to_original_orientation(input_img.as_sitk(), \n", + " ('').join(org_img.orientation))\n", " \n", " org_img.set_data(reoriented_array)\n", "\n", @@ -119,12 +134,10 @@ "outputs": [], "source": [ "#| export\n", - "def refine_binary_pred_mask(\n", - " pred_mask,\n", - " remove_size: (int, float) = None,\n", - " percentage: float = 0.2,\n", - " verbose: bool = False\n", - "):\n", + "def refine_binary_pred_mask(pred_mask, \n", + " remove_size: (int, float) = None,\n", + " percentage: float = 0.2,\n", + " verbose: bool = False) -> np.ndarray:\n", " \"\"\"Removes small objects from the predicted binary mask.\n", "\n", " Args:\n", @@ -137,6 +150,7 @@ " Returns:\n", " The processed mask with small objects removed.\n", " \"\"\"\n", + " \n", " labeled_mask, n_components = label(pred_mask)\n", "\n", " if verbose:\n", diff --git a/nbs/07_utils.ipynb b/nbs/07_utils.ipynb index 331751f..bb720a8 100644 --- a/nbs/07_utils.ipynb +++ b/nbs/07_utils.ipynb @@ -45,7 +45,7 @@ " reorder:bool,\n", " resample:(int,list),\n", " ) -> None:\n", - " '''Save variable values in a pickle file.'''\n", + " \"\"\"Save variable values in a pickle file.\"\"\"\n", " \n", " var_vals = [size, reorder, resample]\n", " \n", @@ -56,18 +56,21 @@ { "cell_type": "code", "execution_count": null, - "id": "0e64d6c3-601e-4646-883f-80e72aebd74e", + "id": "c2db5512-171c-4dfd-a26e-561b773a6069", "metadata": {}, "outputs": [], "source": [ "#| export\n", - "def load_variables(pkl_fn # Filename of the pickle file\n", - " ):\n", - " '''Load stored variable values from a pickle file.\n", + "def load_variables(pkl_fn: (str, Path)) -> Any:\n", + " \"\"\"\n", + " Loads stored variable values from a pickle file.\n", "\n", - " Returns: A list of variable values.\n", - " '''\n", + " Args:\n", + " pkl_fn: File path of the pickle file to be loaded.\n", "\n", + " Returns:\n", + " The deserialized value of the pickled data.\n", + " \"\"\"\n", " with open(pkl_fn, 'rb') as f:\n", " return pickle.load(f)" ] @@ -81,7 +84,7 @@ "source": [ "#| export\n", "def print_colab_gpu_info(): \n", - " '''Check if we have a GPU attached to the runtime.'''\n", + " \"\"\"Check if we have 
a GPU attached to the runtime.'''\n",
+    "    \"\"\"Check if we have a GPU attached to the runtime.\"\"\"\n",
     "    \n",
     "    colab_gpu_msg =(f\"{'#'*80}\\n\"\n",
     "                    \"Remember to attach a GPU to your Colab Runtime:\"\n",
diff --git a/nbs/08_dataset_info.ipynb b/nbs/08_dataset_info.ipynb
index 6fff712..1b79db8 100644
--- a/nbs/08_dataset_info.ipynb
+++ b/nbs/08_dataset_info.ipynb
@@ -51,23 +51,28 @@
 {
   "cell_type": "code",
   "execution_count": null,
-   "id": "3b9d5a24-5330-4fd4-b507-3d21799fe864",
+   "id": "3593203e-e5e1-4564-94d4-8e31b7048cf9",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
-    "class MedDataset():\n",
-    "    '''A class to extract and present information about the dataset.'''\n",
-    "\n",
-    "    def __init__(self, path=None, # Path to the image folder\n",
-    "                 postfix:str='', # Specify the file type if there are different files in the folder\n",
-    "                 img_list:list=None, # Alternatively pass in a list with image paths\n",
-    "                 reorder:bool=False, # Whether to reorder the data to be closest to canonical (RAS+) orientation\n",
-    "                 dtype:(MedImage, MedMask)=MedImage, # Load data as datatype\n",
-    "                 max_workers:int=1 # The number of worker threads\n",
-    "                ):\n",
-    "        '''Constructs all the necessary attributes for the MedDataset object.'''\n",
+    "class MedDataset:\n",
+    "    \"\"\"A class to extract and present information about the dataset.\"\"\"\n",
     "\n",
+    "    def __init__(self, path=None, postfix: str = '', img_list: list = None,\n",
+    "                 reorder: bool = False, dtype: (MedImage, MedMask) = MedImage,\n",
+    "                 max_workers: int = 1):\n",
+    "        \"\"\"Constructs MedDataset object.\n",
+    "\n",
+    "        Args:\n",
+    "            path (str, optional): Path to the image folder.\n",
+    "            postfix (str, optional): Specify the file type if there are different files in the folder.\n",
+    "            img_list (List[str], optional): Alternatively, pass in a list with image paths.\n",
+    "            reorder (bool, optional): Whether to reorder the data to be closest to canonical (RAS+) orientation.\n",
+    "            dtype (Union[MedImage, MedMask], optional): Load data as datatype. Default is MedImage.\n",
+    "            max_workers (int, optional): The number of worker threads. Default is 1.\n",
+    "        \"\"\"\n",
+    "        \n",
     "        self.path = path\n",
     "        self.postfix = postfix\n",
     "        self.img_list = img_list\n",
@@ -77,48 +82,43 @@
     "        self.df = self._create_data_frame()\n",
     "\n",
     "    def _create_data_frame(self):\n",
-    "        '''Private method that returns a dataframe with information about the dataset\n",
-    "\n",
-    "        Returns:\n",
-    "            DataFrame: A DataFrame with information about the dataset.\n",
-    "        '''\n",
+    "        \"\"\"Private method that returns a dataframe with information about the dataset.\"\"\"\n",
     "\n",
     "        if self.path:\n",
     "            self.img_list = glob.glob(f'{self.path}/*{self.postfix}*')\n",
     "            if not self.img_list: print('Could not find images. Check the image path')\n",
-    "            \n",
+    "\n",
     "        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:\n",
     "            data_info_dict = list(executor.map(self._get_data_info, self.img_list))\n",
-    "            \n",
+    "\n",
     "        df = pd.DataFrame(data_info_dict)\n",
-    "        if df.orientation.nunique() > 1: print('The volumes in this dataset have different orientations. Recommended to pass in the argument reorder=True when creating a MedDataset object for this dataset')\n",
+    "        \n",
+    "        if df.orientation.nunique() > 1:\n",
+    "            print('The volumes in this dataset have different orientations. 
'\n",
+    "                  'It is recommended to pass reorder=True when creating a MedDataset object for this dataset.')\n",
+    "\n",
     "        return df\n",
     "\n",
     "    def summary(self):\n",
-    "        '''Summary DataFrame of the dataset with example path for similar data.'''\n",
-    "\n",
+    "        \"\"\"Summary DataFrame of the dataset with example path for similar data.\"\"\"\n",
+    "        \n",
     "        columns = ['dim_0', 'dim_1', 'dim_2', 'voxel_0', 'voxel_1', 'voxel_2', 'orientation']\n",
-    "        return self.df.groupby(columns,as_index=False).agg(example_path=('path', 'min'), total=('path', 'size')).sort_values('total', ascending=False)\n",
+    "        \n",
+    "        return self.df.groupby(columns, as_index=False).agg(\n",
+    "            example_path=('path', 'min'), total=('path', 'size')\n",
+    "        ).sort_values('total', ascending=False)\n",
     "\n",
     "    def suggestion(self):\n",
-    "        '''Voxel value that appears most often in dim_0, dim_1 and dim_2, and wheter the data should be reoriented.'''\n",
+    "        \"\"\"Voxel value that appears most often in dim_0, dim_1 and dim_2, and whether the data should be reoriented.\"\"\"\n",
+    "        \n",
     "        resample = [self.df.voxel_0.mode()[0], self.df.voxel_1.mode()[0], self.df.voxel_2.mode()[0]]\n",
-    "\n",
     "        return resample, self.reorder\n",
     "\n",
-    "    def _get_data_info(self, fn:str):\n",
-    "        '''Private method to collect information about an image file.\n",
-    "\n",
-    "        Args:\n",
-    "            fn: Image file path.\n",
-    "\n",
-    "        Returns:\n",
-    "            dict: A dictionary with information about the image file\n",
-    "        '''\n",
-    "\n",
-    "        _,o,_ = med_img_reader(fn, dtype=self.dtype, reorder=self.reorder, only_tensor=False)\n",
+    "    def _get_data_info(self, fn: str):\n",
+    "        \"\"\"Private method to collect information about an image file.\"\"\"\n",
+    "        _, o, _ = med_img_reader(fn, dtype=self.dtype, reorder=self.reorder, only_tensor=False)\n",
     "\n",
-    "        info_dict = {'path': fn, 'dim_0': o.shape[1], 'dim_1': o.shape[2], 'dim_2' :o.shape[3],\n",
+    "        info_dict = {'path': fn, 'dim_0': o.shape[1], 'dim_1': o.shape[2], 'dim_2': o.shape[3],\n",
     "                     'voxel_0': round(o.spacing[0], 4), 'voxel_1': round(o.spacing[1], 4), 'voxel_2': round(o.spacing[2], 4),\n",
     "                     'orientation': f'{\"\".join(o.orientation)}+'}\n",
     "\n",
@@ -129,105 +129,48 @@
     "\n",
     "        return info_dict\n",
     "\n",
-    "    def get_largest_img_size(self,\n",
-    "                             resample:list=None # A list with voxel spacing [dim_0, dim_1, dim_2]\n",
-    "                            ) -> list:\n",
-    "        '''Get the largest image size in the dataset.'''\n",
-    "        dims = None \n",
     "        \n",
-    "        if resample is not None: \n",
-    "        \n",
+    "    def get_largest_img_size(self, resample: list = None) -> list:\n",
+    "        \"\"\"Get the largest image size in the dataset.\"\"\"\n",
+    "        \n",
+    "        dims = None\n",
+    "\n",
+    "        if resample is not None:\n",
     "            org_voxels = self.df[[\"voxel_0\", \"voxel_1\", 'voxel_2']].values\n",
     "            org_dims = self.df[[\"dim_0\", \"dim_1\", 'dim_2']].values\n",
-    "            \n",
+    "\n",
     "            ratio = org_voxels/resample\n",
     "            new_dims = (org_dims * ratio).T\n",
     "            dims = [new_dims[0].max().round(), new_dims[1].max().round(), new_dims[2].max().round()]\n",
-    "            \n",
-    "        else: dims = [df.dim_0.max(), df.dim_1.max(), df.dim_2.max()]\n",
-    "        \n",
+    "\n",
+    "        else:\n",
+    "            dims = [self.df.dim_0.max(), self.df.dim_1.max(), self.df.dim_2.max()]\n",
+    "\n",
     "        return dims"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "baaa5a59-2c84-4009-a7d3-4f00f1cce441",
+   "id": "9b81f6e8-abd7-4bf6-be4c-4118986c308a",
    "metadata": {},
    "outputs": [],
    "source": [
     "#| export \n",
-    "def get_class_weights(train_labels:(np.array, list), class_weight='balanced'): \n",
-    "    '''calculate class weights.'''\n",
+    "def 
get_class_weights(labels: (np.array, list), class_weight: str = 'balanced') -> torch.Tensor: \n", + " \"\"\"Calculates and returns the class weights.\n", + "\n", + " Args:\n", + " labels: An array or list of class labels for each instance in the dataset.\n", + " class_weight: Defaults to 'balanced'.\n", + "\n", + " Returns:\n", + " A tensor of class weights.\n", + " \"\"\"\n", + " \n", + " class_weights = compute_class_weight(class_weight=class_weight, classes=np.unique(labels), y=labels)\n", " \n", - " class_weights = compute_class_weight(class_weight=class_weight, classes=np.unique(train_labels), y=train_labels)\n", " return torch.Tensor(class_weights)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f5dd82f6-f08d-42e8-9d2d-b3c624af7ce3", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "#### MedDataset.summary\n", - "\n", - "> MedDataset.summary ()\n", - "\n", - "Summary DataFrame of the dataset with example path for similar data." - ], - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(MedDataset.summary)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12c20050-2f33-44bb-98cd-a109e3efdff1", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "---\n", - "\n", - "#### MedDataset.get_largest_img_size\n", - "\n", - "> MedDataset.get_largest_img_size (resample:list=None)\n", - "\n", - "Get the largest image size in the dataset.\n", - "\n", - "| | **Type** | **Default** | **Details** |\n", - "| -- | -------- | ----------- | ----------- |\n", - "| resample | list | None | A list with voxel spacing [dim_0, dim_1, dim_2] |\n", - "| **Returns** | **list** | | |" - ], - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "show_doc(MedDataset.get_largest_img_size)" - ] } ], "metadata": { diff --git a/nbs/09_external_data.ipynb b/nbs/09_external_data.ipynb index 7e9ac48..dae51e8 100644 --- a/nbs/09_external_data.ipynb +++ b/nbs/09_external_data.ipynb @@ -10,16 +10,6 @@ "#| default_exp external_data" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "d2e473e4-69ca-4fe9-ba7a-458f2f500eef", - "metadata": {}, - "outputs": [], - "source": [ - "#todo" - ] - }, { "cell_type": "code", "execution_count": null, @@ -57,33 +47,42 @@ "source": [ "#| export\n", "class MURLs():\n", - " '''A class with external medical dataset URLs.'''\n", + " \"\"\"A class with external medical dataset URLs.\"\"\"\n", "\n", " IXI_DATA = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'\n", " IXI_DEMOGRAPHIC_INFORMATION = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'\n", " CHENGWEN_CHU_SPINE_DATA = 'https://drive.google.com/uc?id=1rbm9-KKAexpNm2mC9FsSbfnS8VJaF3Kn&confirm=t'\n", " EXAMPLE_SPINE_DATA = 'https://drive.google.com/uc?id=1Ms3Q6MYQrQUA_PKZbJ2t2NeYFQ5jloMh'\n", - " NODULE_MNIST_DATA = 'https://zenodo.org/record/6496656/files/nodulemnist3d.npz?download=1'" + " #NODULE_MNIST_DATA = 'https://zenodo.org/record/6496656/files/nodulemnist3d.npz?download=1'\n", + " MEDMNIST_DICT = {'OrganMNIST3D': 'https://zenodo.org/record/6496656/files/organmnist3d.npz?download=1',\t\n", + " 'NoduleMNIST3D': 'https://zenodo.org/record/6496656/files/nodulemnist3d.npz?download=1',\n", + " 'AdrenalMNIST3D': 'https://zenodo.org/record/6496656/files/adrenalmnist3d.npz?download=1',\t\n", + " 
'FractureMNIST3D': 'https://zenodo.org/record/6496656/files/fracturemnist3d.npz?download=1',\n", + " 'VesselMNIST3D': 'https://zenodo.org/record/6496656/files/vesselmnist3d.npz?download=1', \n", + " 'SynapseMNIST3D': 'https://zenodo.org/record/6496656/files/synapsemnist3d.npz?download=1'}" ] }, { "cell_type": "code", "execution_count": null, - "id": "61aa5aa9-44ec-4930-9d38-b91af7d6e804", + "id": "cb4c061a-d7f5-4ba0-9e7b-ee0c6ec480b0", "metadata": {}, "outputs": [], "source": [ - "#| export \n", - "def _process_ixi_xls(xls_path:(str, Path), img_path: Path):\n", - " '''Private method to process the demographic information for the IXI dataset.\n", + "#| export\n", + "def _process_ixi_xls(xls_path: (str, Path), img_path: Path) -> pd.DataFrame:\n", + " \"\"\"Private method to process the demographic information for the IXI dataset.\n", "\n", " Args:\n", " xls_path: File path to the xls file with the demographic information.\n", - " img_path: Folder path to the images\n", + " img_path: Folder path to the images.\n", "\n", " Returns:\n", - " DataFrame: A processed dataframe with image path and demographic information.\n", - " '''\n", + " A processed dataframe with image path and demographic information.\n", + "\n", + " Raises:\n", + " ValueError: If xls_path or img_path do not exist.\n", + " \"\"\"\n", "\n", " print('Preprocessing ' + str(xls_path))\n", "\n", @@ -93,14 +92,14 @@ "\n", " for subject_id in duplicate_subject_ids:\n", " age = df.loc[df.IXI_ID == subject_id].AGE.nunique()\n", - " if age != 1: df = df.loc[df.IXI_ID != subject_id] #Remove duplicates with two different age values\n", + " if age != 1: df = df.loc[df.IXI_ID != subject_id] # Remove duplicates with two different age values\n", "\n", " df = df.drop_duplicates(subset='IXI_ID', keep='first').reset_index(drop=True)\n", "\n", " df['subject_id'] = ['IXI' + str(subject_id).zfill(3) for subject_id in df.IXI_ID.values]\n", " df = df.rename(columns={'SEX_ID (1=m, 2=f)': 'gender'})\n", " df['age_at_scan'] = df.AGE.round(2)\n", - " df = df.replace({'gender': {1:'M', 2:'F'}})\n", + " df = df.replace({'gender': {1: 'M', 2: 'F'}})\n", "\n", " img_list = list(img_path.glob('*.nii.gz'))\n", " for path in img_list:\n", @@ -109,6 +108,7 @@ "\n", " df = df.dropna()\n", " df = df[['t1_path', 'subject_id', 'gender', 'age_at_scan']]\n", + " \n", " return df" ] }, @@ -123,40 +123,41 @@ { "cell_type": "code", "execution_count": null, - "id": "712079a2-b3b0-4658-b830-34eefe140417", + "id": "6714a68f-1378-46b3-aeff-ef940213ac2f", "metadata": {}, "outputs": [], "source": [ "#| export\n", - "def download_ixi_data(path:(str, Path)='../data' # Path to the directory where the data will be stored\n", - " ):\n", - " '''Download T1 scans and demographic information from the IXI dataset, then process the demographic \n", - " information for each subject and save the information as a CSV file.\n", - " Returns path to the stored CSV file.\n", - " '''\n", - " path = Path(path)/'IXI'\n", - " img_path = path/'T1_images' \n", + "def download_ixi_data(path: (str, Path) = '../data') -> Path:\n", + " \"\"\"Download T1 scans and demographic information from the IXI dataset.\n", + " \n", + " Args:\n", + " path: Path to the directory where the data will be stored. 
Defaults to '../data'.\n", + "\n", + " Returns:\n", + " The path to the stored CSV file.\n", + " \"\"\"\n", + "\n", + " path = Path(path) / 'IXI'\n", + " img_path = path / 'T1_images'\n", "\n", " # Check whether image data already present in img_path:\n", - " is_extracted=False\n", + " is_extracted = False\n", " try:\n", - " if len(list(img_path.iterdir())) >= 581: # 581 imgs in the IXI dataset\n", - " is_extracted=True\n", + " if len(list(img_path.iterdir())) >= 581: # 581 imgs in the IXI dataset\n", + " is_extracted = True\n", " print(f\"Images already downloaded and extracted to {img_path}\")\n", " except:\n", - " is_extracted=False\n", + " is_extracted = False\n", "\n", - " # Download and extract images\n", - " if not is_extracted: \n", - " download_and_extract(url=MURLs.IXI_DATA, filepath=path/'IXI-T1.tar', output_dir=img_path)\n", - " (path/'IXI-T1.tar').unlink()\n", + " if not is_extracted:\n", + " download_and_extract(url=MURLs.IXI_DATA, filepath=path / 'IXI-T1.tar', output_dir=img_path)\n", + " (path / 'IXI-T1.tar').unlink()\n", "\n", + " download_url(url=MURLs.IXI_DEMOGRAPHIC_INFORMATION, filepath=path / 'IXI.xls')\n", "\n", - " # Download demographic info\n", - " download_url(url=MURLs.IXI_DEMOGRAPHIC_INFORMATION, filepath=path/'IXI.xls')\n", - "\n", - " processed_df = _process_ixi_xls(xls_path=path/'IXI.xls', img_path=img_path)\n", - " processed_df.to_csv(path/'dataset.csv',index=False)\n", + " processed_df = _process_ixi_xls(xls_path=path / 'IXI.xls', img_path=img_path)\n", + " processed_df.to_csv(path / 'dataset.csv', index=False)\n", "\n", " return path" ] @@ -172,19 +173,25 @@ { "cell_type": "code", "execution_count": null, - "id": "e39ec7dd-5913-41d0-823f-064fc5b9bf75", + "id": "7753da8a-93e8-4bf3-8f78-bb158b4280d0", "metadata": {}, "outputs": [], "source": [ "#| export\n", - "def download_ixi_tiny(path:(str, Path)='../data'):\n", - " ''' Download tiny version of IXI provided by TorchIO, containing 566 T1 brain MR scans and their corresponding brain segmentations.'''\n", + "def download_ixi_tiny(path: (str, Path) = '../data') -> Path:\n", + " \"\"\"Download the tiny version of the IXI dataset provided by TorchIO.\n", + "\n", + " Args:\n", + " path: The directory where the data will be \n", + " stored. 
If not provided, defaults to '../data'.\n", + "\n", + " Returns:\n", + " The path to the directory where the data is stored.\n", + " \"\"\"\n", " \n", - " path = Path(path)/'IXITiny'\n", + " path = Path(path) / 'IXITiny'\n", " \n", - " #Download MR scans and segmentation masks\n", " IXITiny(root=str(path), download=True)\n", - " # Download demographic info\n", " download_url(url=MURLs.IXI_DEMOGRAPHIC_INFORMATION, filepath=path/'IXI.xls')\n", " \n", " processed_df = _process_ixi_xls(xls_path=path/'IXI.xls', img_path=path/'image')\n", @@ -195,77 +202,111 @@ " return path" ] }, + { + "cell_type": "markdown", + "id": "17de9ba6-00b5-408e-8dee-abafc62926ef", + "metadata": {}, + "source": [ + "## Lower spine data " + ] + }, { "cell_type": "code", "execution_count": null, - "id": "c7e71e62-862d-4c80-9740-2215c2ce8f0e", + "id": "b466174a-4b49-4a8f-92c6-1e5e3ca9fc2a", "metadata": {}, "outputs": [], "source": [ "#| export\n", - "def _create_spine_df(test_dir:Path):\n", - " # Get a list of the image files in the 'img' directory\n", - " img_list = glob(str(test_dir/'img/*.nii.gz'))\n", + "def _create_spine_df(dir: Path) -> pd.DataFrame:\n", + " \"\"\"Create a pandas DataFrame containing information about spinal images.\n", "\n", - " # Create a list of the corresponding mask files in the 'seg' directory\n", - " mask_list = [str(fn).replace('img', 'seg') for fn in img_list]\n", + " Args:\n", + " dir: Directory path where data (image and segmentation \n", + " mask files) are stored.\n", "\n", - " # Create a list of the subject IDs for each image file\n", + " Returns:\n", + " A DataFrame containing the paths to the image files and their \n", + " corresponding mask files, the subject IDs, and a flag indicating that \n", + " these are test data.\n", + " \"\"\"\n", + " \n", + " img_list = glob(str(dir / 'img/*.nii.gz'))\n", + " mask_list = [str(fn).replace('img', 'seg') for fn in img_list]\n", " subject_id_list = [fn.split('_')[-1].split('.')[0] for fn in mask_list]\n", " \n", - " # Create a dictionary containing the test data\n", - " test_data = {'t2_img_path':img_list, 't2_mask_path':mask_list, 'subject_id':subject_id_list, 'is_test':True}\n", + " test_data = {\n", + " 't2_img_path': img_list,\n", + " 't2_mask_path': mask_list,\n", + " 'subject_id': subject_id_list,\n", + " 'is_test': True,\n", + " }\n", "\n", - " # Create a DataFrame from the example data dictionary\n", " return pd.DataFrame(test_data)" ] }, - { - "cell_type": "markdown", - "id": "17de9ba6-00b5-408e-8dee-abafc62926ef", - "metadata": {}, - "source": [ - "## Lower spine data " - ] - }, { "cell_type": "code", "execution_count": null, - "id": "336f687b-7997-43ab-a2a0-32376c329fb6", + "id": "26256dca-d9df-43f6-b8eb-f36ce2a445dc", "metadata": {}, "outputs": [], "source": [ - "#| export \n", - "def download_spine_test_data(path:(str, Path)='../data'):\n", + "#| export \n", + "def download_spine_test_data(path: (str, Path) = '../data') -> pd.DataFrame:\n", + " \"\"\"Downloads T2w scans from the study 'Fully Automatic Localization and \n", + " Segmentation of 3D Vertebral Bodies from CT/MR Images via a Learning-Based \n", + " Method' by Chu et. al. \n", + "\n", + " Args:\n", + " path: Directory where the downloaded data \n", + " will be stored and extracted. 
Defaults to '../data'.\n", + "\n", + " Returns:\n", + " Processed dataframe containing image paths, label paths, and subject IDs.\n", + " \"\"\"\n", " \n", - " ''' Download T2w scans from 'Fully Automatic Localization and Segmentation of 3D Vertebral Bodies from CT/MR Images via a Learning-Based Method' study by Chu et. al. \n", - " Returns a processed dataframe with image path, label path and subject IDs. \n", - " '''\n", " study = 'chengwen_chu_2015'\n", " \n", - " download_and_extract(url=MURLs.CHENGWEN_CHU_SPINE_DATA, filepath=f'{study}.zip', output_dir=path)\n", + " download_and_extract(\n", + " url=MURLs.CHENGWEN_CHU_SPINE_DATA, \n", + " filepath=f'{study}.zip', \n", + " output_dir=path\n", + " )\n", " Path(f'{study}.zip').unlink()\n", " \n", - " return _create_spine_df(Path(path)/study)" + " return _create_spine_df(Path(path) / study)" ] }, { "cell_type": "code", "execution_count": null, - "id": "8f0fae60-db03-4ead-a9e4-0e092d62d3f3", + "id": "93b77ec9-a93a-42cc-b707-4e1e75063533", "metadata": {}, "outputs": [], "source": [ "#| export \n", - "def download_example_spine_data(path:(str, Path)='../data'): \n", + "def download_example_spine_data(path: (str, Path) = '../data') -> Path:\n", + " \"\"\"Downloads example T2w scan and corresponding predicted mask.\n", + " \n", + " Args:\n", + " path: Directory where the downloaded data \n", + " will be stored and extracted. Defaults to '../data'.\n", + "\n", + " Returns:\n", + " Path to the directory where the example data has been extracted.\n", + " \"\"\"\n", " \n", - " '''Download example T2w scan and predicted mask.'''\n", " study = 'example_data'\n", " \n", - " download_and_extract(url=MURLs.EXAMPLE_SPINE_DATA, filepath='example_data.zip', output_dir=path);\n", + " download_and_extract(\n", + " url=MURLs.EXAMPLE_SPINE_DATA, \n", + " filepath='example_data.zip', \n", + " output_dir=path\n", + " )\n", " Path('example_data.zip').unlink()\n", " \n", - " return Path(path/study)" + " return Path(path) / study" ] }, { @@ -273,7 +314,7 @@ "id": "228c0417-392c-4897-949c-d2cb572cd855", "metadata": {}, "source": [ - "## NoduleMNIST3D" + "## MedMNIST3D" ] }, { @@ -282,10 +323,112 @@ "id": "37b05b40-aeee-4906-aa98-ff79b3d667fe", "metadata": {}, "outputs": [], + "source": [ + "# #| export \n", + "# def _process_nodule_img(path, idx_arr):\n", + "# \"\"\"Save tensor as NIfTI.\"\"\"\n", + " \n", + "# idx, arr = idx_arr\n", + "# img = ScalarImage(tensor=arr[None, :])\n", + "# fn = path/f'{idx}_nodule.nii.gz'\n", + "# img.save(fn)\n", + "# return str(fn)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc07388c-50e9-4cb4-ab0e-62ae88d8c0eb", + "metadata": {}, + "outputs": [], + "source": [ + "# #| export \n", + "# def _df_sort_and_add_columns(df, label_list, is_val):\n", + "# \"\"\"Sort the dataframe based on img_idx and add labels and if it is validation data column.\"\"\"\n", + " \n", + "# df = df.sort_values(by='img_idx').reset_index(drop=True)\n", + "# df['labels'], df['is_val'] = label_list, is_val \n", + "# df = df.replace({\"labels\": {0:'b', 1:'m'}})\n", + "# df = df.drop('img_idx', axis=1)\n", + " \n", + "# return df " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44dd1afb-6028-4daa-aa05-41d57c7d9355", + "metadata": {}, + "outputs": [], + "source": [ + "# #| export \n", + "# def _create_nodule_df(pool, output_dir, imgs, labels, is_val=False): \n", + "# \"\"\"Create dataframe for NoduleMNIST3D data.\"\"\"\n", + " \n", + "# img_path_list = pool.map(partial(_process_nodule_img, output_dir), 
enumerate(imgs))\n", + "# img_idx = [float(Path(fn).parts[-1].split('_')[0]) for fn in img_path_list]\n", + " \n", + "# df = pd.DataFrame(list(zip(img_path_list, img_idx)), columns=['img_path','img_idx']) \n", + "# return _df_sort_and_add_columns(df, labels, is_val)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fdd18417-697e-45f6-a59c-88ccc46708fd", + "metadata": {}, + "outputs": [], + "source": [ + "# #| export \n", + "# def download_NoduleMNIST3D(path: (str, Path) = '../data', max_workers: int = 1) -> Tuple[pd.DataFrame, pd.DataFrame]:\n", + "# \"\"\"Downloads and processes NoduleMNIST3D data.\n", + "\n", + "# Args:\n", + "# path: Directory where the downloaded data\n", + "# will be stored and extracted. Defaults to '../data'.\n", + "# max_workers: Maximum number of worker processes to use\n", + "# for data processing. Defaults to 1.\n", + "\n", + "# Returns:\n", + "# A tuple of two pandas DataFrames. The first DataFrame combines training and validation data, \n", + "# and the second DataFrame contains the testing data.\n", + "# \"\"\"\n", + " \n", + "# study = 'NoduleMNIST3D'\n", + "# path = Path(path) / study\n", + " \n", + "# download_url(url=MURLs.NODULE_MNIST_DATA, filepath=path / f'{study}.npz')\n", + "# data = load(path / f'{study}.npz')\n", + "# key_fn = ['train_images', 'val_images', 'test_images']\n", + " \n", + "# for fn in key_fn: \n", + "# (path / fn).mkdir(exist_ok=True)\n", + " \n", + "# train_imgs, val_imgs, test_imgs = data[key_fn[0]], data[key_fn[1]], data[key_fn[2]]\n", + "\n", + "# with mp.Pool(processes=max_workers) as pool:\n", + "# train_df = _create_nodule_df(pool, path / key_fn[0], train_imgs, data['train_labels'])\n", + "# val_df = _create_nodule_df(pool, path / key_fn[1], val_imgs, data['val_labels'], is_val=True)\n", + "# test_df = _create_nodule_df(pool, path / key_fn[2], test_imgs, data['test_labels'])\n", + " \n", + "# train_val_df = pd.concat([train_df, val_df], ignore_index=True)\n", + " \n", + "# (path / f'{study}.npz').unlink()\n", + " \n", + "# return train_val_df, test_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89f60f76-5e5d-4a40-b49a-617ec2d35731", + "metadata": {}, + "outputs": [], "source": [ "#| export \n", - "def _process_nodule_img(path, idx_arr):\n", - " '''Save tensor as NIfTI.'''\n", + "def _process_medmnist_img(path, idx_arr):\n", + " \"\"\"Save tensor as NIfTI.\"\"\"\n", + " \n", " idx, arr = idx_arr\n", " img = ScalarImage(tensor=arr[None, :])\n", " fn = path/f'{idx}_nodule.nii.gz'\n", @@ -296,16 +439,17 @@ { "cell_type": "code", "execution_count": null, - "id": "dc07388c-50e9-4cb4-ab0e-62ae88d8c0eb", + "id": "b0199679-707b-445d-8467-3d342246322a", "metadata": {}, "outputs": [], "source": [ "#| export \n", "def _df_sort_and_add_columns(df, label_list, is_val):\n", - " '''Sort the dataframe based on img_idx and add labels and if it is validation data column'''\n", + " \"\"\"Sort the dataframe based on img_idx and add labels and if it is validation data column.\"\"\"\n", + " \n", " df = df.sort_values(by='img_idx').reset_index(drop=True)\n", " df['labels'], df['is_val'] = label_list, is_val \n", - " df = df.replace({\"labels\": {0:'b', 1:'m'}})\n", + " #df = df.replace({\"labels\": {0:'b', 1:'m'}})\n", " df = df.drop('img_idx', axis=1)\n", " \n", " return df " @@ -314,14 +458,15 @@ { "cell_type": "code", "execution_count": null, - "id": "44dd1afb-6028-4daa-aa05-41d57c7d9355", + "id": "d82157b8-ab69-4a38-9323-af4b58c6b54a", "metadata": {}, "outputs": [], "source": [ "#| export \n", 
"def _create_nodule_df(pool, output_dir, imgs, labels, is_val=False): \n", - " '''Create dataframe for NoduleMNIST3D data.'''\n", - " img_path_list = pool.map(partial(_process_nodule_img, output_dir), enumerate(imgs))\n", + " \"\"\"Create dataframe for MedMNIST data.\"\"\"\n", + " \n", + " img_path_list = pool.map(partial(_process_medmnist_img, output_dir), enumerate(imgs))\n", " img_idx = [float(Path(fn).parts[-1].split('_')[0]) for fn in img_path_list]\n", " \n", " df = pd.DataFrame(list(zip(img_path_list, img_idx)), columns=['img_path','img_idx']) \n", @@ -331,35 +476,55 @@ { "cell_type": "code", "execution_count": null, - "id": "97237f95-ed5b-4134-88df-a61d5b48d17a", + "id": "9fdccde7-f39d-43e0-be08-73998b984aab", "metadata": {}, "outputs": [], "source": [ "#| export \n", - "def download_NoduleMNIST3D(path:(str, Path)='../data', max_workers=1): \n", - " \n", - " '''Download ....'''\n", - " study = 'NoduleMNIST3D'\n", - " path = Path(path)/study\n", - " \n", - " download_url(url=MURLs.NODULE_MNIST_DATA, filepath=path/f'{study}.npz');\n", - " data = load(path/f'{study}.npz')\n", - " key_fn = ['train_images', 'val_images', 'test_images'] \n", - " for fn in key_fn: (path/fn).mkdir(exist_ok=True)\n", - " \n", - " \n", - " train_imgs, val_imgs, test_imgs = data[key_fn[0]], data[key_fn[1]], data[key_fn[2]]\n", + "def download_and_process_MedMNIST3D(study: str, \n", + " path: (str, Path) = '../data', \n", + " max_workers: int = 1) -> Tuple[pd.DataFrame, pd.DataFrame]:\n", + " \"\"\"Downloads and processes a particular MedMNIST dataset.\n", + "\n", + " Args:\n", + " study: select MedMNIST dataset ('OrganMNIST3D', 'NoduleMNIST3D', \n", + " 'AdrenalMNIST3D', 'FractureMNIST3D', 'VesselMNIST3D', 'SynapseMNIST3D')\n", + " path: Directory where the downloaded data\n", + " will be stored and extracted. Defaults to '../data'.\n", + " max_workers: Maximum number of worker processes to use\n", + " for data processing. Defaults to 1.\n", "\n", + " Returns:\n", + " Two pandas DataFrames. 
The first DataFrame combines training and validation data, \n",
+    "        and the second DataFrame contains the testing data.\n",
+    "    \"\"\"\n",
+    "    path = Path(path) / study\n",
+    "    dataset_file_path = path / f'{study}.npz'\n",
+    "\n",
+    "    try: \n",
+    "        download_url(url=MURLs.MEDMNIST_DICT[study], filepath=dataset_file_path)\n",
+    "    except KeyError: \n",
+    "        raise ValueError(f\"Dataset '{study}' does not exist.\")\n",
+    "\n",
+    "    data = load(dataset_file_path)\n",
+    "    keys = ['train_images', 'val_images', 'test_images']\n",
+    "\n",
+    "    for key in keys: \n",
+    "        (path / key).mkdir(exist_ok=True)\n",
+    "    \n",
+    "    train_imgs, val_imgs, test_imgs = data[keys[0]], data[keys[1]], data[keys[2]]\n",
+    "\n",
+    "    # Process the data and create DataFrames\n",
+    "    with mp.Pool(processes=max_workers) as pool:\n",
+    "        train_df = _create_nodule_df(pool, path / keys[0], train_imgs, data['train_labels'])\n",
+    "        val_df = _create_nodule_df(pool, path / keys[1], val_imgs, data['val_labels'], is_val=True)\n",
+    "        test_df = _create_nodule_df(pool, path / keys[2], test_imgs, data['test_labels'])\n",
+    "\n",
+    "    train_val_df = pd.concat([train_df, val_df], ignore_index=True)\n",
+    "\n",
+    "    dataset_file_path.unlink()\n",
+    "\n",
+    "    return train_val_df, test_df\n"
   ]
  }
 ],
diff --git a/settings.ini b/settings.ini
index 2d840ad..412c0e2 100644
--- a/settings.ini
+++ b/settings.ini
@@ -5,7 +5,7 @@
 ### Python Library ###
 lib_name = fastMONAI
 min_python = 3.7
-version = 0.3.1
+version = 0.3.2
 ### OPTIONAL ###
 requirements = fastai==2.7.12 monai==1.2.0 torchio==0.18.91 xlrd>=1.2.0 scikit-image==0.19.3 huggingface-hub gdown
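Editor's note: an end-to-end usage sketch for the new generic MedMNIST3D helper introduced above. The output columns follow `_df_sort_and_add_columns`; the dataset choice and worker count are illustrative:

```python
# Usage sketch for the refactored downloader. Any key of MURLs.MEDMNIST_DICT
# works as `study`; NoduleMNIST3D is just an example.
from fastMONAI.external_data import download_and_process_MedMNIST3D

train_val_df, test_df = download_and_process_MedMNIST3D(
    study='NoduleMNIST3D', path='../data', max_workers=4)

print(train_val_df.columns.tolist())       # ['img_path', 'labels', 'is_val']
print(train_val_df.is_val.value_counts())  # training vs. validation split sizes
```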