diff --git a/examples/2D/n2v/example_BSD68_lightning.ipynb b/examples/2D/n2v/example_BSD68_lightning.ipynb
index e252162c..9829136c 100644
--- a/examples/2D/n2v/example_BSD68_lightning.ipynb
+++ b/examples/2D/n2v/example_BSD68_lightning.ipynb
@@ -8,20 +8,18 @@
    "source": [
     "from pathlib import Path\n",
     "\n",
-    "import tifffile\n",
     "import matplotlib.pyplot as plt\n",
+    "import tifffile\n",
+    "from careamics_portfolio import PortfolioManager\n",
     "from pytorch_lightning import Trainer\n",
-    "import albumentations as Aug\n",
     "\n",
-    "from careamics_portfolio import PortfolioManager\n",
-    "from careamics.lightning_module import (\n",
-    "    CAREamicsModule,\n",
-    "    CAREamicsTrainDataModule,\n",
+    "from careamics import CAREamicsModule\n",
+    "from careamics.lightning_prediction import CAREamicsFiring\n",
+    "from careamics.ligthning_datamodule import (\n",
     "    CAREamicsPredictDataModule,\n",
-    "    CAREamicsFiring,\n",
+    "    CAREamicsTrainDataModule,\n",
     ")\n",
-    "from careamics.utils.metrics import psnr\n",
-    "from careamics.transforms import ManipulateN2V"
+    "from careamics.utils.metrics import psnr"
    ]
   },
   {
@@ -38,8 +36,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Download and unzip the files\n",
+    "# Explore portfolio\n",
     "portfolio = PortfolioManager()\n",
+    "print(portfolio.denoising)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Download and unzip the files\n",
     "root_path = Path(\"data\")\n",
     "files = portfolio.denoising.N2V_BSD68.download(root_path)\n",
     "print(f\"List of downloaded files: {files}\")"
    ]
   },
@@ -55,7 +63,12 @@
    "train_path = data_path / \"train\"\n",
    "val_path = data_path / \"val\"\n",
    "test_path = data_path / \"test\" / \"images\"\n",
-   "gt_path = data_path / \"test\" / \"gt\""
+   "gt_path = data_path / \"test\" / \"gt\"\n",
+   "\n",
+   "train_path.mkdir(parents=True, exist_ok=True)\n",
+   "val_path.mkdir(parents=True, exist_ok=True)\n",
+   "test_path.mkdir(parents=True, exist_ok=True)\n",
+   "gt_path.mkdir(parents=True, exist_ok=True)"
   ]
  },
@@ -118,7 +131,9 @@
    "    algorithm=\"n2v\",\n",
    "    loss=\"n2v\",\n",
    "    architecture=\"UNet\",\n",
-   ")\n"
+   "    optimizer_parameters={\"lr\": 1e-4},\n",
+   "    lr_scheduler_parameters={\"factor\": 0.5, \"patience\": 10},\n",
+   ")"
   ]
  },
@@ -129,17 +144,6 @@
    "### Define the Transforms"
   ]
  },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "transforms = Aug.Compose(\n",
-   "    [Aug.Flip(), Aug.RandomRotate90(), Aug.Normalize(), ManipulateN2V()],\n",
-   ")"
-  ]
- },
 {
  "attachments": {},
  "cell_type": "markdown",
  "metadata": {},
@@ -161,7 +165,6 @@
    "    patch_size=(64, 64),\n",
    "    axes=\"SYX\",\n",
    "    batch_size=128,\n",
-   "    transforms=transforms,\n",
    "    num_workers=4,\n",
    ")"
   ]
@@ -182,7 +185,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-   "trainer = Trainer(max_epochs=1)"
+   "trainer = Trainer(max_epochs=50)"
   ]
  },
@@ -202,17 +205,6 @@
    "### Define a prediction datamodule"
   ]
  },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "transforms_predict = Aug.Compose(\n",
-   "    [Aug.Normalize()],\n",
-   ")"
-  ]
- },
 {
  "cell_type": "code",
  "execution_count": null,
@@ -225,8 +217,6 @@
    "    tile_size=(256, 256),\n",
    "    axes=\"YX\",\n",
    "    batch_size=1,\n",
-   "    num_workers=0,\n",
-   "    transforms=transforms_predict,\n",
    ")"
   ]
  },
@@ -324,7 +314,7 @@
    "psnr_total = 0\n",
    "\n",
    "for pred, gt in zip(preds, gts):\n",
-   "    psnr_total += psnr(gt, pred)\n",
+   "    psnr_total += psnr(gt, pred.squeeze())\n",
    "\n",
    "print(f\"PSNR total: {psnr_total / len(preds)}\")"
   ]
  },
@@ -353,7 +343,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.9.16"
+  "version": "3.9.18"
  },
  "vscode": {
   "interpreter": {
diff --git a/examples/careamics_api.py b/examples/careamics_api.py
index e0dfa329..c9c96e40 100644
--- a/examples/careamics_api.py
+++ b/examples/careamics_api.py
@@ -1,7 +1,8 @@
 from careamics import CAREamist, Configuration
 
+
 def main():
-    config_dict ={
+    config_dict = {
         "experiment_name": "ConfigTest",
         "working_directory": ".",
         "algorithm": {
@@ -14,9 +15,7 @@ def main():
             "optimizer": {
                 "name": "Adam",
             },
-            "lr_scheduler": {
-                "name": "ReduceLROnPlateau"
-            },
+            "lr_scheduler": {"name": "ReduceLROnPlateau"},
         },
         "training": {
             "num_epochs": 1,
@@ -42,5 +41,5 @@ def main():
     # print(pred.shape)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/examples/careamics_lightning_api.ipynb b/examples/careamics_lightning_api.ipynb
index d1b07db5..f4a4b666 100644
--- a/examples/careamics_lightning_api.ipynb
+++ b/examples/careamics_lightning_api.ipynb
@@ -9,12 +9,11 @@
    "import albumentations as Aug\n",
    "from pytorch_lightning import Trainer\n",
    "\n",
-   "\n",
    "from careamics import (\n",
    "    CAREamicsModule,\n",
    "    CAREamicsTrainDataModule,\n",
    ")\n",
-   "from careamics.transforms import ManipulateN2V\n"
+   "from careamics.transforms import ManipulateN2V"
   ]
  },
 {
@@ -26,20 +25,20 @@
    "# Instantiate ligthning module\n",
    "model = CAREamicsModule(\n",
    "    algorithm=\"n2v\",\n",
-   "    loss=\"n2v\", \n",
+   "    loss=\"n2v\",\n",
    "    architecture=\"UNet\",\n",
    "    model_parameters={\n",
    "        # parameters such as depth, n2v2, etc. See UNet definition.\n",
    "    },\n",
-   "    optimizer=\"Adam\", # see SupportedOptimizer\n",
+   "    optimizer=\"Adam\",  # see SupportedOptimizer\n",
    "    optimizer_parameters={\n",
    "        \"lr\": 1e-4,\n",
    "        # parameters from torch.optim\n",
    "    },\n",
-   "    lr_scheduler=\"ReduceLROnPlateau\", # see SupportedLRScheduler\n",
+   "    lr_scheduler=\"ReduceLROnPlateau\",  # see SupportedLRScheduler\n",
    "    lr_scheduler_parameters={\n",
    "        # parameters from torch.optim.lr_scheduler\n",
-   "    }\n",
+   "    },\n",
    ")"
   ]
  },
@@ -68,9 +67,12 @@
   "outputs": [],
   "source": [
    "# define function to read data\n",
+   "\n",
+   "\n",
    "def read_my_data_type(file):\n",
    "    pass\n",
    "\n",
+   "\n",
    "# Create your transforms using albumentations\n",
    "transforms = Aug.Compose(\n",
    "    [Aug.Flip(), Aug.RandomRotate90(), Aug.Normalize(), ManipulateN2V()],\n",
@@ -83,13 +85,13 @@
    "train_data_module = CAREamicsTrainDataModule(\n",
    "    train_path=train_path,\n",
    "    val_path=val_path,\n",
-   "    data_type=\"custom\", # this forces read_source_func to be specified\n",
+   "    data_type=\"custom\",  # this forces read_source_func to be specified\n",
    "    patch_size=(64, 64),\n",
    "    axes=\"SYX\",\n",
    "    batch_size=128,\n",
    "    transforms=transforms,\n",
    "    num_workers=4,\n",
-   "    read_source_func = read_my_data_type # function to read data\n",
+   "    read_source_func=read_my_data_type,  # function to read data\n",
    ")"
   ]
  },
diff --git a/examples/lightning_test.ipynb b/examples/lightning_test.ipynb
index 8aeb95f1..cc6d6a33 100644
--- a/examples/lightning_test.ipynb
+++ b/examples/lightning_test.ipynb
@@ -2,31 +2,27 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "from careamics.lightning import LUNet\n",
-    "\n",
-    "import torch\n",
+    "import matplotlib.pyplot as plt\n",
+    "import pytorch_lightning as L\n",
+    "import torch\n",
     "\n",
     "from careamics.config import load_configuration\n",
     "from careamics.dataset.prepare_dataset import (\n",
+    "    get_prediction_dataset,\n",
     "    get_train_dataset,\n",
     "    get_validation_dataset,\n",
-    "    get_prediction_dataset,\n",
     ")\n",
-    "\n",
-    "import matplotlib.pyplot as plt\n",
-    "\n",
-    "from careamics.utils.normalization import denormalize\n",
-    "from careamics.prediction import stitch_prediction\n"
+    "from careamics.lightning import LUNet\n",
+    "from careamics.prediction import stitch_prediction"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -35,25 +31,14 @@
     ")\n",
     "train_path = \"/home/igor.zubarev/projects/caremics/examples/2D/data/denoising-N2V_BSD68.unzip/BSD68_reproducibility_data/train\"\n",
     "val_path = \"/home/igor.zubarev/projects/caremics/examples/2D/data/denoising-N2V_BSD68.unzip/BSD68_reproducibility_data/val\"\n",
-    "test_path = \"/home/igor.zubarev/projects/caremics/examples/2D/data/denoising-N2V_BSD68.unzip/BSD68_reproducibility_data/test/image\"\n"
+    "test_path = \"/home/igor.zubarev/projects/caremics/examples/2D/data/denoising-N2V_BSD68.unzip/BSD68_reproducibility_data/test/image\""
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Computed dataset mean: 110.72953033447266, std: 63.656009674072266\n",
-      "Computed dataset mean: 96.2784652709961, std: 60.512428283691406\n",
-      "Calculated mean and std for 1 images\n",
-      "Mean: 96.2784652709961, std: 60.512428283691406\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "train_dataset = get_train_dataset(cfg, train_path)\n",
     "train_dataloader = torch.utils.data.DataLoader(\n",
@@ -79,32 +64,14 @@
     "    batch_size=1,\n",
     "    num_workers=0,\n",
     "    pin_memory=False,\n",
-    ")\n"
+    ")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Engine initialized from configuration\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "GPU available: True (cuda), used: True\n",
-      "TPU available: False, using: 0 TPU cores\n",
-      "IPU available: False, using: 0 IPUs\n",
-      "HPU available: False, using: 0 HPUs\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "model = LUNet(cfg)\n",
     "trainer = L.Trainer(max_epochs=10)"
   ]
  },
@@ -116,44 +83,21 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# trainer.fit(model, train_dataloader, val_dataloader)\n"
+    "# trainer.fit(model, train_dataloader, val_dataloader)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
-      "/localscratch/mambaforge/envs/light/lib/python3.9/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:441: The 'predict_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=3` in the `DataLoader` to improve performance.\n"
-     ]
-    },
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "18c85bdeefbb45e0b962e2509ddaac6f",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "Predicting: | | 0/? 
[remainder of diff truncated: the rest of this hunk continues deleting stored execution outputs from examples/lightning_test.ipynb, namely the progress-widget payload begun above and a base64-encoded matplotlib PNG render of the plotted prediction, so that the committed notebook carries no outputs]
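Taken together, the example_BSD68_lightning.ipynb hunks above define a complete N2V round trip: train on the BSD68 stacks, predict tile by tile over the test images, and score the result with PSNR. The sketch below assembles those calls in one place. It is a minimal sketch, not the notebook itself: the data_path root, the data_type="tiff" arguments, the pred_path keyword, and the tifffile-based ground-truth loading are assumptions that do not appear in the visible hunks, and the CAREamicsFiring prediction loop the notebook imports is left out because its wiring is likewise not visible. The module path careamics.ligthning_datamodule reproduces the spelling the package uses at this revision.

from pathlib import Path

import tifffile
from pytorch_lightning import Trainer

from careamics import CAREamicsModule
from careamics.ligthning_datamodule import (
    CAREamicsPredictDataModule,
    CAREamicsTrainDataModule,
)
from careamics.utils.metrics import psnr

# Folder layout created by the notebook (the "data" root is an assumption).
data_path = Path("data")
train_path = data_path / "train"
val_path = data_path / "val"
test_path = data_path / "test" / "images"
gt_path = data_path / "test" / "gt"

# Lightning module with the optimizer and scheduler settings this diff adds.
model = CAREamicsModule(
    algorithm="n2v",
    loss="n2v",
    architecture="UNet",
    optimizer_parameters={"lr": 1e-4},
    lr_scheduler_parameters={"factor": 0.5, "patience": 10},
)

# Training data module. There is no transforms argument any more: the diff
# deletes the explicit albumentations pipeline (Flip, RandomRotate90,
# Normalize, ManipulateN2V), presumably because the data module now applies
# equivalent N2V defaults internally.
train_data_module = CAREamicsTrainDataModule(
    train_path=train_path,
    val_path=val_path,
    data_type="tiff",  # assumption: not visible in the hunk context
    patch_size=(64, 64),
    axes="SYX",
    batch_size=128,
    num_workers=4,
)

trainer = Trainer(max_epochs=50)  # raised from 1 to 50 by this diff
trainer.fit(model, datamodule=train_data_module)

# Tiled prediction over the test images; num_workers and transforms are
# dropped from this call as well.
pred_data_module = CAREamicsPredictDataModule(
    pred_path=test_path,  # assumption: keyword name not visible in the hunks
    data_type="tiff",     # assumption
    tile_size=(256, 256),
    axes="YX",
    batch_size=1,
)
preds = trainer.predict(model, datamodule=pred_data_module)

# PSNR aggregation exactly as in the updated notebook cell.
gts = [tifffile.imread(f) for f in sorted(gt_path.glob("*.tif*"))]  # assumption
psnr_total = 0
for pred, gt in zip(preds, gts):
    psnr_total += psnr(gt, pred.squeeze())
print(f"PSNR total: {psnr_total / len(preds)}")

The new pred.squeeze() is the one behavioral change in that cell: it suggests the predictions come back with singleton batch/channel dimensions, and squeezing them restores the plain (H, W) shape that the psnr metric can compare against the ground-truth array.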
wqF3iwFshwYVFBp4KTaLT68XhxPxok1Sx+VpOqYu/B9bnlOYxUf8bxr6iUAWCXMrTx7QrpHRYB8clMhgEAPRCtD3/4w7Bs2TJ4xzvewd7fu3cvHH/88cG1ZcuWwZo1a2Dv3r3sM9u3b4dVq1Y1PyeeeOL4Jj5ARn87RxaAxV1Z+t/yjBXBgYcICK7TMYcbRmTBeRueasvL/FR4b8lCS0tNneO1MubX2qaUQSKnvi7vWWLMF0JaSAFHBAHKEfNphXa25KgRpd89oehpsWvXLvjv//2/w9VXX130gNu2bRscOHCg+dmzZ09YAB3CRWwtnKQyP9+WWnxZ1LblR5W+cJ0JG0MiUg7XGUihdZx4GbjYllNGIOUYOGGqtkoFld5iWSfEfqANl+Mhp7XJ9Y1mdZDKWaB6CZI1EfTBILWKbS5yguVBJbJGehMka4DyTJSkESpl0+TWfxeGhttT0WwuxnYdPucyu5f3GI+/+Zu/gQcffBA2bNgAy5Ytg2XLlsG9994Lv/zLvwzPeMYzAABg3bp18OCDDwbPPfHEE/DQQw/BunXr2HqXL18OK1euDH5M6Kq3pwd7poTSbtaFxITrk0S4YoSMUTVECXnHzZPNKHQ9HBZCzSERJHrfEz9uc1OClUOoMFLthpxq1jIXnHdYF8KXA6rGS0GKOhEj0layd3JTr8LcxNZDrFyf3pMYC8C8FI3T+qmf+inYsmVLcO3000+Hn/qpn4K3vvWtAACwefNm2L9/P+zatQtOPfVUAAC48cYboa5r2LRpU8nuBAicNWL6ZkqcJD24+LxtsfhA36Y2Kclk6j4LslXHOB+kUm3ac2OOyP89gvTFWnyt8sWtqjDcbw0liJSa38/QX5q/LvY8vh5RA1LthKzejdjPmLpb/3NxVDRHn2PWD0dwASBIBxaLw0oF/T4ZdpzASMmdyCVIxg4YOd+Uww4W3OeJ8H2OYFFwjhMcg6QRzdKEK2C6FkbaTiZaDz/8MNx9993N//fccw/ceeedsGbNGtiwYQMce+yxQfkjjjgC1q1bB89+9rMBAODkk0+GM844A972trfBlVdeCYcPH4aLLroIzj333CzPwSi6BG2K3kpW4z49FIVAOyd86sAv8hRvo8ZhwPDe3Hv0yZ31wZFzQdYWdE4pxNUpzJNUj0KwREjzY3l3iYnBh3UKtKDw0l6jEuFKadfiEUjrNcxdFDFGeRE4RphQMQRzAZBMtG6//XZ49atf3fx/8cUXAwDAeeedB1dffbWpjs9+9rNw0UUXwWmnnQaDwQDOOeccuPzyy1O7EgfOYg5zY+JgkbYS4Kgqj4vlqao2IaGHiD/0pI9Ithsm6kuqahxxlJGgP5ajVw5BSSJQM2V0dYXOtVVRQpLEOWcwPLS9DEKdJGGVIg6Yqw9CHhhoEiOVVHwfqe1OgtUdG0uB1owbGNrcarZK5plO9vtUgoXPDLpOJO/OWDaOSTqVVAMkqeVVkUy0XvWqVyXZMP7lX/6ldW3NmjXwuc99LrXpKNR+NSlZCGGQPmlifMd2PFjNP9tITfxmEb+Iak0HUxKJB2Fr05YmWO0G5eva2HeFlHNQYj78M9L7o+f8/E+SYPl5K+K81LEfTR9ojJREkPBca+UwqBNK2Inhb2ksNIZs0uAkRks4ggSLpL3AKkGMJZR7cChNta8h1G6Yldn/T6Wa5jnGzgOKzSGIHSGHGYC+WUh9Fbd5MIHVFuOgApgX7nHBzNI7tJ5FQZca+jLIRyRFE6jUa35OsG9SL8maSBKUcGFQaXv0TIt4iKrADGJVMilvks1U76tf7y3C5UHr9+NM69UkPVqe2uOsai8hXjFsR2AuS2h36PqYL8hsaO/VlWB5OxhqY6K5B5cUEtViYTF0eDVuteia9cAh5dgYq6Ze+qxAYPoA92E9+jNp5LabI1lgF2VKsPzvVgCsE9YY48VmyZuX6/0moIiEVRvWPIDY78CBx3IoYu9Bzjuza/C6hAizqdzs1q5Up7XeRSAZlcZ0S1p03mKbZsQd4u9NcVMqLUKVE9Ziq6hqEnO9lEvH7fkXHFRtD8OwYyGn2tgkDJwh7rOkUpG4VwmT0JVLaYksHlWxjWzxxqL57Ro7AyOFSFIeTX3URbLS7Dy0/5jjzT1Ucd9jqku67okGAttEY4TLBe/o14AwPni5ct6GHKQyQt/j9TEajlIag1TtQSm1Zkr/mbKuIyFdOpKWKVYiM1iW4yI5lSDnCEHL43uUS1dy24lBwgsJjqvtS9/f0bmBrYdD6thy6ZloIGuX+ktgIdcLZ/P1yGBqkpweUgLFLejYdwBg1IaF5sYSB7YYUOB8mG5JCwCCTBIUkkFWsyPg+9p17CGoEStqS4gdII4xSlVDW5VDkhOd+tZ7oDajGz1miJXuW691CejkFjmVUCQHiZKbVkt9gwkXlrh8H2mcDbV70PfxkOxpFNJ7YqnAOQDIlBY0SHPbIlb4f2ZshL3Ird3mmv8mnLdlS2uvqobsOcd4UvtjDHReLWMYjQs1SMgenLMRG2aRMbc0fESy9eFQHHwNgGfSJAepTCwdSasUuDRE3A+AbK+QruVyVY2UVTd1YLtXce+vUvV1zUDQxchfimClZhbQJGruPgdu3GJ2E+ucTSKAu0+HEoBgblthFlT6XwhbKwYmWNqazM1gYSVYUScqagPM0EhMSKKffkmLgrqBUmkLSz6pmykqJRUMqGz1zXNj9dBDcCR1mT/IqHGTnG2OwuIWWzqgNKg7YhfiymuBvpa+5jIZHAduCvaOrDst24WlP5xtSQw2NoyT5ZBipVBmboS+iIH3yHYpuu4Htq9al7YoYpJPqjdmSmCutNZjiQZioSf0XVK8cpOk0X5V4ktH0iIulQAgc1vqRiSqJvyT3TfDMFPvMbF/QjnumZSDzXKIa/WVIliWsRK98mKqlZ5d8jH6DtgsXH9ygGzOfijR51SJgMKkzitsC8olWNz/tC78g9GHbVma8wnbTKda0qrwOVkN2I3XeArWjMQlQZqEqL65Q/oaaZFZdOgRT7FW4LKvmrPHcZACOfuUrCxI2SypBCtVCqdBsVLbk3BU8R52iUSCDW62rumuBMk6LgrHLwZnN5Kjj3cU6pYyfaS+m+8fZ8uS4rhy6pcQG8sua7BL/0cqUPY7iAmYaqLVgONksl15jfp4sS8kfU0pSOqIvjl6j67jWgKSeqMUur5bymExCc8uf+B2HSdrTj9LfwDG725kJEQVIQOWcW2IuPAeuXNB06QlPZugbiuBSZ0TE8DSIFoAw4XDEK8KkK5b23wWYsVF1adCk5b89ZTYqBxOkHteGpuqIn2ox5wqPQRKHJAcjDFHnVAoZKIFbv5aKi4lNsiX12KwOJUZloyDOvnDciKpnLR4xUKIBitXSOJKldS5zCaW9SfaY5m5oBqRroStD5d63MdMO9ei+nLxxIFFcP8JeSKWq1waZx+xenhZUUrimkTOs5j+HSe87AOpnnbTwkFKc2c9tDkvOWt5Dn2odmPxhlYwc1qEqFIbd+k4rhxI8VVd+ibFlvaBlHlxrsi6m2pJq7FXWXJ6WSUSi90qB
9RrkUt2KUlb/n8A3uvK0jzhQINDoOnL3Kg+wgFriTkpB8pJP7EYMA2x9ysd5U8lFNx+zjeWOvWJqJpz411aalUYSzrcd6W4eMRUWKUommmC64Mv6mmNVFXq2LSkB6YvgUqXkbaQ3XhYDZIkC39NohjoHrXEfcUwQbPBIhzRQpAWi7aRcgmWhfMFCLlQCzck5VHrCHVzD6oWwWrFwsSkLfxusTG1coOpXHwul2p5LpVxsUhZsXnG0kHXvtWMdgHfAygjMWlzlqOORTGKpuYlpwy+sNpmNujXzymk+aQxZz2cAyI47VNX6c/XUQBTLWlhZH/TRvKMs4JLYUTtUVLGg5RF2EVVYGgziHVhyrCxMJw0aBi/1ve3+rKF5UAN4s08xDnOFnP23Bqiz1sh2myJPbZ2I5bVM1ERYtday0gyjdpFM+PTxH4rn3Chj3OqfzFvpMHWpkhbIiYtbXW1c3uUdBapKoBWstg8TDfRKnXOxdQukgFcWqgS8UrpTwx0Y1nTwETeodMH7QzAh0jgFVaScFkdHdqdU+4VcsLAbVnHOkYUUg+XILgY9Ge57P6+Dql8KfVpaz4IgS21XnCoAiZcAIIjTcF3TIVGkDQGmmIxMYqJmG6ipUEVyTlPoCrkGgHkYOVUF98Ycg9Zrg2pj5znUxdUAwA333YljnB56ocOY+0BhO9RctNJzjiSuohr26J6bnnzGdymLXFxyd5ciOlxIzsmhSkrurfBjv73xJWCuvt7FZTU35RsG9Qep0o9lfzNOerdCCBLXbFUY325tKcmEViIwHcNBbxFlwzRcs4NBS+NWEloPuWBCJcG0UahPGdxY80MQhU/TglgUBfN8fc1NJw/klAnxX3GFn0OAyBlGdE8GTXipYHj6v31lLokiTlGvAJ1GHbEUJ7hnDQ4dSHdO9w7NTFaiHBp/eVApSDfjvUw1tZqCuHCVao2XuSQUSK4GMMS+8eltypNuKi2SvoyfAFMP9HKtmUNwsVICRetv6t3TOBKboi9gkRVHep3VVXjb3GpzwzCdnK90ygM0hb7TFeU5GwXQv1j8bzUHChawbsMU6GNs0U60OZW8sCk96Xn6DUppRFHTHCfUhgAyUvXt2dcU2wAdMCUCIe41IbkYUz7bgE3lpZnpXnBWIBkA9NPtDCwlCWmc1EGuSEmQpBtn2mDAIKFFBASCslFNaWPLVtFAcKFOVhurFNtgxpiB+Kksnvn2ga0IHMOrh7PSwqXXFXAJorF9eIUYfQQjcaDMWpbjtFLIVgxYOKl2Z40+CBja8org7TFBjbT8aTEK+aFa0XUq1TZ1yaPWWX+tPOirgHmMrQ5CqaaaGV9ARMvUKyXtiwQq9SlLW7pEKiZBW+2T4TODeN/4tKCmEE7h3DR9mKOKrmwcvAlIa0R7l20mKdW8mZkT4plxkgBJSbU9kjrl9rmiCs3Fn3EsKWsQ/oOVmbCahLwbVAITiGB5BVTCebavzjPx8WGmXrQiNhC1w4aakgumauMqP34AN88OMqFY87cg/abqpP6RizbvoVbtnL+GiwHK9fXEipMjUB0SWuE5xLXgSUK/7+r0yREiXD5exhY2i6xrjTCxakLfR9w38S+CyaBFCifeWmpDHF/+wg8Lm0vy2nb4oXdEUsvuFgYNL94qqpqSRfsh+Sam4P4Dy1L60oMDHR4AXA/UvnmAkOwfH0YtWGhcfCxLilSneSBxY2jBNZjr+AGNamOFeDA8dz26DhaCKyV+bCuQS7Q1uJmvVhAA5otQe5iXQrBpONkDpTPYKolsGuInBPCuWHuj6WOCWJpSloEnuPBxEklXADjFFEW0MBMYdHpHkYhV9gKwBUgSliSKsuiRkhZnDRYk6KUbanr4VhCbZXiIcl6zkkSliAxSPDquZQxsUgTsTql+1RdaBknzJhZvW5jZVpOAoKXIYbkgGXtQ8tmFbYRTeCLQeOsunr4pexjrSyVFksxipmvt3SIVu2ASx2MP0lvshUZiUVQf0luU1n0GFlBwNjYPsovKH1ri0Pr+1ucG/gkOW+rR2ZUCkR2jUnGsKSOFZ6n0nZB6ZpVZdnH2KVy9hbi1df8arlFgexX696VxrQU4bA6RLWYKqH9Ce2dpUO0CEwBrJJOOSHWy3rgq5BigDgQSUzoFATBnhJcDQBzvLsuLUpVgVg1kuJuHDtsO3OXHSUqSYrBzjtaGp/I4aUiZoPUEHDDOHC4btdN34EjSjROyZfT3qkU4eq6n6RxZB1hIuslZQ4FAp9FsDzomHISKu5jl/GPqYApsbJIpth7sBBRW3o2LSAHLNVn+0HXjKD4EyeTyBtWQjrJ9dQzLKTsz0Lk5FpMiT3h2slVR8bcvKmdU7pnuW4FZ0dIOfQ6J7xdHDYM1d3a/2iQbK/B38q75hy2OfatPgiOtQ+azUoimvTahLDkJK0WweJgiZEYVhaWp8jJvoEQpDTS3KSbB8J+tLwQvVvzsJLR84QLE+JxOGlLTDYq2cw0pETum7Mq5BIowTuUk5qxSqSLJNHVbpNat0Sw/DtYCS/lrrlYQkhkbCxlW3F3kb0a03hwkmMKNInLKpXSd6CZMqqIrbLlcGVUE6Z4FWqqv5i9c0JYkpIWAMQHkYrsUrzSpN1HJYJFIDqVDNCBXg3aB04pmxM1Gvv/cftc2S7tWOdDI2alHENS6rEcuElt92DPirUlMEyt/7n5l+qkf2vXLMjdrxYJowMcJ51Y2s/pQ9f9nUIEFwhLTtJKAt1olEZoHIpzQy4pIm2JXoAjzjA7gSxBwPVSe1aG3c3cJ5oFw1/D/7crH/crqKsK79PruM2UfqUgyY7EZYMg707jpjho3mucGtSXTwEdW2vgLQaJM2wXr8aOSViay411zA22ltZRqles5f6AtKXZz7g+eGkLPy/Fl1FIKu1cjUAfDPqgEPOKqyxSy7RBmpyUTAsG2xj9DAe5yV8vgIDrlTZ+xOV/QYFtFV36ZY3/0lAyPqWLpFvCbgewMNqDnP6WkIgX4l05lXoMUvq5VuB2oX2BUXJtayg4D8kr45ZbboGzzjoL1q9fD1VVwTXXXNMq881vfhPe8IY3wKpVq+Doo4+Gl7zkJXDfffc19x977DG48MIL4dhjj4WnPOUpcM4558C+ffuSO19pAy65nFJ7lhQtP36Q/6FlEJxzecRISo2jwcqx4n5jlU9KVgQLUg6bSQUsshxvLXPsMcO0pX4AnRPmCJi0zmKB2TlIOfhIG9raDhgmLUif1p8SaG4J+A+IfOKBmZKclgtcxp61ozL+TFDPhdy9EHUcMs51bM3HEiW3+mD0HUhE8op/5JFH4JRTToErrriCvf+tb30LXvGKV8BJJ50EN910E/zd3/0dXHrppbBixYqmzLve9S744he/CJ///Ofh5ptvhvvvvx/e9KY35b8FApvdgssYUMJwzOjmsyUncUFENjLTXtQd3tfbXFpgKWuSOnKJWKUSKq3+PsFmBckkXLF3ZQ7f8S3+EM5a/3hO/N8aU5GDlDWeIw0nZN3oQ7uyKIDP2Z4IFkCGTevMM8+EM888U7z/X//rf4XXve51cNlllzXXnvnM
ZzZ/HzhwAD796U/D5z73OXjNa14DAABXXXUVnHzyyXDrrbfCy172sladhw4dgkOHDjX/Hzx4EABATJgrJoHloMYxEbdOSqRi3jukbjfoQCAi9gST16RQZzKo/UVLturbicWeJXk4RWKMUlDSzkHLDmD8PiUCr6mzC24rNbja+t5+jTeEq92OA+XdUuwrUjJfANs801ilkkl8rbFQgnculzSgspwhGJKHcaw8tZHFntX2IucR2awR4V0GSHMwqPISnePqOj1NUNc1/NVf/RX80A/9EJx++ulw/PHHw6ZNmwIV4q5du+Dw4cOwZcuW5tpJJ50EGzZsgB07drD1bt++HVatWtX8nHjiicybGHS9KTFX3KZeKA4p5bCzLGakFuSIYHFOMOWwT227NEdeArn9yZX0Shi4NUmBxjlxY17S5ZmTelPGNGc8BKeTzlByIBbbZ11yXUooJSH1IGkVJVoPPvggPPzww/ChD30IzjjjDPif//N/wo/92I/Bm970Jrj55psBAGDv3r1w5JFHwurVq4Nn165dC3v37mXr3bZtGxw4cKD52bNnT3Cf9cyjP5y4GnNBxZtHc1sdFFS1CZ/3aHInMna1Vv5BrU6BY43q3EumZ6IHHz0wu6qYTOUTCQQNRbC25dy4fJfErQyCfJqSnUzqE8C4PzSjCf1p3oP81PPhXHLPpTIsmsdfXwwKtvXivTUgY+shpbvywGNG7/n7ozKuGUvijMGdPdx80TaFdthyFrOHBOk8LHlOCCjq8l6PBv7ss8+Gd73rXQAA8IIXvAC++tWvwpVXXgk/+qM/mlXv8uXLYfny5fkdS9GvWg4yiXDVQnb14NFIyiRrfIv/P2qXKLzJc13JJcRcg0tzaiWlyJjqqnQuxhL15ToImYghmctSEnaKCq00zBlaLGENpExSiijDGJRgiFLGmevTBIKMi0paxx13HCxbtgye85znBNdPPvnkxntw3bp18Pjjj8P+/fuDMvv27YN169aV6wxNwyRNBsfR+OsAQ0Lkf+bndQ59MACYm4NqMBgSJoHzxUl8x8/KHkBVwP21U0slS3eDKuTSc+rASJJwFLsFx0lyXKc0Z31By1BCD2n/g6WPpq/MJyxwWIKk3hY+ERL3RsMODmSsUg8XiwRG29TQ8iAU7CcelnnWJBHfRoxh1OaCSmGx9F30ecHJJJC2JLMEvq+tf1Yirvlnc6HNv4SmL073+jagKNE68sgj4SUveQns3r07uP5P//RP8PSnPx0AAE499VQ44ogj4IYbbmju7969G+677z7YvHlzye6MUZJLq8kh0EXEpoipIFJQQJWSRMjohuQ2qNafUjaS0pKl5tVJoblFe0jfXiqVqYNDX4RdcvUGyJuH2HqT1G3Reg3aCykOjgtzkUIZuP810GTTbBmBKHHXYmUtRCtXZS7BKUxEJpLVgw8//DDcfffdzf/33HMP3HnnnbBmzRrYsGEDvPvd74Y3v/nN8MpXvhJe/epXw3XXXQdf/OIX4aabbgIAgFWrVsH5558PF198MaxZswZWrlwJb3/722Hz5s2s56AFzjloLZUSXwbFBIq7LklxI1VhK9sF8uLhOOSqqgDmxveSPo9SRzY0l6WCqBdpf9m+49yDXSUsqQzOkk77zD43KufHi6picgmZMaVWAO8t13iRjfpCvQm5NjTQ7PFWpBKsFG8/1rbj5Hmg4DKu4zWZyrhx5akn4fBiWr0Y+PtbAMO+smEIBRhlrO3xoLkKfVuclshavwd+J/y/Bs6rUpIWg/1k6yKHZKJ1++23w6tf/erm/4svvhgAAM477zy4+uqr4cd+7MfgyiuvhO3bt8M73vEOePaznw3/43/8D3jFK17RPPOxj30MBoMBnHPOOXDo0CE4/fTT4Xd+53fy3wJg5PbrwoHGKVImrRMfDIbqxAg4whQlVlLqqFzudhIqthRohx09UMVUUZEDM4ZUgmW19TkHAIiQLRQwcUqRFqzcteT6rNVn+SBjKjgibPk0CzefjeoQ3aMHPYccO3Afe5KeG5Spzz0nk2xzcbt/DJWbwki3gwcPwqpVq+ClZ/06rLnz36A+cBCqFSugmuMW38hzUBLvNTEZSVqB9IPtS1qd8/NjKQgfotTdPGehYHsGlnok7gm1HbwD6TO7HDgpy/+P70vIckFGY2uJL5HsCPTAoBxfTKVH31HLr6hJu9z7CHPQfpbj5A2Gf63OGLoSDrzuUuefSvGU85fevXR2F4C29K+VoZAysdAyeE9atUPS2aFJXLGvUtC2Vfsfw+hIklZVAcwNAOZrcN/7HrgfOB7++SdWw6Fl34M9l7wXDhw4ACtXrtT7hpsyl5xmGLz6NIh0XcoZJlbE2Jk41WK8Q6gPGWqBFMTUjil19AlNNeEhEWTNLZj+byKcE9hWkt2wKzBDw9lzrHUAsOPgHYCkn3HBhAM0FUlZ7oVsNDRFFfdcDjp+7ijsQyqzQNpO2UuafbMwpj/Lu2SDGaECCEV4joNkDzOeU23sZ75O+k0c9Pz4kwR1KHpz4nSQDZlZ8FTqo8QES11BvU0FxM5jUAXkEhtOJZPqrm2x4XA2MJqNwOrgwV2XDOW1AxgI6kec4TyQFpj3sZ5reF1ZDyLO9qKWj9RL942kWmQk++FlW7+rqkJfKfDqOKZgovTWan+u/byqdGL3LJayrRISoyqs3TBbzrCjNns8PsukOc4hHLRt7Zyg65Jr318v5bkIS0XSqiOcJlb1cfdoXRZuhxCoVh3N30QSosSFM7amInERFNcIc16PEwgybJAjZcSkk2QPNaw2Rgd3ikMI51Le6lfC3E1C4gvaKzPnYtJdj64ESynHSn1Mm2wZc4cM85JyHkhegqVg9WCM/R2Uz+/O9Etajc1q0CyipEzKioqvlWmCbhasa+fqaeJ1yCFU1aNio4XvORKNw+K+3UWNmlH32XrIEQ4IZ6Z5tKV4zUnXJqEeDLz0qva7iZvHYLPIAZY2S9XN2euktgHGEh/AkI3vYueKrTNBwkoC4tbZ78N1BescwuyrpnhcSm3OHNpH6cxQ1gSrxZFAv8EV6WdnSB6BHKi3Y2PX9wxZt65MP9FiIH5YERMFC7ECsHtLtS5FCGdVjdUCEsdWcjFaxfzgekH9es4h1tUDEMA+drnSGmYCAEI1SSrR1tzNNXsPrYP+jQ/K0t55QR/TCBabHYZxLCjyodQgtoqMn8Qo+jOCjhfjdNVimLtoGkyqe8Es4e8tFLBZxarqTMSSJFoYjdcfgCgRRT3mANSDAnsWmjeXc6Guni5U+ndLHGcWZizGxjkAGB60njNsbTbN8SL34MjOJp/hsQWg288kmxMXM+TL43q5+ihaqiW5q8n15XjOsbaXiIdjC6j8HCPJZngJmtRro7XfmXBZCAEFPmwlQkAIWFTbo4xR57ylUlxpSr2pBCaVAUJ7Lvdtp5poBelAfJZ3i2oQlVOlKmpIhzlSnSKhUXAExY0JSFIgsQYLd48kBLY+ShD6UjtIhGJSsAS+4j61nA6oh5tgv4qFWJhd36mEkHYAOudCou37aZFm6Xgo6kCTmr4gkglaDvf
PaWjaHelXRadB61cXIiYhxlSX9PYkmGqiBQCh/aiSJaZAsqAES/Icw22M1Hmtgz7GfVcDABjZnlKyDTAEVm1T6oPkTViB3aOPHrr04CsNawwSd116FqtaY9ITABo/y4Fe6QQs6J8y3pUiBUn9E8Cp3prDPXg2ZMR8Oble/p4WJI9zJKrMWYY05OszES5cf8xelHKvJgxKhmRYUSJggeCiTtttjbk0zl3UeP6cojbCVuxX9zNjuomWnxtpAWJV1+jwiRp1uaBGWmeOg0EJLkzz2Ilx8626iOMCQNr7pBBgK0pnkG/qZQ4Eye4Uyw7RCjKOlMfPWDcsldgSJKxo1YIUFLWr+n/jDcCogaDuJLU50y5XT4p2InCqSLEXWaA4c3AoYqPDQHVx9aamhYtCkrIkwlUY0020LMDJWmOHtOZd5e1BAGPuWzpgOe4cexAGcTpIRQgwnmxqZPXXNc8t/L91U1DDPWfvmSS4zBEANgKZwsVJxIve9+DKCfYcejiIjFJM0qXSLFNWOgBFBwftANMONUk9JhEZwYnI1Ff/XDO+ld5vrR6uTxbQfSjdz4BEWKKENbKnNUKoSnKpEpakBaJnFfts93Nl6RMtgPEiwIQruB+R1ADCA0Mjbr4d2j5+thXky7WNNk3P3kDBwRcjXCkEsaQ7OXUhty5+60EWeydREhmQYkQC04zzTOwPhqYV4D4rIx5amqRpIVTcNexZFwGrhkfvLkoCAuHKQq6UsVCeeEZVqUViKyZhcQw1rZuLWZ15D2agIgSHO/RidiqfpTsGiyeiIxJbRaQt3G8m/2FwP3cjC4cta6z3iBEKzdbEtKlCivvqGvtkcZbgytJnrHkkGeIV72OVVr55LEF6wtcth0rLrmlwTNDAEDCWeOV4/VkJsTXhQKxO6ZlEtOyQyrunqBej9VoIS5AwQXG3x20AiP3n3A+sWNpEa1AB+ETrgUgrSEqSBKYdkJojRiyxrE8F5FWP1BXdsjBjmyliexIPRkt2bIycT49I4FRw1FOUIkd9qGWrEB06ZG85Vu1ikUi4wynFRilJiymESVpvEiFsJHMj8eJCKYRP0IiSV0wqFteyFCbAaDEYdWawF41qOxNRYdTDFvtTjj2sE+Hi5pZTn0bXwPB+149ALh2i5WpoMky07jF6Vw/tsKMHl5R/LJZwVSuLXeljrug9gA3w7APWgNimvHBgDZh7/rpWR4xQZjg7sMRKC1y1xPr4Z6hHmgWphzZ9poSzEAZlNDSP04iqNAsWCbKE+j133CwONQxDk+vIESWIkh2PY0wk1bG2bgupCaebaHVZ11Kqoknna/N9wYexZEvrksDWiKwNEZOyrAGxLULFqCex80MALX6mLR1ZkGTYtx6QVuQcpqmftUi9JyEirRT3luOQOhcYwlgvqq82FexLVHqUpC7uGqc2rtE+ZZ5xHRmS6SZaAGkbO+aAkaK2slyn9hdX6+7l2EPRP2eJIcsBcgjB0pZpo1LO0kropXGKESvuessVnfHoFMoncfFR9StqI3UzxsbaHwAxtU2u9xdAu8+p6ynlcxaR8AKzTS7mZdcFRk9FCdmEjlGRarkPe2UELKpk6Trn8p6i8TBg+omWFanfhaISTk5sjJT7jdaPgTciF6NTgmBhcAHTuM8U3DvEJEAa0yQdmkaJiM320AzTXNgn6dMY0mGdqtLocmhaPBdTA11TVVXZh2yHdSi47qN/bPXkluP6a5FsOxLIVEITU91L94oTsxQHHw+/NhuJqwIQAtNTsbSJliZZScGk3DVNjWVZIBZ3bUwYOacRjJinY4zAluBOA2O8QLykAFxJspIIjNQ+QIsr5hKWqrYnzVGisKtuMnIkKW09xj63zpXp0l4EUUaiBCx1WglwZvzUuJhBCkXItevFtCbJDEIpiRb1pwv9mn6iRTaeybGgJof+oMq2e5gn3eKuTW1ZVCLRiJX00cSY3ciX75KVWmoLtxf5+qvogechbRxqpOaepfr1HPuHxK33oaLqIzZI8wDTIEmgmQd1NPNGCoGOEeGSBMtQppSEU8oxKil+z0Ma/5R17lwv8Vke00+06pp1U53IN5xSkZL6iEqC/h3VLOwJ8VBddMuxT1xEJKlxMYHj4zzuYh52Utu0Tom7jx04MQ68FLq4F5fM3kDfKfEdkw5wC8HixqCr55+UFT0Di41g9VJ/jHA5RxjrEeEKNFPe7T2/G9NPtDw4QkU/ksihhIQRAz4YVUcMZgP6ODOqVtTc6qnUJanpNFi9FkXOOXSCUCUpiaDge2wfM6SkmC3LevhMIlMC7ZsxDmYSyD2kA02IVeKVAlv9cwW++m0N/aBu48WJVVfHGHuD7WuxOcAfnuTgJSz/d8WfIbM4LQlWh4vmS7ckxVMqMdMkDpPagaY/oZKWEKBM0TyHpC7ua75UKtNUihwB0wKNJS+/0t51KfXE5oJyiNZ6c5FiF01tpys3Hfxb/pAOXK4xQaaHppgIO3JdSjNE5xgRLPw7hXi1+yDYdSO259aYRMbZkjhYzXHISU1Wpoh+NVmSyrGakLUF6s1JmH6i5QemduA/Yy/CH7SSmk6yC2FQWxOG9BkQrZwgPTULbr7NAZryjflqPfHCSXz9n5yjB00qTMHZ5iKZyOWsGxkqypREprhMbMxSXLebIh0O8L44aFR3ripIyxTewOoxSstTl25c7/w8cEi2i3GHa9CX8VwHeRENn3thoY2FtNcByJ6sWmNiXV+p/Wqy7tAbVilPSX9Fx7OqqnHcVmA/zGespp9oUXAf76P3cbwURe4n3qV8hQD6ZqAu5HXigRMzeFolLwViIlfJsSRH5Wp935xUMynIOShKObIURuonKYq6SksHOdoTMZWc1B9zFhe6N4jTRef3tdinNY2Pa+/J6Lf6AOS1JpUXzjQ2tZMFnJSmaTAK2+mWHtHyoB/Uw58Ih7n0uC0Ae1wSRaw8DT5Orr8tXbQ25Lw/wPx7V8hBwvdDcJTgrjsHMGf8LAZFjpeXBsEDLOcjfMkHWetzM8y7WA6fvhyHBu1vUC2KTA/0kNben3EwwhnwW0G4gS0lXYJmuxAjJipxiql/54d9luzXGFLCgVg/Ao0OP44poKu8NY8+Jd1ceWZuqolWkA5EclvHmRJobBH3CY6Iazb7v/XAsWTOoFIg5uKRFDaOR8pYddxYkbEUjeXYm8/K8cbUOT0g9WAW1V+lQgEs6rTShGvS0h/7KRthfSaHllB7lyDdxrzbJCykpNySWhhC29deydQSJGfRoejgPjjVRAsqYFyhSdLcRhoYSVf+lqvHEob0SXUuxomboBgxisVKeW7E9xM/i7+GPgfNgvY2K04FpEo/A/KbxlH58auqkCD6cnNz4zGwqN+0fGVaTFYKGA+ylkRhVbWkJD/mIDm0UKKkBbXnMkHGbP5Fpa2Uz9WU+JyMR0mCFVTRroP9DloKg6E5VqQQIymWFPfF6iAlPZ9AwFrqRU17VSWubQVTTbRcBcMDa25udMiiA7cp5MabtOVE4G1JkYY0l+lWp4QFENQhemigZ7nbDi3ckHgFjg+gHJD+eUysqNMEJljc+/
pNSLNKSFKfFBslweJFRe/RvuB5B7Bvxlh2D6l8St0jiF5fiWd7w/Gy/AHfhsW5x+QAlEqIyFpLnh/cdMq+ZNaS1dbHEnkuRRuAQjCUtqQxdEiroSU/kPoigZZPHHs6bq3zlf0OHWnLVU9O70E3VwEcsQyqI48EmJuDajAQ3WabYZ7DIi2RapqKO3CH3PzHgmqbDTGQpQNXh/ph732DJTEMfJ0hmNXcQJam6N8xzM3Jz1hc3jnVnEWlyBmDfV+8BJqZIy/4rpnWD82tWOqnh0DkK6sDCXm+acWSromUjR7g5J1yCevoH/46RUoMlkVN3iGeS+xlcJ4AyBtyVE/CvnJOGCfJCSLVfqSVT7FL13Vj51bXxWAAsGwZuLkKBk8ADDI/0zHVROuJoyqYP24lDI5aDu7IZVAPBoKEMvzFBrXhayVEeA2eIwZg0/O3+qf1LWX/4TEZtVv7RTeA9vsV1J8Hnq1GdVaj7mY5e6MKqFFhRruI2mUIEP5baZs+27wCS8iNHdL6HqujLtQOQfOeVgYArXkAaocW2qf9jvUvVetYKAa70vanJEVntqWuJ9oXqY7Rs1JZ8ydDhPmpnGuNg6uqYb1zFcC8g8HjT8Ch44+Gx9bOQ73skK09gqkmWgefAfDE8Sth8ATA4aMBnGeyK2LnWwTOUiLwOklRkwtlPZGg98WwiEq4b+0XfY6W1faBxCNozlExLdCk5t33w9JGLg8g1R2rT5kDdn0sxP6IjV9sXcXKp6DL+l5KsIwhfn/hjKhcu5w/k+cOATy6zsGHXvs52Hjo2/CyjG5OJdHyovh8/RgcGlRQLQN4Ys6B829TQXsQTRV37FjixuEOYLWvJTcMJVYS0Uo5LFIPWal8yiEiPNsaR2d4NgF47mJtReMoyXoV6zXWpzIszLxmOXLlPBMjntC+3zwaaa9L1vBUJslM8DMYNhUF1y8Hyxjid2f3gEM/zc3RTw0wWFbB48uegKcdegBOeHzF8JFEp6DKLYqgjTT867/+K5x44okL3Y0ZZphhhhk6Ys+ePfC0pz3NXH4qiVZd17B79254znOeA3v27IGVK1cudJc64+DBg3DiiScuifeZvcvixVJ6n9m7LF5Y3sc5B9/97ndh/fr1MEiIN51K9eBgMIAf+IEfAACAlStXLolJ9lhK7zN7l8WLpfQ+s3dZvIi9z6pVq5LrzIz2m2GGGWaYYYbJY0a0ZphhhhlmmBpMLdFavnw5vP/974fly5cvdFeKYCm9z+xdFi+W0vvM3mXxos/3mUpHjBlmmGGGGZ6cmFpJa4YZZphhhicfZkRrhhlmmGGGqcGMaM0wwwwzzDA1mBGtGWaYYYYZpgYzojXDDDPMMMPUYGqJ1hVXXAHPeMYzYMWKFbBp0yb42te+ttBdimL79u3wkpe8BI455hg4/vjj4Y1vfCPs3r07KPOqV70KqqoKfn7+539+gXos49d+7dda/TzppJOa+4899hhceOGFcOyxx8JTnvIUOOecc2Dfvn0L2GMdz3jGM1rvU1UVXHjhhQCwuOfllltugbPOOgvWr18PVVXBNddcE9x3zsH73vc+OOGEE+Coo46CLVu2wF133RWUeeihh2Dr1q2wcuVKWL16NZx//vnw8MMPT/AthtDe5fDhw3DJJZfA8573PDj66KNh/fr18NM//dNw//33B3Vwc/mhD31owm8yRGxufuZnfqbV1zPOOCMoMw1zAwDs/qmqCn77t3+7KVNibqaSaP3pn/4pXHzxxfD+978f7rjjDjjllFPg9NNPhwcffHChu6bi5ptvhgsvvBBuvfVWuP766+Hw4cPw2te+Fh555JGg3Nve9jZ44IEHmp/LLrtsgXqs44d/+IeDfv7t3/5tc+9d73oXfPGLX4TPf/7zcPPNN8P9998Pb3rTmxawtzpuu+224F2uv/56AAD4iZ/4iabMYp2XRx55BE455RS44oor2PuXXXYZXH755XDllVfCzp074eijj4bTTz8dHnvssabM1q1b4Rvf+AZcf/31cO2118Itt9wCF1xwwaReoYH2Lo8++ijccccdcOmll8Idd9wBf/7nfw67d++GN7zhDa2yH/zgB4O5evvb3z6J7rcQmxsAgDPOOCPo6x//8R8H96dhbgAgeIcHHngA/uAP/gCqqoJzzjknKNd5btwU4qUvfam78MILm//n5+fd+vXr3fbt2xewV+l48MEHHQC4m2++ubn2oz/6o+6XfumXFq5TRrz//e93p5xyCntv//797ogjjnCf//znm2vf/OY3HQC4HTt2TKiH3fBLv/RL7pnPfKar69o5Nz3zAgDuC1/4QvN/Xddu3bp17rd/+7eba/v373fLly93f/zHf+ycc+4f/uEfHAC42267rSnz13/9166qKvftb397Yn2noO/C4Wtf+5oDAHfvvfc2157+9Ke7j33sY/12LgPc+5x33nnu7LPPFp+Z5rk5++yz3Wte85rgWom5mTpJ6/HHH4ddu3bBli1bmmuDwQC2bNkCO3bsWMCepePAgQMAALBmzZrg+mc/+1k47rjj4LnPfS5s27YNHn300YXoXhR33XUXrF+/Hn7wB38Qtm7dCvfddx8AAOzatQsOHz4czNFJJ50EGzZsmIo5evzxx+GP/uiP4Gd/9meDz5xPy7xg3HPPPbB3795gLlatWgWbNm1q5mLHjh2wevVqePGLX9yU2bJlCwwGA9i5c+fE+5yCAwcOQFVVsHr16uD6hz70ITj22GPhhS98Ifz2b/82PPHEEwvTQQNuuukmOP744+HZz342/MIv/AJ85zvfae5N69zs27cP/uqv/grOP//81r2uczN1Wd7/7d/+Debn52Ht2rXB9bVr18I//uM/LlCv0lHXNbzzne+El7/85fDc5z63uf4f/+N/hKc//emwfv16+Lu/+zu45JJLYPfu3fDnf/7nC9jbNjZt2gRXX301PPvZz4YHHngAPvCBD8C///f/Hv7+7/8e9u7dC0ceeWTrIFm7di3s3bt3YTqcgGuuuQb2798PP/MzP9Ncm5Z5ofDjze0Xf2/v3r1w/PHHB/eXLVsGa9asWdTz9dhjj8Ell1wCb3nLW4JM4u94xzvgRS96EaxZswa++tWvwrZt2+CBBx6Aj370owvYWx5nnHEGvOlNb4KNGzfCt771Lfgv/+W/wJlnngk7duyAubm5qZ2bz3zmM3DMMce0TAIl5mbqiNZSwYUXXgh///d/H9iBACDQVT/vec+DE044AU477TT41re+Bc985jMn3U0RZ555ZvP385//fNi0aRM8/elPhz/7sz+Do446agF71h2f/vSn4cwzz4T169c316ZlXp4sOHz4MPzkT/4kOOfgU5/6VHDv4osvbv5+/vOfD0ceeST83M/9HGzfvn3R5fY799xzm7+f97znwfOf/3x45jOfCTfddBOcdtppC9izbviDP/gD2Lp1K6xYsSK4XmJupk49eNxxx8Hc3FzLE23fvn2wbt26BepVGi666CK49tpr4Stf+Ur0i52bNm0CAIC77757El3LxurVq+GHfuiH4O6774Z169bB448/Dvv37
w/KTMMc3XvvvfDlL38Z/vN//s9quWmZFz/e2n5Zt25dy4npiSeegIceemhRzpcnWPfeey9cf/310e9Pbdq0CZ544gn4l3/5l8l0sAN+8Ad/EI477rhmXU3b3AAA/M3f/A3s3r07uocA8uZm6ojWkUceCaeeeirccMMNzbW6ruGGG26AzZs3L2DP4nDOwUUXXQRf+MIX4MYbb4SNGzdGn7nzzjsBAOCEE07ouXfd8PDDD8O3vvUtOOGEE+DUU0+FI444Ipij3bt3w3333bfo5+iqq66C448/Hl7/+ter5aZlXjZu3Ajr1q0L5uLgwYOwc+fOZi42b94M+/fvh127djVlbrzxRqjruiHOiwWeYN11113w5S9/GY499tjoM3feeScMBoOWmm0x4l//9V/hO9/5TrOupmluPD796U/DqaeeCqecckq0bNbcdHLjWCD8yZ/8iVu+fLm7+uqr3T/8wz+4Cy64wK1evdrt3bt3obum4hd+4RfcqlWr3E033eQeeOCB5ufRRx91zjl39913uw9+8IPu9ttvd/fcc4/7i7/4C/eDP/iD7pWvfOUC97yNX/7lX3Y33XSTu+eee9z/+l//y23ZssUdd9xx7sEHH3TOOffzP//zbsOGDe7GG290t99+u9u8ebPbvHnzAvdax/z8vNuwYYO75JJLguuLfV6++93vuq9//evu61//ugMA99GPftR9/etfbzzqPvShD7nVq1e7v/iLv3B/93d/584++2y3ceNG973vfa+p44wzznAvfOEL3c6dO93f/u3fumc961nuLW95y6J6l8cff9y94Q1vcE972tPcnXfeGeyhQ4cOOeec++pXv+o+9rGPuTvvvNN961vfcn/0R3/kvv/7v9/99E//9MTfJfY+3/3ud92v/MqvuB07drh77rnHffnLX3YvetGL3LOe9Sz32GOPNXVMw9x4HDhwwH3f932f+9SnPtV6vtTcTCXRcs65T3ziE27Dhg3uyCOPdC996UvdrbfeutBdigIA2J+rrrrKOefcfffd5175yle6NWvWuOXLl7t/9+/+nXv3u9/tDhw4sLAdZ/DmN7/ZnXDCCe7II490P/ADP+De/OY3u7vvvru5/73vfc/94i/+onvqU5/qvu/7vs/92I/9mHvggQcWsMdxfOlLX3IA4Hbv3h1cX+zz8pWvfIVdV+edd55zbuj2fumll7q1a9e65cuXu9NOO631jt/5znfcW97yFveUpzzFrVy50r31rW913/3udxfVu9xzzz3iHvrKV77inHNu165dbtOmTW7VqlVuxYoV7uSTT3a/9Vu/FRCBxfI+jz76qHvta1/rvv/7v98dccQR7ulPf7p729ve1mK+p2FuPH73d3/XHXXUUW7//v2t50vNzex7WjPMMMMMM0wNps6mNcMMM8www5MXM6I1wwwzzDDD1GBGtGaYYYYZZpgazIjWDDPMMMMMU4MZ0ZphhhlmmGFqMCNaM8wwwwwzTA1mRGuGGWaYYYapwYxozTDDDDPMMDWYEa0ZZphhhhmmBjOiNcMMM8www9RgRrRmmGGGGWaYGvx/B6ISnzZmNiwAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "plt.imshow(out)" ] @@ -228,8 +151,7 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/pyproject.toml b/pyproject.toml index a3f74ea5..e99ed3aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,10 +39,6 @@ classifiers = [ "Typing :: Typed", ] dependencies = [ - 'torch', - 'torchvision', - 'lightning', - 'segmentation_models_pytorch', 'albumentations', 'tifffile', 'pyyaml', @@ -57,7 +53,7 @@ dependencies = [ dev = ["pre-commit", "pytest", "pytest-cov"] # test for ci -test = ["pytest", "pytest-cov", "wandb"] +test = ["pytest", "pytest-cov", "wandb", "pytorch_lightning"] # use wandb for logging wandb = ["wandb"] @@ -137,8 +133,6 @@ line-length = 88 # https://mypy.readthedocs.io/en/stable/config_file.html [tool.mypy] -plugins = ["pydantic.mypy"] - files = "src/**/" strict = false allow_untyped_defs = false diff --git a/src/careamics/__init__.py b/src/careamics/__init__.py index 58524882..b1866082 100644 --- a/src/careamics/__init__.py +++ b/src/careamics/__init__.py @@ -8,19 +8,19 @@ __version__ = "uninstalled" __all__ = [ - "CAREamist", + "CAREamist", "CAREamicsKiln", - "CAREamicsModule", - "Configuration", - "load_configuration", - "save_configuration" + "CAREamicsModule", + "Configuration", + "load_configuration", + "save_configuration", ] -from .config import Configuration, load_configuration, save_configuration from .careamist import CAREamist -from .lightning_module import ( - CAREamicsKiln, - CAREamicsModule, +from .config import Configuration, load_configuration, save_configuration +from .lightning_module import CAREamicsModule +from .lightning_prediction import CAREamicsFiring +from .ligthning_datamodule import ( + CAREamicsPredictDataModule, CAREamicsTrainDataModule, - CAREamicsPredictDataModule ) diff --git a/src/careamics/bioimage/io.py b/src/careamics/bioimage/io.py index 6ddbd646..9fb54227 100644 --- a/src/careamics/bioimage/io.py +++ b/src/careamics/bioimage/io.py @@ -7,7 +7,7 @@ from bioimageio.core.build_spec import build_model from bioimageio.spec.model.raw_nodes import Model -from careamics.config.config import ( +from careamics.config.configuration_model import ( Configuration, ) diff --git a/src/careamics/careamist.py b/src/careamics/careamist.py index 9a93ce4b..f4fc6d11 100644 --- a/src/careamics/careamist.py +++ b/src/careamics/careamist.py @@ -3,15 +3,14 @@ import numpy as np from pytorch_lightning import Trainer -from torch.utils.data.dataloader import DataLoader -from careamics.config import Configuration, load_configuration -from careamics.lightning_module import CAREamicsKiln -from careamics.dataset.prepare_dataset import ( - get_train_dataset, - get_validation_dataset, - get_prediction_dataset, -) +from .config import Configuration, load_configuration +from .config.support import SupportedAlgorithm +from .lightning_module import CAREamicsKiln +from .lightning_prediction import CAREamicsFiring +from .ligthning_datamodule import CAREamicsClay, CAREamicsWood +from .utils import check_path_exists, method_dispatch + # TODO callbacks # TODO save as modelzoo, lightning and pytorch_dict @@ -71,7 +70,7 @@ def __init__( f"`config` must be a Configuration object, " f"got {type(configuration)}" ) - + self.cfg = configuration elif path_to_config is not None: @@ -81,11 +80,9 @@ def __init__( f"Configuration path {path_to_config} does not exist." 
) elif not path_to_config.is_file(): - raise ValueError( - f"Configuration path {path_to_config} is not a file." - ) + raise ValueError(f"Configuration path {path_to_config} is not a file.") - # load configuration + # load configuration self.cfg = load_configuration(path_to_config) else: @@ -100,111 +97,159 @@ def __init__( # instantiate trainer self.trainer = Trainer(max_epochs=self.cfg.training.num_epochs) - # TODO: @functools single dispatch + # change the prediction loop + self.trainer.predict_loop = CAREamicsFiring(self.trainer) + + @method_dispatch def train( self, - train_dataloader: DataLoader, - val_dataloader: Optional[DataLoader] = None, + datamodule: CAREamicsWood, ) -> None: - self.trainer.fit(self.model, train_dataloader, val_dataloader) + if not isinstance(datamodule, CAREamicsWood): + raise TypeError( + f"`datamodule` must be a CAREamicsWood instance, " + f"got {type(datamodule)}." + ) + + self.trainer.fit(self.model, datamodule=datamodule) - def train_on_path( + @train.register + def _train_on_path( self, - path_to_train_data: Union[Path, str], - path_to_val_data: Union[Path, str], + path_to_train_data: Path, # cannot use Union annotation for the dispatch + path_to_val_data: Optional[Path] = None, + path_to_train_target: Optional[Path] = None, + path_to_val_target: Optional[Path] = None, + use_in_memory: bool = True, ) -> None: - # sanity check on train data - path_to_train_data = Path(path_to_train_data) - if not path_to_train_data.exists(): - raise FileNotFoundError( - f"Data path {path_to_train_data} is incorrect or" - f" does not exist." - ) - elif not path_to_train_data.is_dir(): - raise ValueError( - f"Data path {path_to_train_data} is not a directory." - ) - - # sanity check on val data - path_to_val_data = Path(path_to_val_data) - if not path_to_val_data.exists(): - raise FileNotFoundError( - f"Data path {path_to_val_data} is incorrect or" - f" does not exist." - ) - elif not path_to_val_data.is_dir(): - raise ValueError( - f"Data path {path_to_val_data} is not a directory." - ) + # sanity check on data (path exists) + path_to_train_data = check_path_exists(path_to_train_data) - # create datasets and dataloaders - train_dataset = get_train_dataset(self.cfg.data, path_to_train_data) - train_dataloader = DataLoader( - train_dataset, - batch_size=self.cfg.training.batch_size, - num_workers=0#self.cfg.training.num_workers, - ) + if path_to_val_data is not None: + path_to_val_data = check_path_exists(path_to_val_data) + + if path_to_train_target is not None: + if ( + self.cfg.algorithm.algorithm + in SupportedAlgorithm.get_unsupervised_algorithms() + ): + raise ValueError( + f"Training target is not needed for unsupervised algorithms " + f"({self.cfg.algorithm.algorithm})." 
+ ) + + path_to_train_target = check_path_exists(path_to_train_target) - val_dataset = get_validation_dataset(self.cfg.data, path_to_val_data) - val_dataloader = DataLoader( - val_dataset, - batch_size=1, - num_workers=0, + if path_to_val_target is not None: + path_to_val_target = check_path_exists(path_to_val_target) + + # create datamodule + datamodule = CAREamicsWood( + data_config=self.cfg.data, + train_data=path_to_train_data, + val_data=path_to_val_data, + train_data_target=path_to_train_target, + val_data_target=path_to_val_target, + use_in_memory=use_in_memory, ) # train - self.train(train_dataloader=train_dataloader, val_dataloader=val_dataloader) + self.train(datamodule=datamodule) + + @train.register + def _train_on_str( + self, + path_to_train_data: str, + path_to_val_data: Optional[str] = None, + path_to_train_target: Optional[str] = None, + path_to_val_target: Optional[str] = None, + use_in_memory: bool = True, + ) -> None: + self._train_on_path( + Path(path_to_train_data), + Path(path_to_val_data) if path_to_val_data is not None else None, + Path(path_to_train_target) if path_to_train_target is not None else None, + Path(path_to_val_target) if path_to_val_target is not None else None, + use_in_memory=use_in_memory, + ) + + @train.register + def _train_on_array( + self, + train_data: np.ndarray, + val_data: Optional[np.ndarray] = None, + train_target: Optional[np.ndarray] = None, + val_target: Optional[np.ndarray] = None, + ) -> None: + if train_target is not None: + if ( + self.cfg.algorithm.algorithm + in SupportedAlgorithm.get_unsupervised_algorithms() + ): + raise ValueError( + f"Training target is not needed for unsupervised algorithms " + f"({self.cfg.algorithm.algorithm})." + ) + + # create datamodule + datamodule = CAREamicsWood( + data_config=self.cfg.data, + train_data=train_data, + val_data=val_data, + train_data_target=train_target, + val_data_target=val_target, + ) + # train + self.train(datamodule=datamodule) + @method_dispatch def predict( self, - test_dataloader: Optional[DataLoader] = None, + datamodule: CAREamicsClay, ) -> Dict[str, np.ndarray]: + if not isinstance(datamodule, CAREamicsClay): + raise TypeError( + f"`datamodule` must be a CAREamicsClay instance, " + f"got {type(datamodule)}." + ) - return self.trainer.predict(self.model, test_dataloader) + return self.trainer.predict(self.model, datamodule=datamodule) - def predict_on_path( + @predict.register + def _predict_on_path( self, - path_to_data: Union[Path, str], - tile_shape: Optional[tuple] = None, - overlaps: Optional[tuple] = None, + path_to_data: Path, ) -> Dict[str, np.ndarray]: - path = Path(path_to_data) - if not path.exists(): - raise FileNotFoundError( - f"Data path {path_to_data} is incorrect or" - f" does not exist." - ) - elif not path.is_dir(): - raise ValueError( - f"Data path {path_to_data} is not a directory." - ) + # sanity check (path exists) + path = check_path_exists(path_to_data) - # create dataset - pred_dataset = get_prediction_dataset( - self.cfg.data, - path_to_data, - tile_shape=tile_shape, - overlaps=overlaps, - ) - - # create dataloader - pred_dataloader = DataLoader( - pred_dataset, - batch_size=self.cfg.training.batch_size, - num_workers=self.cfg.training.num_workers, + # create datamodule + datamodule = CAREamicsClay( + data_config=self.cfg.data, + pred_data=path, ) - # TODO how to deal with stitching? 
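# Usage sketch for the `method_dispatch` mechanism above (the config and data
# paths are hypothetical, and `method_dispatch` is assumed to behave like
# `functools.singledispatch` keyed on the first argument after `self`):
#
#     import numpy as np
#     from pathlib import Path
#     from careamics import CAREamist
#
#     careamist = CAREamist(path_to_config="config.yml")
#     careamist.train("data/train", "data/val")    # str -> _train_on_str
#     careamist.predict(Path("data/test"))         # Path -> _predict_on_path
#     careamist.predict(np.zeros((64, 64)))        # ndarray -> _predict_on_array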
+ return self.predict(datamodule) - # predict - return self.predict(pred_dataloader) + @predict.register + def _predict_on_str( + self, + path_to_data: str, + ) -> Dict[str, np.ndarray]: + path_to_data = Path(path_to_data) + return self._predict_on_path(path_to_data) - def save( + @predict.register + def _predict_on_array( self, - format: str = "modelzoo", # TODO Enum - ): - raise NotImplementedError( - "Saving is not implemented yet." + data: np.ndarray, + ) -> Dict[str, np.ndarray]: + # create datamodule + datamodule = CAREamicsClay( + data_config=self.cfg.data, + pred_data=data, ) + + return self.predict(datamodule) diff --git a/src/careamics/config/__init__.py b/src/careamics/config/__init__.py index c58f10a7..d937fa66 100644 --- a/src/careamics/config/__init__.py +++ b/src/careamics/config/__init__.py @@ -9,10 +9,10 @@ "save_configuration", ] -from .algorithm import AlgorithmModel -from .config import ( +from .algorithm_model import AlgorithmModel +from .configuration_model import ( Configuration, load_configuration, save_configuration, ) -from .data import DataModel +from .data_model import DataModel diff --git a/src/careamics/config/algorithm.py b/src/careamics/config/algorithm.py deleted file mode 100644 index e42cda63..00000000 --- a/src/careamics/config/algorithm.py +++ /dev/null @@ -1,152 +0,0 @@ -"""Algorithm configuration.""" -from typing import Literal, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, -) - -from .architectures import UNetModel, VAEModel, CustomModel -from .optimizers import LrSchedulerModel, OptimizerModel - -#from .noise_models import NoiseModel - -class AlgorithmModel(BaseModel): - """ - Algorithm configuration. - - The minimum algorithm configuration is composed of the following fields: - - loss: - Loss to use, currently only supports n2v. - - model: - Model to use, currently only supports UNet. - - Attributes - ---------- - loss : str - List of losses to use, currently only supports n2v. - model : Models - Model to use, currently only supports UNet. - is_3D : bool - Whether to use a 3D model or not. - masking_strategy : MaskingStrategies - Masking strategy to use, currently only supports default masking. - masked_pixel_percentage : float - Percentage of pixels to be masked in each patch. - roi_size : int - Size of the region of interest used in the masking scheme. - model_parameters : ModelParameters - Model parameters, see ModelParameters for more details. - """ - - # Pydantic class configuration - model_config = ConfigDict( - protected_namespaces=(), # allows to use model_* as a field name - validate_assignment=True, - ) - - # Mandatory fields - algorithm: Literal["n2v", "n2v2"] - loss: Literal["n2v", "mae", "mse"] - model: Union[UNetModel, VAEModel, CustomModel] = Field(discriminator="architecture") - - optimizer: OptimizerModel - lr_scheduler: LrSchedulerModel - - # Optional fields, define a default value - #noise_model: Optional[NoiseModel] = None - - - # def get_noise_model(self, noise_model: Dict, info: ValidationInfo) -> Dict: - # """ - # Validate noise model. - - # Returns - # ------- - # Dict - # Validated noise model. 
- # """ - # # TODO validate noise model - # if "noise_model_type" not in info.data: - # raise ValueError("Noise model is missing.") - - # noise_model_type = info.data["noise_model_type"] - - # # TODO this does not exist - # if noise_model is not None: - # _ = NoiseModel.get_noise_model(noise_model_type, noise_model) - - # return noise_model - - # TODO think in terms of validation of Algorithm and entry point in Lightning - # TODO we might need to do the model validation in the overall configuration - # @model_validator(mode="after") - # def algorithm_cross_validation(cls, data: Algorithm) -> Algorithm: - # """Validate loss. - - # Returns - # ------- - # Loss - # Validated loss. - - # Raises - # ------ - # ValueError - # If the loss is not supported or inconsistent with the noise model. - # """ - # if data.algorithm_type in [ - # AlgorithmType.CARE, - # AlgorithmType.N2N, - # ] and data.loss not in [ - # Loss.MSE, - # Loss.MAE, - # Loss.CUSTOM, - # ]: - # raise ValueError( - # f"Algorithm {data.algorithm_type} does not support" - # f" {data.loss.upper()} loss. Please refer to the documentation" - # # TODO add link to documentation - # ) - - # if ( - # data.algorithm_type in [AlgorithmType.CARE, AlgorithmType.N2N] - # and data.noise_model is not None - # ): - # raise ValueError( - # f"Algorithm {data.algorithm_type} isn't compatible with a noise model." - # ) - - # if data.algorithm_type in [AlgorithmType.N2V, AlgorithmType.PN2V]: - # if data.transforms is None: - # raise ValueError( - # f"Algorithm {data.algorithm_type} requires a masking strategy." - # "Please add ManipulateN2V to transforms." - # ) - # else: - # if "ManipulateN2V" not in data.transforms: - # raise ValueError( - # f"Algorithm {data.algorithm_type} requires a masking strategy." - # "Please add ManipulateN2V to transforms." - # ) - # elif "ManipulateN2V" in data.transforms: - # raise ValueError( - # f"Algorithm {data.algorithm_type} doesn't require a masking strategy." - # "Please remove ManipulateN2V from the image or patch_transform." - # ) - # if ( - # data.loss == Loss.PN2V or data.loss == Loss.HDN - # ) and data.noise_model is None: - # raise ValueError(f"Loss {data.loss.upper()} requires a noise model.") - - # if data.loss in [Loss.N2V, Loss.MAE, Loss.MSE] and data.noise_model is not None: - # raise ValueError( - # f"Loss {data.loss.upper()} does not support a noise model." - # ) - # if data.loss == Loss.N2V and data.algorithm_type != AlgorithmType.N2V: - # raise ValueError( - # f"Loss {data.loss.upper()} is only supported by " - # f"{AlgorithmType.N2V}." - # ) - - # return data diff --git a/src/careamics/config/algorithm_model.py b/src/careamics/config/algorithm_model.py new file mode 100644 index 00000000..9b2b5c1d --- /dev/null +++ b/src/careamics/config/algorithm_model.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +from typing import Literal, Union + +from pydantic import BaseModel, ConfigDict, Field, model_validator + +from .architectures import CustomModel, UNetModel, VAEModel +from .optimizer_models import LrSchedulerModel, OptimizerModel + + +class AlgorithmModel(BaseModel): + """Pydantic model describing CAREamics' algorithm. 
+
+    # TODO
+    """
+
+    # Pydantic class configuration
+    model_config = ConfigDict(
+        protected_namespaces=(),  # allows to use model_* as a field name
+        validate_assignment=True,
+    )
+
+    # Mandatory fields
+    algorithm: Literal["n2v", "n2v2", "structn2v", "custom"]
+    loss: Literal["n2v", "mae", "mse"]
+    model: Union[UNetModel, VAEModel, CustomModel] = Field(discriminator="architecture")
+
+    # Optional fields
+    optimizer: OptimizerModel = OptimizerModel()
+    lr_scheduler: LrSchedulerModel = LrSchedulerModel()
+
+    @model_validator(mode="after")
+    def algorithm_cross_validation(self) -> AlgorithmModel:
+        """Validate the algorithm model based on `algorithm`.
+
+        N2V:
+        - loss must be `n2v`
+        - model must be a `UNetModel`
+        - model `n2v2` parameter must be `False`
+
+        N2V2:
+        - loss must be `n2v`
+        - model must be a `UNetModel`
+        - model `n2v2` parameter must be `True`
+
+        StructN2V:
+        - loss must be `n2v`
+        - model must be a `UNetModel`
+        """
+        # N2V
+        if self.algorithm == "n2v":
+            if self.loss != "n2v":
+                raise ValueError(
+                    f"Algorithm {self.algorithm} only supports loss `n2v`."
+                )
+
+            if not isinstance(self.model, UNetModel):
+                raise ValueError(
+                    f"Model for algorithm {self.algorithm} must be a `UNetModel`."
+                )
+
+            if self.model.n2v2:
+                raise ValueError(
+                    f"Model for algorithm {self.algorithm} must have `n2v2` parameters "
+                    f"set to `False`."
+                )
+
+        # N2V2
+        elif self.algorithm == "n2v2":
+            if self.loss != "n2v":
+                raise ValueError(
+                    f"Algorithm {self.algorithm} only supports loss `n2v`."
+                )
+
+            if not isinstance(self.model, UNetModel):
+                raise ValueError(
+                    f"Model for algorithm {self.algorithm} must be a `UNetModel`."
+                )
+
+            if not self.model.n2v2:
+                raise ValueError(
+                    f"Model for algorithm {self.algorithm} must have `n2v2` parameters "
+                    f"set to `True`."
+                )
+
+        # StructN2V
+        elif self.algorithm == "structn2v":
+            if self.loss != "n2v":
+                raise ValueError(
+                    f"Algorithm {self.algorithm} only supports loss `n2v`."
+                )
+
+            if not isinstance(self.model, UNetModel):
+                raise ValueError(
+                    f"Model for algorithm {self.algorithm} must be a `UNetModel`."
+ ) + + if isinstance(self.model, VAEModel): + raise ValueError("VAE are currently not implemented.") + + return self diff --git a/src/careamics/config/architectures/__init__.py b/src/careamics/config/architectures/__init__.py index e26f0d50..145264e2 100644 --- a/src/careamics/config/architectures/__init__.py +++ b/src/careamics/config/architectures/__init__.py @@ -1,3 +1,4 @@ +from .custom_model import CustomModel +from .register_model import clear_custom_models, get_custom_model, register_model from .unet_model import UNetModel from .vae_model import VAEModel -from .custom_model import CustomModel \ No newline at end of file diff --git a/src/careamics/config/architectures/custom_model.py b/src/careamics/config/architectures/custom_model.py index ba8288d0..c4456dae 100644 --- a/src/careamics/config/architectures/custom_model.py +++ b/src/careamics/config/architectures/custom_model.py @@ -1,21 +1,43 @@ from typing import Literal -from pydantic import ( - BaseModel, - ConfigDict -) +from pydantic import BaseModel, ConfigDict, field_validator +from torch.nn import Module + +from .register_model import get_custom_model -# TODO: decorator to register custom model -# https://stackoverflow.com/questions/3054372/auto-register-class-methods-using-decorator -class CustomModel(BaseModel): +class CustomModel(BaseModel): # pydantic model config - model_config = ConfigDict( - validate_assignment=True - ) + model_config = ConfigDict(validate_assignment=True) # discriminator used for choosing the pydantic model in Model architecture: Literal["Custom"] + # name of the custom model + name: str + # parameters parameters: dict = {} + + @field_validator("name") + @classmethod + def custom_model_is_known(cls, value: str) -> str: + """Check whether the custom model is known. + + Parameters + ---------- + value : str + Name of the custom model as registered using the `@register_model` + decorator. + """ + # delegate error to get_custom_model + model = get_custom_model(value) + + # check if it is a torch Module subclass + if not issubclass(model, Module): + raise ValueError( + f'Retrieved class {model} with name "{value}" is not a ' + f"torch.nn.Module subclass." + ) + + return value diff --git a/src/careamics/config/architectures/register_model.py b/src/careamics/config/architectures/register_model.py new file mode 100644 index 00000000..6e62cbce --- /dev/null +++ b/src/careamics/config/architectures/register_model.py @@ -0,0 +1,98 @@ +from typing import Callable + +from torch.nn import Module + +CUSTOM_MODELS = {} # dictionary of custom models {"name": __class__} + + +def register_model(name: str) -> Callable: + """Decorator used to register a torch.nn.Module class with a given `name`. + + Parameters + ---------- + name : str + Name of the model. + + Returns + ------- + Callable + Function allowing to instantiate the wrapped Module class. + + Raises + ------ + ValueError + If a model is already registered with that name. + + Examples + -------- + ```python + @register_model(name="linear") + class LinearModel(nn.Module): + def __init__(self, in_features, out_features): + super().__init__() + + self.weight = nn.Parameter(ones(in_features, out_features)) + self.bias = nn.Parameter(ones(out_features)) + + def forward(self, input): + return (input @ self.weight) + self.bias + ``` + """ + if name in CUSTOM_MODELS: + raise ValueError( + f"Model {name} already exists. Choose a different name or run " + f"`clear_custom_models()` to empty the registry." 
+ ) + + def add_custom_model(model: Module) -> Module: + """Add a custom model to the registry and return it. + + Parameters + ---------- + model : Module + Module class to register + + Returns + ------- + Module + The registered model. + """ + # add model to the registry + CUSTOM_MODELS[name] = model + + return model + + return add_custom_model + + +def get_custom_model(name: str) -> Module: + """Get the custom model corresponding to `name` from the registry. + + Parameters + ---------- + name : str + Name of the model to retrieve. + + Returns + ------- + Module + The requested model. + + Raises + ------ + ValueError + If the model is not registered. + """ + if name not in CUSTOM_MODELS: + raise ValueError( + f"Model {name} is unknown. Have you registered it using " + f'@register_model("{name}") as decorator?' + ) + + return CUSTOM_MODELS[name] + + +def clear_custom_models() -> None: + """Clear the custom models registry.""" + # clear dictionary + CUSTOM_MODELS.clear() diff --git a/src/careamics/config/architectures/unet_model.py b/src/careamics/config/architectures/unet_model.py index 7b3cf158..ca3257a9 100644 --- a/src/careamics/config/architectures/unet_model.py +++ b/src/careamics/config/architectures/unet_model.py @@ -2,7 +2,7 @@ from typing import Literal -from pydantic import BaseModel, ConfigDict, Field, validator +from pydantic import BaseModel, ConfigDict, Field, field_validator # TODO tests activation <-> pydantic model, test the literals! @@ -21,30 +21,25 @@ class UNetModel(BaseModel): """ # pydantic model config - model_config = ConfigDict( - validate_assignment=True - ) + model_config = ConfigDict(validate_assignment=True) # discriminator used for choosing the pydantic model in Model architecture: Literal["UNet"] # parameters # validate_defaults allow ignoring default values in the dump if they were not set - conv_dims : int = Field(default=2, ge=2, le=3, validate_default=True) + conv_dims: int = Field(default=2, ge=2, le=3, validate_default=True) num_classes: int = Field(default=1, ge=1, validate_default=True) in_channels: int = Field(default=1, ge=1, validate_default=True) depth: int = Field(default=2, ge=1, le=10, validate_default=True) num_channels_init: int = Field(default=32, ge=8, le=1024, validate_default=True) final_activation: Literal[ - "None", - "Sigmoid", - "Softmax", - "Tanh", - "ReLU", - "LeakyReLU"] = Field(default="None", validate_default=True) + "None", "Sigmoid", "Softmax", "Tanh", "ReLU", "LeakyReLU" + ] = Field(default="None", validate_default=True) n2v2: bool = Field(default=False, validate_default=True) - @validator("num_channels_init") + @field_validator("num_channels_init") + @classmethod def validate_num_channels_init(cls, num_channels_init: int) -> int: """ Validate that num_channels_init is even. diff --git a/src/careamics/config/architectures/vae_model.py b/src/careamics/config/architectures/vae_model.py index 0647c8a1..594d6c15 100644 --- a/src/careamics/config/architectures/vae_model.py +++ b/src/careamics/config/architectures/vae_model.py @@ -7,14 +7,12 @@ class VAEModel(BaseModel): - model_config = ConfigDict( use_enum_values=True, protected_namespaces=(), validate_assignment=True ) architecture: Literal["VAE"] - def set_3D(self, is_3D: bool) -> None: """ Set 3D model by setting the `conv_dims` parameters. @@ -26,7 +24,6 @@ def set_3D(self, is_3D: bool) -> None: """ raise NotImplementedError("VAE is not implemented yet.") - def is_3D(self) -> bool: """ Return whether the model is 3D or not. 
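Taken together, the registry and the `CustomModel` validator above give custom architectures a simple round trip: the decorator stores the class under a name, and the config later resolves that name back to the class. A minimal sketch, reusing the hypothetical `LinearModel` from the `register_model` docstring:

    import torch.nn as nn
    from torch import ones

    from careamics.config.architectures import (
        CustomModel,
        get_custom_model,
        register_model,
    )


    @register_model(name="linear")
    class LinearModel(nn.Module):
        def __init__(self, in_features, out_features):
            super().__init__()
            self.weight = nn.Parameter(ones(in_features, out_features))
            self.bias = nn.Parameter(ones(out_features))

        def forward(self, input):
            return (input @ self.weight) + self.bias


    # the registry hands the class back by name, which is exactly what
    # `CustomModel.custom_model_is_known` relies on at validation time
    assert get_custom_model("linear") is LinearModel

    config = CustomModel(
        architecture="Custom",
        name="linear",
        parameters={"in_features": 8, "out_features": 8},
    )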
@@ -36,4 +33,4 @@ def is_3D(self) -> bool: bool Whether the model is 3D or not. """ - raise NotImplementedError("VAE is not implemented yet.") \ No newline at end of file + raise NotImplementedError("VAE is not implemented yet.") diff --git a/src/careamics/config/config.py b/src/careamics/config/configuration_model.py similarity index 74% rename from src/careamics/config/config.py rename to src/careamics/config/configuration_model.py index a1759a78..99969116 100644 --- a/src/careamics/config/config.py +++ b/src/careamics/config/configuration_model.py @@ -13,11 +13,14 @@ model_validator, ) -from .algorithm import AlgorithmModel -from .data import DataModel -from .training import Training +from .algorithm_model import AlgorithmModel +from .data_model import DataModel +from .support import SupportedAlgorithm, SupportedTransform +from .training_model import Training +from .transform_model import TransformModel +# TODO what do we expect in terms of model dump, with or without the defaults? class Configuration(BaseModel): """ CAREamics configuration. @@ -47,34 +50,15 @@ class Configuration(BaseModel): # required parameters experiment_name: str - - # TODO consider using DirectoryPath instead - working_directory: Path + working_directory: Union[str, Path] # Sub-configurations algorithm: AlgorithmModel data: DataModel training: Training - def set_3D(self, is_3D: bool, axes: str) -> None: - """ - Set 3D flag and axes. - - Parameters - ---------- - is_3D : bool - Whether the algorithm is 3D or not. - axes : str - Axes of the data. - """ - # set the flag and axes (this will not trigger validation at the config level) - self.algorithm.model.set_3D(is_3D) - self.data.axes = axes - - # cheap hack: trigger validation - self.algorithm = self.algorithm - @field_validator("experiment_name") + @classmethod def no_symbol(cls, name: str) -> str: """ Validate experiment name. @@ -111,6 +95,7 @@ def no_symbol(cls, name: str) -> str: return name @field_validator("working_directory") + @classmethod def parent_directory_exists(cls, workdir: Union[str, Path]) -> Path: """ Validate working directory. @@ -152,18 +137,13 @@ def parent_directory_exists(cls, workdir: Union[str, Path]) -> Path: return path @model_validator(mode="after") - def validate_3D(cls, config: Configuration) -> Configuration: + def validate_3D(self) -> Configuration: """ Check 3D flag validity. Check that the algorithm is_3D flag is compatible with the axes in the data configuration. - Parameters - ---------- - config : Configuration - Configuration to validate. - Returns ------- Configuration @@ -176,25 +156,83 @@ def validate_3D(cls, config: Configuration) -> Configuration: not 3D but the data axes are. """ # check that is_3D and axes are compatible - if config.algorithm.model.is_3D() and "Z" not in config.data.axes: + if self.algorithm.model.is_3D() and "Z" not in self.data.axes: raise ValueError( - f"Algorithm is 3D but data axes are not (got axes {config.data.axes})." + f"Algorithm is 3D but data axes are not (got axes {self.data.axes})." ) - elif not config.algorithm.model.is_3D() and "Z" in config.data.axes: + elif not self.algorithm.model.is_3D() and "Z" in self.data.axes: raise ValueError( - f"Algorithm is not 3D but data axes are (got axes {config.data.axes})." + f"Algorithm is not 3D but data axes are (got axes {self.data.axes})." ) - return config + return self + + @model_validator(mode="after") + def validate_algorithm_and_data(self) -> Configuration: + """Validate the algorithm and data configurations. 
+
+        In particular, the choice of algorithm will influence the potential
+        transforms that can be applied to the data.
+
+        - (struct)N2V(2): requires ManipulateN2V to be the last transform.
+
+        Returns
+        -------
+        Configuration
+            Validated configuration.
+        """
+        # TODO the first if will need to change once we have more than (struct)N2V(2)
+        if not self.algorithm.algorithm == SupportedAlgorithm.CUSTOM:
+            if isinstance(self.data.transforms, list):
+                transform_list = [t.name for t in self.data.transforms]
+
+                # missing MANIPULATE_N2V
+                if SupportedTransform.MANIPULATE_N2V not in transform_list:
+                    self.data.transforms.append(
+                        TransformModel(name=SupportedTransform.MANIPULATE_N2V)
+                    )
+
+                # multiple MANIPULATE_N2V
+                elif transform_list.count(SupportedTransform.MANIPULATE_N2V) > 1:
+                    raise ValueError(
+                        f"Multiple {SupportedTransform.MANIPULATE_N2V} transforms are not "
+                        f"allowed."
+                    )
+
+                # MANIPULATE_N2V not the last transform
+                elif transform_list[-1] != SupportedTransform.MANIPULATE_N2V:
+                    index = transform_list.index(SupportedTransform.MANIPULATE_N2V)
+                    transform = self.data.transforms.pop(index)
+                    self.data.transforms.append(transform)
+
+        return self
+
+    def set_3D(self, is_3D: bool, axes: str) -> None:
+        """
+        Set 3D flag and axes.
+
+        Parameters
+        ----------
+        is_3D : bool
+            Whether the algorithm is 3D or not.
+        axes : str
+            Axes of the data.
+        """
+        # set the flag and axes (this will not trigger validation at the config level)
+        self.algorithm.model.set_3D(is_3D)
+        self.data.axes = axes
+
+        # cheap hack: trigger validation
+        self.algorithm = self.algorithm
 
     def model_dump(
         self,
-        exclude_defaults: bool = True,
+        exclude_defaults: bool = False,  # TODO is this what we want?
         exclude_none: bool = True,
         **kwargs: Dict,
     ) -> Dict:
         """
-        Override model_dump method in order to set default values for optional fields.
+        Override model_dump method in order to set default values.
 
         Parameters
         ----------
@@ -215,6 +253,9 @@ def model_dump(
             exclude_none=exclude_none, exclude_defaults=exclude_defaults, **kwargs
         )
 
+        # change Path into str
+        dictionary["working_directory"] = str(dictionary["working_directory"])
+
         return dictionary
 
 
diff --git a/src/careamics/config/data.py b/src/careamics/config/data.py
deleted file mode 100644
index 6b925e78..00000000
--- a/src/careamics/config/data.py
+++ /dev/null
@@ -1,194 +0,0 @@
-"""Data configuration."""
-from __future__ import annotations
-
-from typing import List, Literal, Optional, Union
-
-from albumentations import Compose
-from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
-
-from ..utils import check_axes_validity
-from .transform import TransformModel
-
-
-class DataModel(BaseModel):
-    """
-    Data configuration.
-
-    If std is specified, mean must be specified as well. Note that setting the std first
-    and then the mean (if they were both `None` before) will raise a validation error.
-    Prefer instead the following:
-    >>> set_mean_and_std(mean, std)
-
-    Attributes
-    ----------
-    in_memory : bool
-        Whether to load the data in memory or not.
-    data_format : SupportedExtension
-        Extension of the data, without period.
-    axes : str
-        Axes of the data.
-    mean: Optional[float]
-        Expected data mean.
-    std: Optional[float]
-        Expected data standard deviation.
- """ - - # Pydantic class configuration - model_config = ConfigDict( - validate_assignment=True, - arbitrary_types_allowed=True, - ) - # DATASET CONFIGURATION - # Mandatory fields - data_type: Literal["array", "tiff", "zarr", "custom"] - patch_size: List[int] = Field(..., min_length=2, max_length=3) - - axes: str - - # Optional fields - mean: Optional[float] = Field(default=None, ge=0) - std: Optional[float] = Field(default=None, gt=0) - - # TODO need better validation for that one - transforms: Optional[Union[List[TransformModel], Compose]] = Field( - default=[], validate_default=True - ) - - # DATA LOADER CONFIGURATION - batch_size: int = Field(default=1, ge=1) - num_workers: Optional[int] = Field(default=0, ge=0) - pin_memory: Optional[bool] = Field(default=False) - - @field_validator("patch_size") - def all_elements_non_zero_even(cls, patch_list: List[int]) -> List[int]: - """ - Validate patch size. - - Patch size must be non-zero, positive and even. - - Parameters - ---------- - patch_list : List[int] - Patch size. - - Returns - ------- - List[int] - Validated patch size. - - Raises - ------ - ValueError - If the patch size is 0. - ValueError - If the patch size is not even. - """ - for dim in patch_list: - if dim < 1: - raise ValueError(f"Patch size must be non-zero positive (got {dim}).") - - if dim % 2 != 0: - raise ValueError(f"Patch size must be even (got {dim}).") - - return patch_list - - @field_validator("axes") - def axes_valid(cls, axes: str) -> str: - """ - Validate axes. - - Axes must: - - be a combination of 'STCZYX' - - not contain duplicates - - contain at least 2 contiguous axes: X and Y - - contain at most 4 axes - - not contain both S and T axes - - Parameters - ---------- - axes : str - Axes to validate. - - Returns - ------- - str - Validated axes. - - Raises - ------ - ValueError - If axes are not valid. - """ - # Validate axes - check_axes_validity(axes) - - return axes - - def set_mean_and_std(self, mean: float, std: float) -> None: - """ - Set mean and standard deviation of the data. - - This method is preferred to setting the fields directly, as it ensures that the - mean is set first, then the std; thus avoiding a validation error to be thrown. - - Parameters - ---------- - mean : float - Mean of the data. - std : float - Standard deviation of the data. - """ - self.mean = mean - self.std = std - - # @model_validator(mode="after") - # @classmethod - # def validate_dataset_to_be_used(cls, data: Data) -> Any: - # """Validate that in_memory dataset is used correctly. - - # Parameters - # ---------- - # data : Configuration - # Configuration to validate. - - # Raises - # ------ - # ValueError - # If in_memory dataset is used with Zarr storage. - # If axes are not valid. - # """ - # # TODO: why not? What if it is a small Zarr... - # if data.in_memory and data.data_format == SupportedExtension.ZARR: - # raise ValueError("Zarr storage can't be used with in_memory dataset.") - - # return data - - # TODO is there a more elegant way? We could have an optional pydantic model with both specified!! - @model_validator(mode="after") - def std_only_with_mean(cls, data_model: DataModel) -> DataModel: - """ - Check that mean and std are either both None, or both specified. - - If we enforce both None or both specified, we cannot set the values one by one - due to the ConfDict enforcing the validation on assignment. Therefore, we check - only when the std is not None and the mean is None. - - Parameters - ---------- - data_model : Data - Data model. 
- - Returns - ------- - Data - Validated data model. - - Raises - ------ - ValueError - If std is not None and mean is None. - """ - if data_model.std is not None and data_model.mean is None: - raise ValueError("Cannot have std non None if mean is None.") - - return data_model diff --git a/src/careamics/config/data_model.py b/src/careamics/config/data_model.py new file mode 100644 index 00000000..34be3dd7 --- /dev/null +++ b/src/careamics/config/data_model.py @@ -0,0 +1,253 @@ +"""Data configuration.""" +from __future__ import annotations + +from typing import List, Literal, Optional, Union + +from albumentations import Compose +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator + +from careamics.utils import check_axes_validity + +from .support import SupportedTransform +from .transform_model import TransformModel + + +class DataModel(BaseModel): + """ + Data configuration. + + If std is specified, mean must be specified as well. Note that setting the std first + and then the mean (if they were both `None` before) will raise a validation error. + Prefer instead the following: + >>> set_mean_and_std(mean, std) + """ + + # Pydantic class configuration + model_config = ConfigDict( + validate_assignment=True, + arbitrary_types_allowed=True, + ) + + # Dataset configuration + # Mandatory fields + mode: Literal["train", "predict"] + data_type: Literal["array", "tiff", "custom"] + patch_size: List[int] = Field(..., min_length=2, max_length=3) + + axes: str + + # Optional fields + mean: Optional[float] = None + std: Optional[float] = None + + transforms: Union[List[TransformModel], Compose] = Field( + default=[ + { + "name": SupportedTransform.NDFLIP.value, + }, + { + "name": SupportedTransform.XY_RANDOM_ROTATE90.value, + }, + { + "name": SupportedTransform.NORMALIZE.value, + }, + { + "name": SupportedTransform.N2V_MANIPULATE_UNIFORM.value, + }, + ], + validate_default=True, + ) # TODO defaults should change based on the train/predict and on the algorithm + + # Dataloader configuration + batch_size: int = Field(default=1, ge=1, validate_default=True) + num_workers: int = Field(default=0, ge=0, validate_default=True) + pin_memory: bool = Field(default=False, validate_default=True) + + @field_validator("patch_size") + @classmethod + def all_elements_non_zero_even(cls, patch_list: List[int]) -> List[int]: + """ + Validate patch size. + + Patch size must be non-zero, positive and even. + + Parameters + ---------- + patch_list : List[int] + Patch size. + + Returns + ------- + List[int] + Validated patch size. + + Raises + ------ + ValueError + If the patch size is 0. + ValueError + If the patch size is not even. + """ + for dim in patch_list: + if dim < 1: + raise ValueError(f"Patch size must be non-zero positive (got {dim}).") + + if dim % 2 != 0: + raise ValueError(f"Patch size must be even (got {dim}).") + + return patch_list + + @field_validator("axes") + @classmethod + def axes_valid(cls, axes: str) -> str: + """ + Validate axes. + + Axes must: + - be a combination of 'STCZYX' + - not contain duplicates + - contain at least 2 contiguous axes: X and Y + - contain at most 4 axes + - not contain both S and T axes + + Parameters + ---------- + axes : str + Axes to validate. + + Returns + ------- + str + Validated axes. + + Raises + ------ + ValueError + If axes are not valid. 
+ """ + # Validate axes + check_axes_validity(axes) + + return axes + + @model_validator(mode="after") + def std_only_with_mean(cls, data_model: DataModel) -> DataModel: + """ + Check that mean and std are either both None, or both specified. + + If we enforce both None or both specified, we cannot set the values one by one + due to the ConfDict enforcing the validation on assignment. Therefore, we check + only when the std is not None and the mean is None. + + Parameters + ---------- + data_model : Data + Data model. + + Returns + ------- + Data + Validated data model. + + Raises + ------ + ValueError + If std is not None and mean is None. + """ + if data_model.std is not None and data_model.mean is None: + raise ValueError("Cannot have `std` field if `mean` is None.") + + return data_model + + def has_tranform_list(self) -> bool: + """ + Check if the transforms are a list, as opposed to a Compose object. + + Returns + ------- + bool + True if the transforms are a list, False otherwise. + """ + return isinstance(self.transforms, list) + + def set_mean_and_std(self, mean: float, std: float) -> None: + """ + Set mean and standard deviation of the data. + + This method is preferred to setting the fields directly, as it ensures that the + mean is set first, then the std; thus avoiding a validation error to be thrown. + + Parameters + ---------- + mean : float + Mean of the data. + std : float + Standard deviation of the data. + """ + self.mean = mean + self.std = std + + # search in the transforms for Normalize and update parameters + if not isinstance(self.transforms, Compose): + for transform in self.transforms: + if transform.name == SupportedTransform.NORMALIZE.value: + transform.parameters["mean"] = mean + transform.parameters["std"] = std + transform.parameters["max_pixel_value"] = 1.0 + else: + raise ValueError( + "Setting mean and std with Compose transforms is not allowed." + ) + + @model_validator(mode="after") + def validate_transforms_and_axes(cls, data_model: DataModel) -> DataModel: + """ + Validate the transforms with respect to the axes. + + Parameters + ---------- + data_model : DataModel + Data model. + + Returns + ------- + DataModel + Validated data model. + + Raises + ------ + ValueError + If the transforms are not valid. 
+ """ + # check that the transforms NDFLip is 3D + + if "Z" in data_model.axes: + if data_model.has_tranform_list(): + for transform in data_model.transforms: + if transform.name == SupportedTransform.NDFLIP: + transform.parameters["is_3D"] = True + elif transform.name == SupportedTransform.XY_RANDOM_ROTATE90: + transform.parameters["is_3D"] = True + else: + if data_model.has_tranform_list(): + for transform in data_model.transforms: + if transform.name == SupportedTransform.NDFLIP: + transform.parameters["is_3D"] = False + elif transform.name == SupportedTransform.XY_RANDOM_ROTATE90: + transform.parameters["is_3D"] = False + + if data_model.mode == "predict": + for transform in data_model.transforms: + if transform.name == "Normalize": + transform.parameters["mean"] = data_model.mean + transform.parameters["std"] = data_model.std + transform.parameters["max_pixel_value"] = 1.0 + else: + data_model.transforms.remove(transform) + else: + for transform in data_model.transforms: + if transform.name == "Normalize": + transform.parameters["mean"] = data_model.mean + transform.parameters["std"] = data_model.std + transform.parameters["max_pixel_value"] = 1.0 + return data_model diff --git a/src/careamics/config/noise_models.py b/src/careamics/config/noise_models.py index 9758d540..6dd01fa4 100644 --- a/src/careamics/config/noise_models.py +++ b/src/careamics/config/noise_models.py @@ -20,6 +20,7 @@ class NoiseModelType(str, Enum): HIST = "hist" GMM = "gmm" + # TODO add validator decorator @classmethod def validate_noise_model_type( cls, noise_model: Union[str, NoiseModel], parameters: dict @@ -80,6 +81,7 @@ class NoiseModel(BaseModel): parameters: Dict = Field(default_factory=dict, validate_default=True) @field_validator("parameters") + @classmethod def validate_parameters(cls, data, values) -> Dict: """_summary_. diff --git a/src/careamics/config/optimizers.py b/src/careamics/config/optimizer_models.py similarity index 82% rename from src/careamics/config/optimizers.py rename to src/careamics/config/optimizer_models.py index 6bf974cf..e3ff6825 100644 --- a/src/careamics/config/optimizers.py +++ b/src/careamics/config/optimizer_models.py @@ -13,7 +13,8 @@ from torch import optim from careamics.utils.torch_utils import filter_parameters -from .support import SupportedOptimizer, SupportedScheduler + +from .support import SupportedOptimizer class OptimizerModel(BaseModel): @@ -47,11 +48,12 @@ class OptimizerModel(BaseModel): parameters: dict = Field( default={ "lr": 1e-4, - }, - validate_default=True + }, + validate_default=True, ) @field_validator("parameters") + @classmethod def filter_parameters(cls, user_params: dict, values: ValidationInfo) -> Dict: """ Validate optimizer parameters. @@ -83,17 +85,16 @@ def filter_parameters(cls, user_params: dict, values: ValidationInfo) -> Dict: # filter the user parameters according to the optimizer's signature parameters = filter_parameters(optimizer_class, user_params) - # TODO warn about unused parameters - return parameters - - # TODO in PyTorch 2.2 the lr is not necessary for SGD. Pin version and delete this validator? @model_validator(mode="after") + @classmethod def sgd_lr_parameter(cls, optimizer: OptimizerModel) -> OptimizerModel: """ Check that SGD optimizer has the mandatory `lr` parameter specified. + This is specific for PyTorch < 2.2. 
+ Parameters ---------- optimizer : Optimizer @@ -120,7 +121,6 @@ def sgd_lr_parameter(cls, optimizer: OptimizerModel) -> OptimizerModel: return optimizer - class LrSchedulerModel(BaseModel): """ @@ -153,15 +153,37 @@ class LrSchedulerModel(BaseModel): parameters: dict = Field(default={}, validate_default=True) @field_validator("parameters") + @classmethod def filter_parameters(cls, user_params: dict, values: ValidationInfo) -> Dict: + """Filter parameters based on the learning rate scheduler's signature. + + Parameters + ---------- + user_params : dict + User parameters. + values : ValidationInfo + Pydantic field validation info, used to get the scheduler name. + + Returns + ------- + Dict + Filtered scheduler parameters. + Raises + ------ + ValueError + If the scheduler is StepLR and the step_size parameter is not specified. + """ # retrieve the corresponding scheduler class scheduler_class = getattr(optim.lr_scheduler, values.data["name"]) # filter the user parameters according to the scheduler's signature parameters = filter_parameters(scheduler_class, user_params) - # TODO warn about unused parameters + if values.data["name"] == "StepLR" and "step_size" not in parameters: + raise ValueError( + "StepLR scheduler requires `step_size` parameter, check that it has " + "correctly been specified in `parameters`." + ) return parameters - diff --git a/src/careamics/config/pixel_masking.py b/src/careamics/config/pixel_masking.py deleted file mode 100644 index 26a44b2e..00000000 --- a/src/careamics/config/pixel_masking.py +++ /dev/null @@ -1,178 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Dict, Union - -from pydantic import BaseModel, ConfigDict, Field, field_validator - -# TODO where is all this used? -class MaskingStrategyType(str, Enum): - """Available masking strategy. - - Currently supported strategies: - - - default: default masking strategy of Noise2Void (uniform sampling of neighbors). - - median: median masking strategy of N2V2. - """ - - NONE = "none" - DEFAULT = "default" - MEDIAN = "median" - - @classmethod - def validate_masking_strategy_type( - cls, masking_strategy: Union[str, MaskingStrategy], parameters: dict - ) -> None: - """Validate masking strategy and its parameters. - - Returns - ------- - MaskingStrategy - Validated masking strategy. - - Raises - ------ - ValueError - If the masking strategy is not supported. - """ - if masking_strategy not in [ - MaskingStrategyType.NONE, - MaskingStrategyType.DEFAULT, - MaskingStrategyType.MEDIAN, - ]: - raise ValueError( - f"Incorrect value for asking strategy {masking_strategy}." - f"Please refer to the documentation" # TODO add link to documentation - ) - if masking_strategy == MaskingStrategyType.DEFAULT: - DefaultMaskingStrategy(**parameters) - return ( - DefaultMaskingStrategy().model_dump() if not parameters else parameters - ) - elif masking_strategy == MaskingStrategyType.MEDIAN: - MedianMaskingStrategy(**parameters) - return ( - MedianMaskingStrategy().model_dump() if not parameters else parameters - ) - - -class MaskingStrategy(BaseModel): - """_summary_. 
- - _extended_summary_ - - Parameters - ---------- - BaseModel : _type_ - _description_ - """ - - model_config = ConfigDict( - use_enum_values=True, - protected_namespaces=(), # allows to use model_* as a field name - validate_assignment=True, - ) - - strategy_type: MaskingStrategyType - parameters: Dict = Field(default_factory=dict, validate_default=True) - - @field_validator("parameters") - def validate_parameters(cls, data, values) -> Dict: - """_summary_. - - Parameters - ---------- - parameters : Dict - _description_ - - Returns - ------- - Dict - _description_ - """ - if values.data["strategy_type"] not in [ - MaskingStrategyType.DEFAULT, - MaskingStrategyType.MEDIAN, - ]: - raise ValueError( - f"Incorrect masking strategy {values.data['strategy_type']}." - f"Please refer to the documentation" # TODO add link to documentation - ) - parameters = MaskingStrategyType.validate_masking_strategy_type( - values.data["strategy_type"], data - ) - return parameters - - # def model_dump( - # self, exclude_optionals: bool = True, *args: List, **kwargs: Dict - # ) -> Dict: - # """ - # Override model_dump method. - - # The purpose is to ensure export smooth import to yaml. It includes: - # - remove entries with None value. - # - remove optional values if they have the default value. - - # Parameters - # ---------- - # exclude_optionals : bool, optional - # Whether to exclude optional arguments if they are default, by default True. - # *args : List - # Positional arguments, unused. - # **kwargs : Dict - # Keyword arguments, unused. - - # Returns - # ------- - # Dict - # Dictionary representation of the model. - # """ - # dictionary = super().model_dump(exclude_none=True) - - # if exclude_optionals is True: - # # remove optional arguments if they are default - # defaults = { - # "model": { - # "architecture": "UNet", - # "parameters": {"depth": 2, "num_channels_init": 32}, - # }, - # # MaskingStrategy() - # "masking_strategy": { - # "strategy_type": "default", - # "parameters": {"masked_pixel_percentage": 0.2, "roi_size": 11}, - # }, - # } - - # remove_default_optionals(dictionary, defaults) - - # return dictionary - - -class DefaultMaskingStrategy(BaseModel): - """Default masking strategy of Noise2Void. - - Parameters - ---------- - masked_pixel_percentage : float - Percentage of pixels to be masked. - roi_size : float - Size of the region of interest (ROI). - """ - - masked_pixel_percentage: float = Field(default=0.2, ge=0.01, le=21.0) - roi_size: float = Field(default=11, ge=3, le=21) - - -class MedianMaskingStrategy(BaseModel): - """Median masking strategy of N2V2. - - Parameters - ---------- - masked_pixel_percentage : float - Percentage of pixels to be masked. - roi_size : float - Size of the region of interest (ROI). 
- """ - - masked_pixel_percentage: float = Field(default=0.2, ge=0.01, le=21.0) - roi_size: float = Field(default=11, ge=3, le=21) diff --git a/src/careamics/config/support/__init__.py b/src/careamics/config/support/__init__.py index cc376fc0..f951da73 100644 --- a/src/careamics/config/support/__init__.py +++ b/src/careamics/config/support/__init__.py @@ -1,5 +1,23 @@ -from .supported_architectures import SupportedArchitecture +__all__ = [ + "SupportedArchitecture", + "SupportedActivation", + "SupportedOptimizer", + "SupportedScheduler", + "SupportedLoss", + "SupportedAlgorithm", + "SupportedPixelManipulation", + "SupportedTransform", + "SupportedData", + "get_all_transforms", +] + + from .supported_activations import SupportedActivation -from .supported_optimizers import SupportedOptimizer, SupportedScheduler +from .supported_algorithms import SupportedAlgorithm +from .supported_architectures import SupportedArchitecture +from .supported_data import SupportedData +from .supported_extraction_strategies import SupportedExtractionStrategy from .supported_losses import SupportedLoss -from .supported_algorithms import SupportedAlgorithm \ No newline at end of file +from .supported_optimizers import SupportedOptimizer, SupportedScheduler +from .supported_pixel_manipulations import SupportedPixelManipulation +from .supported_transforms import SupportedTransform, get_all_transforms diff --git a/src/careamics/config/support/supported_activations.py b/src/careamics/config/support/supported_activations.py index 865d3ac4..d7c84ae3 100644 --- a/src/careamics/config/support/supported_activations.py +++ b/src/careamics/config/support/supported_activations.py @@ -1,6 +1,20 @@ -from enum import Enum +from careamics.utils import BaseEnum -class SupportedActivation(str, Enum): + +class SupportedActivation(str, BaseEnum): + """Supported activation functions. + + - None, no activation will be used. + - Sigmoid + - Softmax + - Tanh + - ReLU + - LeakyReLU + + All activations are defined in PyTorch. + + See: https://pytorch.org/docs/stable/nn.html#loss-functions + """ NONE = "None" SIGMOID = "Sigmoid" diff --git a/src/careamics/config/support/supported_algorithms.py b/src/careamics/config/support/supported_algorithms.py index 49166f0b..fb040aa5 100644 --- a/src/careamics/config/support/supported_algorithms.py +++ b/src/careamics/config/support/supported_algorithms.py @@ -1,19 +1,31 @@ -from enum import Enum +from typing import List -# python 3.11: https://docs.python.org/3/library/enum.html -class SupportedAlgorithm(str, Enum): - """ - Available types of algorithms. +from careamics.utils import BaseEnum - Currently supported algorithms: - - n2v - """ - # CARE = "care" +class SupportedAlgorithm(str, BaseEnum): + """Algorithms available in CAREamics. + + - n2v: a self-supervised algorithm using blind-spot training to denoise + images, Krull et al., CVF (2019). + - n2v2: an iteration of N2V that removes checkboard artefacts, Hoeck et al., + ECCV (2022) + - structn2v: an iteration of N2V that uses a mask to remove horizontal or vertical + structured noise, Broaddus et al., ISBI (ISBI). + - custom: Custom algorithm, allows tuning CAREamics parameters without constraints. 
+ """ + N2V = "n2v" - N2V2 = "n2v2" # TODO to decide whether to remove this + N2V2 = "n2v2" + STRUCTN2V = "structn2v" + CUSTOM = "custom" + # CARE = "care" # N2N = "n2n" # PN2V = "pn2v" # HDN = "hdn" - # CUSTOM = "custom" - # SEGM = "segmentation" + # SEG = "segmentation" + + @classmethod + def get_unsupervised_algorithms(cls) -> List[str]: + """Return all unsupervised algorithms.""" + return [cls.N2V.value, cls.N2V2.value, cls.STRUCTN2V.value] diff --git a/src/careamics/config/support/supported_architectures.py b/src/careamics/config/support/supported_architectures.py index 7d5c826c..2747470a 100644 --- a/src/careamics/config/support/supported_architectures.py +++ b/src/careamics/config/support/supported_architectures.py @@ -1,7 +1,18 @@ -from enum import Enum +from careamics.utils import BaseEnum + + +class SupportedArchitecture(str, BaseEnum): + """Supported architectures. + + # TODO add details, in particular where to find the API for the models + + - UNet: classical UNet compatible with N2V2 + - VAE: variational Autoencoder + - Custom: custom model registered with `@register_model` decorator + """ -class SupportedArchitecture(str, Enum): - UNET = "UNet" VAE = "VAE" CUSTOM = "Custom" + # HVAE? + # CUSTOM = "Custom" # TODO create mechanism for that diff --git a/src/careamics/config/support/supported_extensions.py b/src/careamics/config/support/supported_data.py similarity index 51% rename from src/careamics/config/support/supported_extensions.py rename to src/careamics/config/support/supported_data.py index 13d00729..765155f5 100644 --- a/src/careamics/config/support/supported_extensions.py +++ b/src/careamics/config/support/supported_data.py @@ -1,19 +1,15 @@ -from enum import Enum +from __future__ import annotations -# TODO: change into supported_dataset? -class SupportedExtension(str, Enum): - """ - Supported extensions for input data. +from careamics.utils import BaseEnum - Currently supported: - - tif/tiff: .tiff files. - - zarr: zarr array. - """ +class SupportedData(str, BaseEnum): + ARRAY = "array" TIFF = "tiff" - TIF = "tif" + CUSTOM = "custom" # ZARR = "zarr" + # TODO remove? @classmethod def _missing_(cls, value: object) -> str: """ @@ -45,4 +41,28 @@ def _missing_(cls, value: object) -> str: return member # still missing - return super()._missing_(value) \ No newline at end of file + return super()._missing_(value) + + @classmethod + def get_extension(cls, data_type: SupportedData) -> str: + """ + Path.rglob and fnmatch compatible extension. + + Parameters + ---------- + data_type : SupportedData + Data type. + + Returns + ------- + str + Corresponding extension. + """ + if data_type == cls.ARRAY: + raise NotImplementedError(f"Data {data_type} are not loaded from file.") + elif data_type == cls.TIFF: + return "*.tif*" + elif data_type == cls.CUSTOM: + return "*.*" + else: + raise ValueError(f"Data type {data_type} is not supported.") diff --git a/src/careamics/dataset/extraction_strategy.py b/src/careamics/config/support/supported_extraction_strategies.py similarity index 79% rename from src/careamics/dataset/extraction_strategy.py rename to src/careamics/config/support/supported_extraction_strategies.py index dc74199d..d08c513f 100644 --- a/src/careamics/dataset/extraction_strategy.py +++ b/src/careamics/config/support/supported_extraction_strategies.py @@ -3,15 +3,16 @@ This module defines the various extraction strategies available in CAREamics. 
""" -from enum import Enum +from careamics.utils import BaseEnum -class ExtractionStrategy(str, Enum): +class SupportedExtractionStrategy(str, BaseEnum): """ Available extraction strategies. Currently supported: - random: random extraction. + # TODO - sequential: grid extraction, can miss edge values. - tiled: tiled extraction, covers the whole image. """ @@ -20,3 +21,4 @@ class ExtractionStrategy(str, Enum): RANDOM_ZARR = "random_zarr" SEQUENTIAL = "sequential" TILED = "tiled" + NONE = "none" diff --git a/src/careamics/config/support/supported_losses.py b/src/careamics/config/support/supported_losses.py index f23ae18a..eeef54cd 100644 --- a/src/careamics/config/support/supported_losses.py +++ b/src/careamics/config/support/supported_losses.py @@ -1,16 +1,8 @@ -from enum import Enum +from careamics.utils import BaseEnum # TODO register loss with custom_loss decorator? -class SupportedLoss(str, Enum): - """ - Available loss functions. - - Currently supported losses: - - - n2v: Noise2Void loss. - """ - +class SupportedLoss(str, BaseEnum): MSE = "mse" MAE = "mae" N2V = "n2v" @@ -18,4 +10,4 @@ class SupportedLoss(str, Enum): # HDN = "hdn" # CE = "ce" # DICE = "dice" - CUSTOM = "custom" \ No newline at end of file + # CUSTOM = "custom" # TODO create mechanism for that diff --git a/src/careamics/config/support/supported_optimizers.py b/src/careamics/config/support/supported_optimizers.py index 9aee2302..483a563e 100644 --- a/src/careamics/config/support/supported_optimizers.py +++ b/src/careamics/config/support/supported_optimizers.py @@ -1,14 +1,7 @@ -"""Convenience functions to instantiate torch.optim optimizers and schedulers.""" -from enum import Enum +from careamics.utils import BaseEnum -class SupportedOptimizer(str, Enum): - """ - Supported optimizers. - - Currently only supports Adam and SGD. - """ - +class SupportedOptimizer(str, BaseEnum): # ASGD = "ASGD" # Adadelta = "Adadelta" # Adagrad = "Adagrad" @@ -24,13 +17,7 @@ class SupportedOptimizer(str, Enum): # SparseAdam = "SparseAdam" -class SupportedScheduler(str, Enum): - """ - Supported learning rate schedulers. - - Currently only supports ReduceLROnPlateau and StepLR. - """ - +class SupportedScheduler(str, BaseEnum): # ChainedScheduler = "ChainedScheduler" # ConstantLR = "ConstantLR" # CosineAnnealingLR = "CosineAnnealingLR" diff --git a/src/careamics/config/support/supported_pixel_manipulations.py b/src/careamics/config/support/supported_pixel_manipulations.py new file mode 100644 index 00000000..1d22f085 --- /dev/null +++ b/src/careamics/config/support/supported_pixel_manipulations.py @@ -0,0 +1,15 @@ +from careamics.utils import BaseEnum + + +class SupportedPixelManipulation(str, BaseEnum): + """_summary_. + + - Uniform: Replace masked pixel value by a (uniformly) randomly selected neighbor + pixel value. + - Median: Replace masked pixel value by the mean of the neighborhood. 
+ """ + + # TODO docs + + UNIFORM = "Uniform" + MEDIAN = "Median" diff --git a/src/careamics/config/support/supported_transforms.py b/src/careamics/config/support/supported_transforms.py index 3f7283d5..2455d1be 100644 --- a/src/careamics/config/support/supported_transforms.py +++ b/src/careamics/config/support/supported_transforms.py @@ -1,11 +1,50 @@ -from enum import Enum +from inspect import getmembers, isclass +import albumentations as Aug -# TODO: custom -class SupportedTransform(str, Enum): +from careamics import transforms +from careamics.utils import BaseEnum - FLIP = "Flip" - RANDOM_ROTATE90 = "RandomRotate90" - NORMALIZE_WO_TARGET = "NormalizeWithoutTarget" - MANIPULATE_N2V = "ManipulateN2V" - CUSTOM = "Custom" \ No newline at end of file +ALL_TRANSFORMS = dict(getmembers(Aug, isclass) + getmembers(transforms, isclass)) + + +def get_all_transforms() -> dict: + """Return all the transforms accepted by CAREamics. + + This includes all transforms from Albumentations (see https://albumentations.ai/), + and custom transforms implemented in CAREamics. + + Note that while any Albumentations transform can be used in CAREamics, no check are + implemented to verify the compatibility of any other transforms than the ones + officially supported (see SupportedTransforms). + + Returns + ------- + dict + A dictionary with all the transforms accepted by CAREamics, where the keys are + the transform names and the values are the transform classes. + """ + return ALL_TRANSFORMS + + +class SupportedTransform(str, BaseEnum): + """Transforms officially supported by CAREamics. + + - Flip: from Albumentations, randomly flip the input horizontally, vertically or + both, parameter `p` can be used to set the probability to apply the transform. + - XYRandomRotate90: #TODO + - Normalize # TODO add details, in particular about the parameters + - ManipulateN2V # TODO add details, in particular about the parameters + - NDFlip + + Note that while any Albumentations (see https://albumentations.ai/) transform can be + used in CAREamics, no check are implemented to verify the compatibility of any other + transforms than the ones officially supported. + """ + + NDFLIP = "NDFlip" + XY_RANDOM_ROTATE90 = "XYRandomRotate90" + NORMALIZE = "Normalize" + N2V_MANIPULATE_UNIFORM = "N2VManipulateUniform" + N2V_MANIPULATE_MEDIAN = "N2VManipulateMedian" + # CUSTOM = "Custom" diff --git a/src/careamics/config/training.py b/src/careamics/config/training_model.py similarity index 99% rename from src/careamics/config/training.py rename to src/careamics/config/training_model.py index 467b61fc..72692ca1 100644 --- a/src/careamics/config/training.py +++ b/src/careamics/config/training_model.py @@ -34,6 +34,7 @@ class AMP(BaseModel): init_scale: int = Field(default=1024, ge=512, le=65536) @field_validator("init_scale") + @classmethod def power_of_two(cls, scale: int) -> int: """ Validate that init_scale is a power of two. 
diff --git a/src/careamics/config/transform.py b/src/careamics/config/transform.py deleted file mode 100644 index f12be618..00000000 --- a/src/careamics/config/transform.py +++ /dev/null @@ -1,90 +0,0 @@ -from __future__ import annotations - -from inspect import getmembers, isclass -from typing import Literal - -from pydantic import BaseModel, ConfigDict, Field, field_validator, ValidationInfo -import albumentations as Aug -import careamics.transforms.normalize_without_target as custom_transforms -from careamics.utils.torch_utils import filter_parameters - -ALL_TRANSFORMS = dict(getmembers(Aug, isclass) + getmembers(custom_transforms, isclass)) - - -class TransformType: - """Available transforms. - - Can be applied both to an image and to a patch - - """ - - @classmethod - def validate_transform_type(cls, transform: str, parameters: dict) -> None: - """_summary_. - - Parameters - ---------- - transform : Union[str, Transform] - _description_ - parameters : dict - _description_ - - Returns - ------- - BaseModel - _description_ - """ - if transform not in ALL_TRANSFORMS.keys(): - raise ValueError( - f"Incorrect transform name {transform}." - f"Please refer to the documentation" # TODO add link to documentation - ) - # TODO validate provided params against default params - # TODO validate no duplicates - return transform, parameters - - -class TransformModel(BaseModel): - """Whole image transforms. - - Parameters - ---------- - BaseModel : _type_ - _description_ - """ - - model_config = ConfigDict( - validate_assignment=True, - ) - - name: Literal[ - "FLIP", - "RANDOM_ROTATE90", - "NORMALIZE_WO_TARGET", - "MANIPULATE_N2V", - "CUSTOM" - ] - parameters: dict = Field(default={}, validate_default=True) - - - # TODO remove this - @field_validator("parameters") - def validate_transform(cls, params: dict, value: ValidationInfo) -> dict: - """Validate transform parameters.""" - - - transform_name = value.data["name"] - - # filter the user parameters according to the scheduler's signature - parameters, missing_mandatory = filter_parameters( - ALL_TRANSFORMS[transform_name], params - ) - - # if there are missing parameters, raise an error - if len(missing_mandatory) > 0: - raise ValueError( - f"Optimizer {transform_name} requires the following parameters: " - f"{missing_mandatory}." - ) - - return parameters \ No newline at end of file diff --git a/src/careamics/config/transform_model.py b/src/careamics/config/transform_model.py new file mode 100644 index 00000000..60a618e7 --- /dev/null +++ b/src/careamics/config/transform_model.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator + +from careamics.utils.torch_utils import filter_parameters + +from .support import get_all_transforms + + +class TransformModel(BaseModel): + """Pydantic model used to represent an image transformation. + + Accepted transformations are ManipulateN2V, NormalizeWithoutTarget, and all + transformations in Albumentations (see https://albumentations.ai/). + """ + + model_config = ConfigDict( + validate_assignment=True, + ) + + name: str + parameters: dict = Field(default={}, validate_default=True) + + @field_validator("name", mode="plain") + @classmethod + def validate_name(cls, transform_name: str) -> str: + """Validate transform name based on the list of all accepted transforms.""" + if transform_name not in get_all_transforms().keys(): + raise ValueError( + f"Incorrect transform name {transform_name}. 
Accepted transforms " + f"are ManipulateN2V, NormalizeWithoutTarget, and all transformations " + f"in Albumentations (see https://albumentations.ai/)." + ) + return transform_name + + @model_validator(mode="after") + def validate_transform(self) -> TransformModel: + """Validate transform parameters based on the transform's signature.""" + # filter the user parameters according to the transform's signature + parameters = filter_parameters(get_all_transforms()[self.name], self.parameters) + + # try to instantiate the transform with the filtered parameters + try: + get_all_transforms()[self.name](**parameters) + except Exception as e: + raise ValueError( + f"Error while trying to instantiate the transform {self.name} " + f"with the provided parameters: {parameters}. Are you missing some " + f"mandatory parameters?" + ) from e + + # update the parameters with the filtered ones + # note: assigment would trigger an infinite recursion + self.parameters.clear() + self.parameters.update(parameters) + + return self diff --git a/src/careamics/dataset/__init__.py b/src/careamics/dataset/__init__.py index cf7c1ed0..c519761e 100644 --- a/src/careamics/dataset/__init__.py +++ b/src/careamics/dataset/__init__.py @@ -1 +1,3 @@ """Dataset module.""" +from .in_memory_dataset import InMemoryDataset +from .iterable_dataset import IterableDataset diff --git a/src/careamics/dataset/dataset_utils.py b/src/careamics/dataset/dataset_utils.py deleted file mode 100644 index 4f0c9729..00000000 --- a/src/careamics/dataset/dataset_utils.py +++ /dev/null @@ -1,528 +0,0 @@ -"""Convenience methods for datasets.""" -import logging -from pathlib import Path -from typing import Callable, List, Tuple, Union - -import albumentations as Aug -import numpy as np -import tifffile -import zarr - -from ..config.transform import ALL_TRANSFORMS -from ..utils.logging import get_logger - -logger = get_logger(__name__) - - -def data_type_validator(data_type: str, read_source_func) -> None: - """Validate the data type. - - Parameters - ---------- - data_type : str - Data type. - read_source_func : Callable - Function to read the data. - - Raises - ------ - ValueError - If the data type is not supported without a read_source_func. - """ - if data_type == "custom" and read_source_func is None: - raise ValueError( - f"Data type {data_type} is not supported without a read_source_func." - ) - - -def approximate_file_size(filename: Path) -> int: - """ - Approximate file size. - - Parameters - ---------- - filename : Path - Path to a file. - - Returns - ------- - int - Approximate file size in mbytes. - """ - try: - pointer = tifffile.TiffFile(filename) - return pointer.filehandle.size / 1024**2 - except (tifffile.TiffFileError, StopIteration, FileNotFoundError): - logger.warning(f"File {filename} is not a valid tiff file or is empty.") - return 0 - - -def get_file_sizes(files: List[Path]) -> List[int]: - """ - Get file sizes. - - Parameters - ---------- - files : List[Path] - List of paths to files. - - Returns - ------- - List[int] - List of file sizes in mbytes. - """ - return sum([approximate_file_size(file) for file in files]) - - -def list_files( - data_path: Union[str, Path, List[Union[str, Path]]], - data_type: str, - return_list: bool = True, -) -> Tuple[List[Path], int]: - """Creates a list of paths to source tiff files from path string. - - Parameters - ---------- - data_path : str - Path to the folder containing the data. - data_type : str - data format, e.g. 
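In practice, the two validators above mean that unknown parameters are dropped and the remaining ones must be sufficient to instantiate the transform. A sketch, assuming `filter_parameters` discards unknown keyword arguments as in the optimizer validators:

```python
from careamics.config.transform_model import TransformModel

# "p" is a valid Flip parameter, "not_a_param" is filtered out
t = TransformModel(name="Flip", parameters={"p": 0.5, "not_a_param": 1})
assert t.parameters == {"p": 0.5}

# an unknown name fails the `name` validator
# TransformModel(name="NotATransform")  # raises ValueError
```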
tif - return_list : bool, optional - Whether to return a list of paths or str, by default True - - Returns - ------- - List[Path] - List of pathlib.Path objects. - int - Approximate size of the files in mbytes. - """ - data_path = Path(data_path) if not isinstance(data_path, list) else data_path - data_type = data_type if data_type != "custom" else "" - if isinstance(data_path, list): - files = [] - for path in data_path: - files.append(list_files(path, data_type, return_list=False)) - if len(files) == 0: - raise ValueError( - f"Data path {data_path} is empty or files with extension {data_type}" - f" are not found." - ) - approx_size = get_file_sizes(files) - return files, approx_size - - elif data_path.is_dir(): - if return_list: - files = sorted(Path(data_path).rglob(f"*.{data_type}*")) - if len(files) == 0: - raise ValueError( - f"Data path {data_path} is empty or files with extension" - f" {data_type} are not found." - ) - return files, get_file_sizes(files) - else: - files = sorted(Path(data_path).rglob(f"*.{data_type}*"))[0] - if len(files) == 0: - raise ValueError( - f"Data path {data_path} is empty or files with extension" - f" {data_type} are not found." - ) - - return files, get_file_sizes(files) - - elif data_path.is_file(): - if not data_path.suffix == f".{data_type}": - raise ValueError(f"Wrong extension {data_type}.") - approx_size = approximate_file_size(data_path) - return [data_path] if return_list else data_path, approx_size - - else: - raise ValueError( - f"Data path {data_path} is not a valid directory or a list of filenames." - ) - - -def get_shape_order(shape_in: Tuple, axes_in: str, ref_axes: str = "STCZYX"): - """Return the new shape and axes of x, ordered according to the reference axes. - - Parameters - ---------- - shape_in : Tuple - Input shape. - ref_axes : str - Reference axes. - axes_in : str - Input axes. - - Returns - ------- - Tuple - New shape. - str - New axes. - Tuple - Indices of axes in the new axes order. - """ - indices = [axes_in.find(k) for k in ref_axes] - - # remove all non-existing axes (index == -1) - indices = tuple(filter(lambda k: k != -1, indices)) - - # find axes order and get new shape - new_axes = [axes_in[ind] for ind in indices] - new_shape = tuple([shape_in[ind] for ind in indices]) - - return new_shape, "".join(new_axes), indices - - -def list_diff(list1: List, list2: List) -> List: - """Return the difference between two lists. - - Parameters - ---------- - list1 : List - First list. - list2 : List - Second list. - - Returns - ------- - List - Difference between the two lists. - """ - return list(set(list1) - set(list2)) - - -def reshape_data(x: np.ndarray, axes: str): - """Reshape the data to 'SZYXC' or 'SYXC', merging 'S' and 'T' channels if necessary. - - Parameters - ---------- - x : np.ndarray - Input array. - axes : str - Description of axes in format STCZYX. - - Returns - ------- - np.ndarray - Reshaped array. - str - New axes string. - """ - _x = x - _axes = axes - - # sanity checks - if len(_axes) != len(_x.shape): - raise ValueError(f"Incompatible data ({_x.shape}) and axes ({_axes}).") - - # get new x shape - new_x_shape, new_axes, indices = get_shape_order(_x.shape, _axes) - - # if S is not in the list of axes, then add a singleton S - if "S" not in new_axes: - new_axes = "S" + new_axes - _x = _x[np.newaxis, ...] 
- new_x_shape = (1,) + new_x_shape - - # need to change the array of indices - indices = [0] + [1 + i for i in indices] - - # reshape by moving axes - destination = list(range(len(indices))) - _x = np.moveaxis(_x, indices, destination) - - # remove T if necessary - if "T" in new_axes: - new_x_shape = (-1,) + new_x_shape[2:] # remove T and S - new_axes = new_axes.replace("T", "") - - # reshape S and T together - _x = _x.reshape(new_x_shape) - - # add channel - if "C" not in new_axes: - # Add channel axis after S - _x = np.expand_dims(_x, new_axes.index("S") + 1) - # get the location of the 1st spatial axis - c_coord = len(new_axes.replace("Z", "").replace("YX", "")) - new_axes = new_axes[:c_coord] + "C" + new_axes[c_coord:] - - return _x, new_axes - - -def validate_files(train_files: List[Path], target_files: List[Path]) -> None: - """ - Validate that the train and target folders are consistent. - - Parameters - ---------- - train_files : List[Path] - List of paths to train files. - target_files : List[Path] - List of paths to target files. - - Raises - ------ - ValueError - If the number of files in train and target folders is not the same. - """ - if len(train_files) != len(target_files): - raise ValueError( - f"Number of train files ({len(train_files)}) is not equal to the number of" - f"target files ({len(target_files)})." - ) - if {f.name for f in train_files} != {f.name for f in target_files}: - raise ValueError("Some filenames in Train and target folders are not the same.") - - -def read_tiff(file_path: Path, axes: str) -> np.ndarray: - """ - Read a tiff file and return a numpy array. - - Parameters - ---------- - file_path : Path - Path to a file. - axes : str - Description of axes in format STCZYX. - - Returns - ------- - np.ndarray - Resulting array. - - Raises - ------ - ValueError - If the file failed to open. - OSError - If the file failed to open. - ValueError - If the file is not a valid tiff. - ValueError - If the data dimensions are incorrect. - ValueError - If the axes length is incorrect. - """ - if file_path.suffix[:4] == ".tif": - try: - array = tifffile.imread(file_path) - except (ValueError, OSError) as e: - logging.exception(f"Exception in file {file_path}: {e}, skipping it.") - raise e - else: - raise ValueError(f"File {file_path} is not a valid tiff.") - - if len(array.shape) < 2 or len(array.shape) > 6: - raise ValueError( - f"Incorrect data dimensions. Must be 2, 3 or 4 (got {array.shape} for" - f"file {file_path})." - ) - - return array - - -def read_zarr( - zarr_source: zarr.Group, axes: str -) -> Union[zarr.core.Array, zarr.storage.DirectoryStore, zarr.hierarchy.Group]: - """Reads a file and returns a pointer. - - Parameters - ---------- - file_path : Path - pathlib.Path object containing a path to a file - - Returns - ------- - np.ndarray - Pointer to zarr storage - - Raises - ------ - ValueError, OSError - if a file is not a valid tiff or damaged - ValueError - if data dimensions are not 2, 3 or 4 - ValueError - if axes parameter from config is not consistent with data dimensions - """ - if isinstance(zarr_source, zarr.hierarchy.Group): - array = zarr_source[0] - - elif isinstance(zarr_source, zarr.storage.DirectoryStore): - raise NotImplementedError("DirectoryStore not supported yet") - - elif isinstance(zarr_source, zarr.core.Array): - # array should be of shape (S, (C), (Z), Y, X), iterating over S ? 
- if zarr_source.dtype == "O": - raise NotImplementedError("Object type not supported yet") - else: - array = zarr_source - else: - raise ValueError(f"Unsupported zarr object type {type(zarr_source)}") - - # sanity check on dimensions - if len(array.shape) < 2 or len(array.shape) > 4: - raise ValueError( - f"Incorrect data dimensions. Must be 2, 3 or 4 (got {array.shape})." - ) - - # sanity check on axes length - if len(axes) != len(array.shape): - raise ValueError(f"Incorrect axes length (got {axes}).") - - # arr = fix_axes(arr, axes) - return array - - -def get_patch_transform( - patch_transforms: Union[List, Aug.Compose, None], - mean: float, - std: float, - target: bool, - normalize_mask: bool = True, -) -> Union[None, Callable]: - """Return a pixel manipulation function. - - Used in N2V family of algorithms. - - Parameters - ---------- - patch_transform_type : str - Type of patch transform. - target : bool - Whether the transform is applied to the target(if the target is present). - mode : str - Train or predict mode. - - Returns - ------- - Union[None, Callable] - Patch transform function. - """ - if patch_transforms is None: - return Aug.Compose( - [Aug.NoOp()], - additional_targets={"target": "image"} - if (target and normalize_mask) - else {}, - ) - elif isinstance(patch_transforms, list): - patch_transforms[[t["name"] for t in patch_transforms].index("Normalize")][ - "parameters" - ] = { - "mean": mean, - "std": std, - "max_pixel_value": 1, - } - # TODO not very readable - return Aug.Compose( - [ - ALL_TRANSFORMS[transform["name"]](**transform["parameters"]) - if "parameters" in transform - else ALL_TRANSFORMS[transform["name"]]() - for transform in patch_transforms - ], - additional_targets={"target": "image"} - if (target and normalize_mask) - else {}, - ) - elif isinstance(patch_transforms, Aug.Compose): - return Aug.Compose( - [ - t - for t in patch_transforms.transforms[:-1] - if not isinstance(t, Aug.Normalize) - ] - + [ - Aug.Normalize(mean=mean, std=std, max_pixel_value=1), - patch_transforms.transforms[-1] - if patch_transforms.transforms[-1].__class__.__name__ == "ManipulateN2V" - else Aug.NoOp(), - ], - additional_targets={"target": "image"} - if (target and normalize_mask) - else {}, - ) - else: - raise ValueError( - f"Incorrect patch transform type {patch_transforms}. " - f"Please refer to the documentation." # TODO add link to documentation - ) - - -# TODO add tta -def get_patch_transform_predict( - patch_transforms: Union[List, Aug.Compose, None], - mean: float, - std: float, - target: bool, - normalize_mask: bool = True, -) -> Union[None, Callable]: - """Return a pixel manipulation function. - - Used in N2V family of algorithms. - - Parameters - ---------- - patch_transform_type : str - Type of patch transform. - target : bool - Whether the transform is applied to the target(if the target is present). - mode : str - Train or predict mode. - - Returns - ------- - Union[None, Callable] - Patch transform function. 
- """ - if patch_transforms is None: - return Aug.Compose( - [Aug.NoOp()], - additional_targets={"target": "image"} - if (target and normalize_mask) - else {}, - ) - elif isinstance(patch_transforms, list): - patch_transforms[[t["name"] for t in patch_transforms].index("Normalize")][ - "parameters" - ] = { - "mean": mean, - "std": std, - "max_pixel_value": 1, - } - # TODO not very readable - return Aug.Compose( - [ - ALL_TRANSFORMS[transform["name"]](**transform["parameters"]) - if "parameters" in transform - else ALL_TRANSFORMS[transform["name"]]() - for transform in patch_transforms - if transform["name"] != "ManipulateN2V" - ], - additional_targets={"target": "image"} - if (target and normalize_mask) - else {}, - ) - elif isinstance(patch_transforms, Aug.Compose): - return Aug.Compose( - [ - t - for t in patch_transforms.transforms[:-1] - if not isinstance(t, Aug.Normalize) - ] - + [ - Aug.Normalize(mean=mean, std=std, max_pixel_value=1), - ], - additional_targets={"target": "image"} - if (target and normalize_mask) - else {}, - ) - else: - raise ValueError( - f"Incorrect patch transform type {patch_transforms}. " - f"Please refer to the documentation." # TODO add link to documentation - ) diff --git a/src/careamics/dataset/dataset_utils/__init__.py b/src/careamics/dataset/dataset_utils/__init__.py new file mode 100644 index 00000000..0543a8ee --- /dev/null +++ b/src/careamics/dataset/dataset_utils/__init__.py @@ -0,0 +1,4 @@ +from .dataset_utils import reshape_array +from .file_utils import get_files_size, list_files, validate_source_target_files +from .read_tiff import read_tiff +from .read_utils import get_read_func diff --git a/src/careamics/dataset/dataset_utils/dataset_utils.py b/src/careamics/dataset/dataset_utils/dataset_utils.py new file mode 100644 index 00000000..3899543c --- /dev/null +++ b/src/careamics/dataset/dataset_utils/dataset_utils.py @@ -0,0 +1,113 @@ +"""Convenience methods for datasets.""" +from typing import List, Tuple + +import numpy as np + +from careamics.utils.logging import get_logger + +logger = get_logger(__name__) + + +def get_shape_order(shape_in: Tuple, axes_in: str, ref_axes: str = "STCZYX"): + """Return the new shape and axes of x, ordered according to the reference axes. + + Parameters + ---------- + shape_in : Tuple + Input shape. + ref_axes : str + Reference axes. + axes_in : str + Input axes. + + Returns + ------- + Tuple + New shape. + str + New axes. + Tuple + Indices of axes in the new axes order. + """ + indices = [axes_in.find(k) for k in ref_axes] + + # remove all non-existing axes (index == -1) + indices = tuple(filter(lambda k: k != -1, indices)) + + # find axes order and get new shape + new_axes = [axes_in[ind] for ind in indices] + new_shape = tuple([shape_in[ind] for ind in indices]) + + return new_shape, "".join(new_axes), indices + + +def list_diff(list1: List, list2: List) -> List: + """Return the difference between two lists. + + Parameters + ---------- + list1 : List + First list. + list2 : List + Second list. + + Returns + ------- + List + Difference between the two lists. + """ + return list(set(list1) - set(list2)) + + +def reshape_array(x: np.ndarray, axes: str) -> np.ndarray: + """Reshape the data to 'SZYXC' or 'SYXC', merging 'S' and 'T' channels if necessary. + + Parameters + ---------- + x : np.ndarray + Input array. + axes : str + Description of axes in format STCZYX. + + Returns + ------- + np.ndarray + Reshaped array. 
+ """ + _x = x + _axes = axes + + # sanity checks + if len(_axes) != len(_x.shape): + raise ValueError(f"Incompatible data shape ({_x.shape}) and axes ({_axes}).") + + # get new x shape + new_x_shape, new_axes, indices = get_shape_order(_x.shape, _axes) + + # if S is not in the list of axes, then add a singleton S + if "S" not in new_axes: + new_axes = "S" + new_axes + _x = _x[np.newaxis, ...] + new_x_shape = (1,) + new_x_shape + + # need to change the array of indices + indices = [0] + [1 + i for i in indices] + + # reshape by moving axes + destination = list(range(len(indices))) + _x = np.moveaxis(_x, indices, destination) + + # remove T if necessary + if "T" in new_axes: + new_x_shape = (-1,) + new_x_shape[2:] # remove T and S + new_axes = new_axes.replace("T", "") + + # reshape S and T together + _x = _x.reshape(new_x_shape) + + # add channel + if "C" not in new_axes: + # Add channel axis after S + _x = np.expand_dims(_x, new_axes.index("S") + 1) + + return _x diff --git a/src/careamics/dataset/dataset_utils/file_utils.py b/src/careamics/dataset/dataset_utils/file_utils.py new file mode 100644 index 00000000..d0d39bcf --- /dev/null +++ b/src/careamics/dataset/dataset_utils/file_utils.py @@ -0,0 +1,163 @@ +from fnmatch import fnmatch +from pathlib import Path +from typing import List, Union + +from tifffile import TiffFile, TiffFileError + +from careamics.config.support import SupportedData +from careamics.utils.logging import get_logger + +logger = get_logger(__name__) + + +# TODO no difference with the normal way to get file size +def _approximate_tiff_file_size(filename: Path) -> int: + """ + Approximate TIFF file size in MB. + + Parameters + ---------- + filename : Path + Path to a TIFF file. + + Returns + ------- + int + Approximate file size in MB. + """ + try: + pointer = TiffFile(filename) + return pointer.filehandle.size / 1024**2 + except (TiffFileError, StopIteration, FileNotFoundError): + logger.warning(f"File {filename} is not a valid tiff file or is empty.") + return 0 + + +def get_files_size(files: List[Path]) -> int: + """ + Get files size in MB. + + Parameters + ---------- + files : List[Path] + List of files. + + Returns + ------- + int + Total size of the files in MB. + """ + return sum(f.stat().st_size / 1024**2 for f in files) + + +def list_files( + data_path: Union[str, Path], + data_type: Union[str, SupportedData], + extension_filter: str = "", +) -> List[Path]: + """Creates a recursive list of files in `data_path`. + + If `data_path` is a file, its name is validated against the `data_type` using + `fnmatch`, and the method returns `data_path` itself. + + By default, if `data_type` is equal to `custom`, all files will be listed. To + further filter the files, use `extension_filter`. + + `extension_filter` must be compatible with `fnmatch` and `Path.rglob`, e.g. "*.npy" + or "*.czi". + + Parameters + ---------- + data_path : Union[str, Path] + Path to the folder containing the data. + data_type : Union[str, SupportedData] + One of the supported data type (e.g. tif, custom). + extension_filter : str, optional + Extension filter, by default "". + + Returns + ------- + List[Path] + List of pathlib.Path objects. + + Raises + ------ + FileNotFoundError + If the data path does not exist. + ValueError + If the data path is empty or no files with the extension were found. + ValueError + If the file does not match the requested extension. 
+ """ + # convert to Path + data_path = Path(data_path) + + # raise error if does not exists + if not data_path.exists(): + raise FileNotFoundError(f"Data path {data_path} does not exist.") + + # get extension compatible with fnmatch and rglob search + extension = SupportedData.get_extension(data_type) + + if data_type == SupportedData.CUSTOM and extension_filter != "": + extension = extension_filter + + # search recurively + if data_path.is_dir(): + # search recursively the path for files with the extension + files = sorted(data_path.rglob(extension)) + else: + # raise error if it has the wrong extension + if not fnmatch(data_path, extension): + raise ValueError( + f"File {data_path} does not match the requested extension " + f'"{extension}".' + ) + + # save in list + files = [data_path] + + # raise error if no files were found + if len(files) == 0: + raise ValueError( + f'Data path {data_path} is empty or files with extension "{extension}" ' + f"were not found." + ) + + return files + + +def validate_source_target_files(src_files: List[Path], tar_files: List[Path]) -> None: + """ + Validate source and target path lists. + + The two lists should have the same number of files, and the filenames should match. + + Parameters + ---------- + src_files : List[Path] + List of source files. + tar_files : List[Path] + List of target files. + + Raises + ------ + ValueError + If the number of files in source and target folders is not the same. + ValueError + If some filenames in Train and target folders are not the same. + """ + # check equal length + if len(src_files) != len(tar_files): + raise ValueError( + f"The number of source files ({len(src_files)}) is not equal to the number " + f"of target files ({len(tar_files)})." + ) + + # check identical names + src_names = {f.name for f in src_files} + tar_names = {f.name for f in tar_files} + difference = src_names.symmetric_difference(tar_names) + + if len(difference) > 0: + raise ValueError(f"Source and target files have different names: {difference}.") diff --git a/src/careamics/dataset/dataset_utils/read_tiff.py b/src/careamics/dataset/dataset_utils/read_tiff.py new file mode 100644 index 00000000..7b4dd8e0 --- /dev/null +++ b/src/careamics/dataset/dataset_utils/read_tiff.py @@ -0,0 +1,61 @@ +import logging +from fnmatch import fnmatch +from pathlib import Path + +import numpy as np +import tifffile + +from careamics.config.support import SupportedData +from careamics.utils.logging import get_logger + +logger = get_logger(__name__) + + +def read_tiff(file_path: Path, *args: list, **kwargs: dict) -> np.ndarray: + """ + Read a tiff file and return a numpy array. + + Parameters + ---------- + file_path : Path + Path to a file. + axes : str + Description of axes in format STCZYX. + + Returns + ------- + np.ndarray + Resulting array. + + Raises + ------ + ValueError + If the file failed to open. + OSError + If the file failed to open. + ValueError + If the file is not a valid tiff. + ValueError + If the data dimensions are incorrect. + ValueError + If the axes length is incorrect. + """ + if fnmatch(file_path.suffix, SupportedData.get_extension(SupportedData.TIFF)): + try: + array = tifffile.imread(file_path) + except (ValueError, OSError) as e: + logging.exception(f"Exception in file {file_path}: {e}, skipping it.") + raise e + else: + raise ValueError(f"File {file_path} is not a valid tiff.") + + # check dimensions + # TODO or should this really be done here? 
+    # probably in the LightningDataModule
+    # TODO this should also be centralized somewhere else (validate_dimensions)
+    if len(array.shape) < 2 or len(array.shape) > 6:
+        raise ValueError(
+            f"Incorrect data dimensions. Must be between 2 and 6 (got {array.shape} "
+            f"for file {file_path})."
+        )
+
+    return array
diff --git a/src/careamics/dataset/dataset_utils/read_utils.py b/src/careamics/dataset/dataset_utils/read_utils.py
new file mode 100644
index 00000000..5c74ac79
--- /dev/null
+++ b/src/careamics/dataset/dataset_utils/read_utils.py
@@ -0,0 +1,29 @@
+from typing import Callable
+
+from careamics.config.support import SupportedData
+
+from .read_tiff import read_tiff
+
+
+def get_read_func(data_type: SupportedData) -> Callable:
+    """
+    Get the read function for the data type.
+
+    Parameters
+    ----------
+    data_type : SupportedData
+        Data type.
+
+    Returns
+    -------
+    Callable
+        Read function.
+    """
+    if data_type == SupportedData.ARRAY:
+        return None
+    elif data_type == SupportedData.TIFF:
+        return read_tiff
+    elif data_type == SupportedData.CUSTOM:
+        return None
+    else:
+        raise NotImplementedError(f"Data type {data_type} is not supported.")
diff --git a/src/careamics/dataset/dataset_utils/read_zarr.py b/src/careamics/dataset/dataset_utils/read_zarr.py
new file mode 100644
index 00000000..5878a1cf
--- /dev/null
+++ b/src/careamics/dataset/dataset_utils/read_zarr.py
@@ -0,0 +1,56 @@
+from typing import Union
+
+from zarr import Group, core, hierarchy, storage
+
+
+def read_zarr(
+    zarr_source: Group, axes: str
+) -> Union[core.Array, storage.DirectoryStore, hierarchy.Group]:
+    """Reads a zarr source and returns a pointer to the array.
+
+    Parameters
+    ----------
+    zarr_source : Group
+        Zarr object containing the array.
+
+    Returns
+    -------
+    np.ndarray
+        Pointer to the zarr storage.
+
+    Raises
+    ------
+    ValueError, OSError
+        If the source is not a valid zarr or is damaged.
+    ValueError
+        If the data dimensions are not 2, 3 or 4.
+    ValueError
+        If the axes parameter from the config is not consistent with the data
+        dimensions.
+    """
+    if isinstance(zarr_source, hierarchy.Group):
+        array = zarr_source[0]
+
+    elif isinstance(zarr_source, storage.DirectoryStore):
+        raise NotImplementedError("DirectoryStore not supported yet")
+
+    elif isinstance(zarr_source, core.Array):
+        # array should be of shape (S, (C), (Z), Y, X), iterating over S ?
+        if zarr_source.dtype == "O":
+            raise NotImplementedError("Object type not supported yet")
+        else:
+            array = zarr_source
+    else:
+        raise ValueError(f"Unsupported zarr object type {type(zarr_source)}")
+
+    # sanity check on dimensions
+    if len(array.shape) < 2 or len(array.shape) > 4:
+        raise ValueError(
+            f"Incorrect data dimensions. Must be 2, 3 or 4 (got {array.shape})."
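`get_read_func` maps the data type to a reader: `tiff` gets the bundled reader, while `array` and `custom` return `None` so the caller must supply its own `read_source_func`.

```python
from careamics.config.support import SupportedData
from careamics.dataset.dataset_utils import get_read_func, read_tiff

assert get_read_func(SupportedData.TIFF) is read_tiff
assert get_read_func(SupportedData.ARRAY) is None   # arrays need no reader
assert get_read_func(SupportedData.CUSTOM) is None  # user supplies the reader
```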
+ ) + + # sanity check on axes length + if len(axes) != len(array.shape): + raise ValueError(f"Incorrect axes length (got {axes}).") + + # arr = fix_axes(arr, axes) + return array diff --git a/src/careamics/dataset/in_memory_dataset.py b/src/careamics/dataset/in_memory_dataset.py index c8d55faa..ae148bbc 100644 --- a/src/careamics/dataset/in_memory_dataset.py +++ b/src/careamics/dataset/in_memory_dataset.py @@ -1,137 +1,133 @@ """In-memory dataset module.""" +from __future__ import annotations + +import copy from pathlib import Path from typing import Callable, List, Optional, Tuple, Union import numpy as np import torch -from ..config.data import DataModel +from ..config.data_model import DataModel from ..utils import normalize from ..utils.logging import get_logger -from .dataset_utils import get_patch_transform, read_tiff -from .patching import ( +from .dataset_utils import read_tiff +from .patching.patch_transform import get_patch_transform +from .patching.patching import ( generate_patches_predict, prepare_patches_supervised, + prepare_patches_supervised_array, prepare_patches_unsupervised, + prepare_patches_unsupervised_array, ) logger = get_logger(__name__) +# TODO dataset which sets appart some data for validation? class InMemoryDataset(torch.utils.data.Dataset): - """ - Dataset storing data in memory and allowing generating patches from it. - - Parameters - ---------- - data_path : Union[str, Path] - Path to the data, must be a directory. - axes : str - Description of axes in format STCZYX. - patch_extraction_method : ExtractionStrategies - Patch extraction strategy, as defined in extraction_strategy. - patch_size : Union[List[int], Tuple[int]] - Size of the patches along each axis, must be of dimension 2 or 3. - patch_overlap : Optional[Union[List[int], Tuple[int]]], optional - Overlap of the patches, must be of dimension 2 or 3, by default None. - mean : Optional[float], optional - Expected mean of the dataset, by default None. - std : Optional[float], optional - Expected standard deviation of the dataset, by default None. - patch_transform : Optional[Callable], optional - Patch transform to apply, by default None. Contains type and parameters in a - dict. Used in N2V family of algorithms, or any custom patch - manipulation/augmentation. - """ + """Dataset storing data in memory and allowing generating patches from it.""" def __init__( self, - files: List[Path], - config: DataModel, - target_files: Optional[List[Path]] = None, - read_source_func: Optional[Callable] = None, + data_config: DataModel, + data: Union[np.ndarray, List[Path]], + data_target: Optional[Union[np.ndarray, List[Path]]] = None, + read_source_func: Callable = read_tiff, **kwargs, ) -> None: """ Constructor. - Parameters - ---------- - data_path : Union[str, Path] - Path to the data, must be a directory. - axes : str - Description of axes in format STCZYX. - patch_size : Union[List[int], Tuple[int]] - Size of the patches along each axis, must be of dimension 2 or 3. - mean : Optional[float], optional - Expected mean of the dataset, by default None. - std : Optional[float], optional - Expected standard deviation of the dataset, by default None. - patch_transform : Optional[Callable], optional - Patch transform to apply, by default None. Could be any augmentation - function, or algorithm specific pixel manipulation (N2V family). - Please refer to the documentation for more details. - # TODO add link - - Raises - ------ - ValueError - If data_path is not a directory. 
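+        A minimal usage sketch (hypothetical names; assumes a valid `DataModel`
+        instance and a list of tiff paths):
+
+        >>> dataset = InMemoryDataset(
+        ...     data_config=data_config,
+        ...     data=[Path("img_1.tif"), Path("img_2.tif")],
+        ... )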
+ # TODO """ - self.files = files - self.target_files = target_files - self.axes = config.axes - self.algorithm = None # TODO add algorithm type + if data_target is not None: + raise NotImplementedError("Targets are not yet supported.") - self.read_source_func = ( - read_source_func if read_source_func is not None else read_tiff - ) + self.data = data + self.data_target = data_target + self.axes = data_config.axes + self.patch_size = data_config.patch_size - self.patch_size = config.patch_size + # read function + self.read_source_func = read_source_func # Generate patches - self.data, self.targets, computed_mean, computed_std = self._prepare_patches() + supervised = self.data_target is not None + patches = self._prepare_patches(supervised) - if not config.mean or not config.std: + # Add results to members + self.patches, self.patch_targets, computed_mean, computed_std = patches + + if not data_config.mean or not data_config.std: self.mean, self.std = computed_mean, computed_std logger.info(f"Computed dataset mean: {self.mean}, std: {self.std}") - assert self.mean is not None - assert self.std is not None + # if the transforms are not an instance of Compose + if data_config.has_tranform_list(): + # update mean and std in configuration + # the object is mutable and should then be recorded in the CAREamist obj + data_config.set_mean_and_std(self.mean, self.std) + else: + self.mean, self.std = data_config.mean, data_config.std self.patch_transform = get_patch_transform( - patch_transforms=config.transforms, - mean=self.mean, - std=self.std, - target=self.target_files is not None, + patch_transforms=data_config.transforms, + with_target=self.data_target is not None, ) - def _prepare_patches(self) -> Callable: + def _prepare_patches( + self, supervised: bool + ) -> Tuple[np.ndarray, Optional[np.ndarray], float, float]: """ Iterate over data source and create an array of patches. - Calls consecutive function for supervised and unsupervised learning. + Parameters + ---------- + supervised : bool + Whether the dataset is supervised or not. Returns ------- np.ndarray Array of patches. """ - if self.target_files is not None: - return prepare_patches_supervised( - self.files, - self.target_files, - self.axes, - self.patch_size, - self.read_source_func, - ) + # if numpy array + if isinstance(self.data, np.ndarray): + # supervised case: CARE, N2N, segmentation etc. + if supervised: + return prepare_patches_supervised_array( + self.data, + self.axes, + self.data_target, + self.patch_size, + ) + # unsupervised: N2V, PN2V, etc. + else: + return prepare_patches_unsupervised_array( + self.data, + self.axes, + self.patch_size, + ) + # else it is a list of paths else: - return prepare_patches_unsupervised( - self.files, - self.axes, - self.patch_size, - self.read_source_func, - ) + # supervised case: CARE, N2N, segmentation etc. + if supervised: + return prepare_patches_supervised( + self.data, + self.data_target, + self.axes, + self.patch_size, + self.read_source_func, + ) + # unsupervised: N2V, PN2V, etc. + else: + return prepare_patches_unsupervised( + self.data, + self.axes, + self.patch_size, + self.read_source_func, + ) def __len__(self) -> int: """ @@ -142,8 +138,7 @@ def __len__(self) -> int: int Length of the dataset. 
""" - # convert to numpy array to convince mypy that it is not a generator - return self.data.shape[0] + return self.patches.shape[0] def __getitem__(self, index: int) -> Tuple[np.ndarray]: """ @@ -164,61 +159,134 @@ def __getitem__(self, index: int) -> Tuple[np.ndarray]: ValueError If dataset mean and std are not set. """ - patch = self.data[index] - - if self.mean is not None and self.std is not None: - if self.target_files is not None: - # Splitting targets into a list. 1st dim is the number of targets - target = self.targets[index, ...] - # Move channels to the last dimension for the transform - transformed = self.patch_transform( - image=np.moveaxis(patch, 0, -1), target=np.moveaxis(target, 0, -1) - ) - patch, target = np.moveaxis(transformed["image"], -1, 0), np.moveaxis( - transformed["target"], -1, 0 - ) # TODO check if this is correct! - return patch, target - else: - patch = self.patch_transform(image=np.moveaxis(patch, 0, -1))["image"] - return patch + patch = self.patches[index] + + # if there is a target + if self.data_target is not None: + # get target + target = self.patch_targets[index] + + # Albumentations requires Channel last + c_patch = np.moveaxis(patch, 0, -1) + c_target = np.moveaxis(target, 0, -1) + + # Apply transforms + transformed = self.patch_transform(image=c_patch, target=c_target) + + # move axes back + patch = np.moveaxis(transformed["image"], -1, 0) + target = np.moveaxis(transformed["target"], -1, 0) + + return patch, target else: - raise ValueError("Dataset mean and std must be set before using it.") + # Albumentations requires Channel last + patch = np.moveaxis(patch, 0, -1) + + # Apply transforms + transformed_patch = self.patch_transform(image=patch)["image"] + manip_patch, patch, mask = transformed_patch + + # move C axes back + manip_patch = np.moveaxis(manip_patch, -1, 0) + patch = np.moveaxis(patch, -1, 0) + mask = np.moveaxis(mask, -1, 0) + return (manip_patch, patch, mask) + def get_number_of_patches(self) -> int: + """ + Return the number of patches in the dataset. + + Returns + ------- + int + Number of patches in the dataset. + """ + return self.patches.shape[0] + + def split_dataset( + self, + percentage: float = 0.1, + minimum_number: int = 5, + ) -> InMemoryDataset: + """Split a new dataset away from the current one. + + This method is used to extract random validation patches from the dataset. + + Parameters + ---------- + percentage : float, optional + Percentage of patches to extract, by default 0.1. + minimum_number : int, optional + Minimum number of patches to extract, by default 5. + + Returns + ------- + InMemoryDataset + New dataset with the extracted patches. + + Raises + ------ + ValueError + If `percentage` is not between 0 and 1. + ValueError + If `minimum_number` is not between 1 and the number of patches. + """ + if percentage < 0 or percentage > 1: + raise ValueError(f"Percentage must be between 0 and 1, got {percentage}.") + + if minimum_number < 1 or minimum_number > self.get_number_of_patches(): + raise ValueError( + f"Minimum number of patches must be between 1 and " + f"{self.get_number_of_patches()} (number of patches), got {minimum_number}." 
+ ) + + total_patches = self.get_number_of_patches() + + # number of patches to extract (either percentage rounded or minimum number) + n_patches = max(round(total_patches * percentage), minimum_number) + + # get random indices + indices = np.random.choice(total_patches, n_patches, replace=False) + + # extract patches + val_patches = self.patches[indices] + + # remove patches from self.patch + self.patches = np.delete(self.patches, indices, axis=0) + + # same for targets + if self.patch_targets is not None: + val_targets = self.patch_targets[indices] + self.patch_targets = np.delete(self.patch_targets, indices, axis=0) + + # clone the dataset + dataset = copy.deepcopy(self) + + # reassign patches + dataset.patches = val_patches + + # reassign targets + if self.patch_targets is not None: + dataset.patch_targets = val_targets + + return dataset + + +# TODO add tile size class InMemoryPredictionDataset(torch.utils.data.Dataset): """ Dataset storing data in memory and allowing generating patches from it. - Parameters - ---------- - data_path : Union[str, Path] - Path to the data, must be a directory. - axes : str - Description of axes in format STCZYX. - patch_extraction_method : ExtractionStrategies - Patch extraction strategy, as defined in extraction_strategy. - patch_size : Union[List[int], Tuple[int]] - Size of the patches along each axis, must be of dimension 2 or 3. - patch_overlap : Optional[Union[List[int], Tuple[int]]], optional - Overlap of the patches, must be of dimension 2 or 3, by default None. - mean : Optional[float], optional - Expected mean of the dataset, by default None. - std : Optional[float], optional - Expected standard deviation of the dataset, by default None. - patch_transform : Optional[Callable], optional - Patch transform to apply, by default None. Contains type and parameters in a - dict. Used in N2V family of algorithms, or any custom patch - manipulation/augmentation. + # TODO """ def __init__( self, - array: np.ndarray, - axes: str, + data_config: DataModel, + data: np.ndarray, tile_size: Union[List[int], Tuple[int]], tile_overlap: Optional[Union[List[int], Tuple[int]]] = None, - mean: Optional[float] = None, - std: Optional[float] = None, ) -> None: """Constructor. @@ -240,22 +308,26 @@ def __init__( ValueError If data_path is not a directory. """ - self.input_array = array - self.axes = axes + self.data_config = data_config + self.axes = data_config.axes self.tile_size = tile_size self.tile_overlap = tile_overlap + self.mean = data_config.mean + self.std = data_config.std + + # check that mean and std are provided + if not self.mean or not self.std: + raise ValueError( + "Mean and std must be provided to the configuration in order to " + " perform prediction." + ) - self.mean = mean - self.std = std - + self.input_array = data self.tile = tile_size and tile_overlap # Generate patches self.data = self._prepare_patches() - if not mean or not std: - raise ValueError("Mean and std must be provided for performing prediction") - def _prepare_patches(self) -> Callable: """ Iterate over data source and create an array of patches. diff --git a/src/careamics/dataset/iterable_dataset.py b/src/careamics/dataset/iterable_dataset.py index 5e51e018..5ba3cf17 100644 --- a/src/careamics/dataset/iterable_dataset.py +++ b/src/careamics/dataset/iterable_dataset.py @@ -1,24 +1,27 @@ -""" -Tiff dataset module. +from __future__ import annotations -This module contains the implementation of the TiffDataset class, which allows loading -tiff files. 
-""" +import copy from pathlib import Path from typing import Callable, Generator, List, Optional, Tuple, Union import numpy as np -import torch +from torch.utils.data import IterableDataset, get_worker_info -from ..config.data import DataModel +from ..config.data_model import DataModel +from ..config.support import SupportedExtractionStrategy from ..utils.logging import get_logger -from .dataset_utils import get_patch_transform, get_patch_transform_predict, read_tiff -from .patching import generate_patches_supervised, generate_patches_unsupervised +from .dataset_utils import read_tiff, reshape_array +from .patching import ( + generate_patches_predict, + generate_patches_supervised, + generate_patches_unsupervised, + get_patch_transform, +) logger = get_logger(__name__) -class IterableDataset(torch.utils.data.IterableDataset): +class IterableDataset(IterableDataset): """ Dataset allowing extracting patches w/o loading whole data into memory. @@ -44,27 +47,34 @@ class IterableDataset(torch.utils.data.IterableDataset): def __init__( self, - files: List[Path], - config: DataModel, - target_files: Optional[Union[str, Path, List[Union[str, Path]]]] = None, - read_source_func: Optional[Callable] = None, - **kwargs, + data_config: DataModel, + src_files: List[Path], + target_files: Optional[List[Path]] = None, + read_source_func: Callable = read_tiff, ) -> None: - self.data_files = files + if target_files is not None: + raise NotImplementedError("Targets are not yet supported.") + + self.data_files = src_files self.target_files = target_files - self.axes = config.axes - self.patch_size = config.patch_size - self.patch_extraction_method = "random" - self.read_source_func = read_source_func if read_source_func else read_tiff + self.axes = data_config.axes + self.patch_size = data_config.patch_size + self.read_source_func = read_source_func - if not config.mean or not config.std: + # compute mean and std over the dataset + if not data_config.mean or not data_config.std: self.mean, self.std = self._calculate_mean_and_std() + # if the transforms are not an instance of Compose + if data_config.has_tranform_list(): + # update mean and std in configuration + # the object is mutable and should then be recorded in the CAREamist obj + data_config.set_mean_and_std(self.mean, self.std) + + # get transforms self.patch_transform = get_patch_transform( - patch_transforms=config.transforms, - mean=self.mean, - std=self.std, - target=target_files is not None, + patch_transforms=data_config.transforms, + with_target=target_files is not None, ) def _calculate_mean_and_std(self) -> Tuple[float, float]: @@ -79,11 +89,14 @@ def _calculate_mean_and_std(self) -> Tuple[float, float]: means, stds = 0, 0 num_samples = 0 - for sample in self._iterate_files(): + for sample in self._iterate_over_files(): means += sample.mean() - stds += np.std(sample) + stds += sample.std() num_samples += 1 + if num_samples == 0: + raise ValueError("No samples found in the dataset.") + result_mean = means / num_samples result_std = stds / num_samples @@ -91,7 +104,7 @@ def _calculate_mean_and_std(self) -> Tuple[float, float]: logger.info(f"Mean: {result_mean}, std: {result_std}") return result_mean, result_std - def _iterate_files(self) -> Generator: + def _iterate_over_files(self) -> Generator[Tuple[np.ndarray, ...], None, None]: """ Iterate over data source and yield whole image. 
@@ -104,23 +117,41 @@ def _iterate_files(self) -> Generator: # dataset object # Configuring each copy independently to avoid having duplicate data returned # from the workers - worker_info = torch.utils.data.get_worker_info() + worker_info = get_worker_info() worker_id = worker_info.id if worker_info is not None else 0 num_workers = worker_info.num_workers if worker_info is not None else 1 + # iterate over the files for i, filename in enumerate(self.data_files): + # retrieve file corresponding to the worker id if i % num_workers == worker_id: - sample = self.read_source_func(filename, self.axes) - if self.target_files is not None: - if filename.name != self.target_files[i].name: - raise ValueError( - f"File {filename} does not match target file " - f"{self.target_files[i]}" - ) - target = self.read_source_func(self.target_files[i], self.axes) - yield sample, target - else: - yield sample + try: + # read data + sample = self.read_source_func(filename, self.axes) + + # reshape data + reshaped_sample = reshape_array(sample, self.axes) + + # read target, if available + if self.target_files is not None: + if filename.name != self.target_files[i].name: + raise ValueError( + f"File {filename} does not match target file " + f"{self.target_files[i]}. Have you passed sorted " + f"arrays?" + ) + + # read target + target = self.read_source_func(self.target_files[i], self.axes) + + # reshape target + reshaped_target = reshape_array(target, self.axes) + + yield reshaped_sample, reshaped_target + else: + yield reshaped_sample + except Exception as e: + logger.error(f"Error reading file {filename}: {e}") def __iter__(self) -> Generator[np.ndarray, None, None]: """ @@ -135,50 +166,136 @@ def __iter__(self) -> Generator[np.ndarray, None, None]: self.mean is not None and self.std is not None ), "Mean and std must be provided" - for sample in self._iterate_files(): + # iterate over files + for sample in self._iterate_over_files(): if self.target_files is not None: + sample_input, sample_target = sample patches = generate_patches_supervised( - sample, - self.axes, - self.patch_extraction_method, - self.patch_size, + sample=sample_input, + axes=self.axes, + patch_extraction_method=SupportedExtractionStrategy.RANDOM, + patch_size=self.patch_size, + target=sample_target, ) else: patches = generate_patches_unsupervised( - sample, - self.axes, - self.patch_extraction_method, - self.patch_size, + sample=sample, + axes=self.axes, + patch_extraction_method=SupportedExtractionStrategy.RANDOM, + patch_size=self.patch_size, ) + # iterate over patches + # patches are tuples of (patch, target) if target is available + # or (patch, None) only if no target is available + # patch is of dimensions (C)ZYX for patch_data in patches: - if isinstance(patch_data, tuple): - if self.target_files is not None: - target = patch_data[1:] - transformed = self.patch_transform( - image=np.moveaxis(patch_data[0], 0, -1), - target=np.moveaxis(target, 0, -1), - ) - yield (transformed["image"], transformed["mask"]) - # TODO fix dimensions - else: - transformed = self.patch_transform( - image=np.moveaxis(patch_data[0], 0, -1) - ) - yield (transformed["image"], *patch_data[1:]) + # if there is a target + if self.target_files is not None: + # Albumentations expects the channel dimension to be last + c_patch = np.moveaxis(patch_data[0], 0, -1) + c_target = np.moveaxis(patch_data[1], 0, -1) + + # apply the transform to the patch and the target + transformed = self.patch_transform( + image=c_patch, + target=c_target, + ) + + # TODO if ManipulateN2V, 
then we get a tuple not an array!
+                    # TODO if "target" string is used, then make it a constant or an enum
+
+                    # move the axes back to the original position
+                    patch = np.moveaxis(transformed["image"], -1, 0)
+                    target = np.moveaxis(transformed["target"], -1, 0)
+
+                    yield (patch, target)
                 else:
-                    yield self.patch_transform(image=patch_data)["image"]
+                    # Albumentations expects the channel dimension to be last
+                    patch = np.moveaxis(patch_data[0], 0, -1)
+
+                    # apply transform
+                    transformed = self.patch_transform(image=patch)
-        # else:
-        #     # if S or T dims are not empty - assume every image is a separate
-        #     # sample in dim 0
-        #     for i in range(sample.shape[0]):
-        #         item = np.expand_dims(sample[i], (0, 1))
-        #         item = normalize(img=item, mean=self.mean, std=self.std)
-        #         yield item
+                    # TODO is there a chance that ManipulateN2V is not in transforms?
+                    # retrieve the output of ManipulateN2V
+                    masked_patch, patch, mask = transformed["image"]
+
+                    # move C axes back
+                    masked_patch = np.moveaxis(masked_patch, -1, 0)
+                    patch = np.moveaxis(patch, -1, 0)
+                    mask = np.moveaxis(mask, -1, 0)
+
+                    yield (masked_patch, patch, mask)
+
+    def get_number_of_files(self) -> int:
+        """
+        Return the number of files in the dataset.
+
+        Returns
+        -------
+        int
+            Number of files in the dataset.
+        """
+        return len(self.data_files)
+
+    def split_dataset(
+        self,
+        percentage: float = 0.1,
+        minimum_number: int = 5,
+    ) -> IterableDataset:
+        """Split a new dataset away from the current one.
+
+        This method is used to extract random validation files from the dataset.
+
+        Parameters
+        ----------
+        percentage : float, optional
+            Percentage of files to extract, by default 0.1.
+        minimum_number : int, optional
+            Minimum number of files to extract, by default 5.
+
+        Returns
+        -------
+        IterableDataset
+            New dataset with the extracted files.
+
+        Raises
+        ------
+        ValueError
+            If `percentage` is not between 0 and 1.
+        ValueError
+            If `minimum_number` is not between 1 and the number of files.
+        """
+        if percentage < 0 or percentage > 1:
+            raise ValueError(f"Percentage must be between 0 and 1, got {percentage}.")
+
+        if minimum_number < 1 or minimum_number > self.get_number_of_files():
+            raise ValueError(
+                f"Minimum number of files must be between 1 and "
+                f"{self.get_number_of_files()} (number of files), got "
+                f"{minimum_number}."
+            )
+
+        # compute number of files
+        total_files = self.get_number_of_files()
+        n_files = max(round(percentage * total_files), minimum_number)
+
+        # get random indices
+        indices = np.random.choice(total_files, n_files, replace=False)
+
+        # extract the validation files
+        val_files = [self.data_files[i] for i in indices]
+
+        # remove the extracted files from the training dataset
+        data_files = []
+        for i, file in enumerate(self.data_files):
+            if i not in indices:
+                data_files.append(file)
+        self.data_files = data_files
+
+        # same for targets
+        if self.target_files is not None:
+            val_target_files = [self.target_files[i] for i in indices]
+
+            data_target_files = []
+            for i, file in enumerate(self.target_files):
+                if i not in indices:
+                    data_target_files.append(file)
+            self.target_files = data_target_files
+
+        # clone the dataset
+        dataset = copy.deepcopy(self)
+
+        # reassign files
+        dataset.data_files = val_files
+
+        # reassign targets
+        if self.target_files is not None:
+            dataset.target_files = val_target_files
+
+        return dataset
+
+
+# TODO: why was this calling transforms on prediction patches?
 class IterablePredictionDataset(IterableDataset):
     """
     Dataset allowing extracting patches w/o loading whole data into memory.
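The `split_dataset` method added in the hunk above carves a random validation
split out of an existing dataset; a hedged usage sketch (hypothetical
`train_dataset` object):

    val_dataset = train_dataset.split_dataset(percentage=0.1, minimum_number=5)
    # train_dataset keeps the remaining files; val_dataset is a deep copy
    # holding only the extracted ones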
@@ -205,28 +322,29 @@ class IterablePredictionDataset(IterableDataset): def __init__( self, + data_config: DataModel, files: List[Path], - config: DataModel, - read_source_func: Optional[Callable] = None, + tile_size: Union[List[int], Tuple[int]], + tile_overlap: Optional[Union[List[int], Tuple[int]]] = None, + read_source_func: Callable = read_tiff, **kwargs, ) -> None: - super().__init__(files=files, config=config, read_source_func=read_source_func) - self.data_files = files - self.axes = config.axes - self.patch_size = config.patch_size - self.patch_extraction_method = "tiled" - self.read_source_func = read_source_func if read_source_func else read_tiff - - if not config.mean or not config.std: - self.mean, self.std = self._calculate_mean_and_std() - - self.patch_transform = get_patch_transform_predict( - patch_transforms=config.transforms, - mean=self.mean, - std=self.std, - target=False, + super().__init__( + data_config=data_config, src_files=files, read_source_func=read_source_func ) + self.patch_size = data_config.patch_size + self.tile_size = tile_size + self.tile_overlap = tile_overlap + self.read_source_func = read_source_func + + # check that mean and std are provided + if not self.mean or not self.std: + raise ValueError( + "Mean and std must be provided to the configuration in order to " + " perform prediction." + ) + def __iter__(self) -> Generator[np.ndarray, None, None]: """ Iterate over data source and yield single patch. @@ -240,12 +358,9 @@ def __iter__(self) -> Generator[np.ndarray, None, None]: self.mean is not None and self.std is not None ), "Mean and std must be provided" - for sample in self._iterate_files(): - patches = generate_patches_unsupervised( - sample, - self.axes, - self.patch_extraction_method, - self.patch_size, + for sample in self._iterate_over_files(): + patches = generate_patches_predict( + sample, self.axes, self.tile_size, self.tile_overlap ) for patch_data in patches: diff --git a/src/careamics/dataset/patching.py b/src/careamics/dataset/patching.py deleted file mode 100644 index 2764e3d4..00000000 --- a/src/careamics/dataset/patching.py +++ /dev/null @@ -1,816 +0,0 @@ -""" -Tiling submodule. - -These functions are used to tile images into patches or tiles. -""" -import itertools -from pathlib import Path -from typing import Callable, Generator, List, Optional, Tuple, Union - -import numpy as np -import zarr -from skimage.util import view_as_windows - -from ..utils.logging import get_logger -from .dataset_utils import reshape_data -from .extraction_strategy import ExtractionStrategy - -logger = get_logger(__name__) - - -def _compute_number_of_patches( - arr: np.ndarray, patch_sizes: Union[List[int], Tuple[int, ...]] -) -> Tuple[int, ...]: - """ - Compute the number of patches that fit in each dimension. - - Array must have one dimension more than the patches (C dimension). - - Parameters - ---------- - arr : np.ndarray - Input array. - patch_sizes : Tuple[int] - Size of the patches. - - Returns - ------- - Tuple[int] - Number of patches in each dimension. - """ - try: - n_patches = [ - np.ceil(arr.shape[i] / patch_sizes[i]).astype(int) - for i in range(len(patch_sizes)) - ] - except IndexError as e: - raise( - f"Patch size {patch_sizes} is not compatible with array shape {arr.shape}" - ) from e - return tuple(n_patches) - - -def _compute_overlap( - arr: np.ndarray, patch_sizes: Union[List[int], Tuple[int, ...]] -) -> Tuple[int, ...]: - """ - Compute the overlap between patches in each dimension. 
- - Array must be of dimensions C(Z)YX, and patches must be of dimensions YX or ZYX. - If the array dimensions are divisible by the patch sizes, then the overlap is 0. - Otherwise, it is the result of the division rounded to the upper value. - - Parameters - ---------- - arr : np.ndarray - Input array 3 or 4 dimensions. - patch_sizes : Tuple[int] - Size of the patches. - - Returns - ------- - Tuple[int] - Overlap between patches in each dimension. - """ - n_patches = _compute_number_of_patches(arr, patch_sizes) - - overlap = [ - np.ceil( - np.clip(n_patches[i] * patch_sizes[i] - arr.shape[i], 0, None) - / max(1, (n_patches[i] - 1)) - ).astype(int) - for i in range(len(patch_sizes)) - ] - return tuple(overlap) - - -def _compute_patch_steps( - patch_sizes: Union[List[int], Tuple[int, ...]], overlaps: Tuple[int, ...] -) -> Tuple[int, ...]: - """ - Compute steps between patches. - - Parameters - ---------- - patch_sizes : Tuple[int] - Size of the patches. - overlaps : Tuple[int] - Overlap between patches. - - Returns - ------- - Tuple[int] - Steps between patches. - """ - steps = [ - min(patch_sizes[i] - overlaps[i], patch_sizes[i]) - for i in range(len(patch_sizes)) - ] - return tuple(steps) - - -def _compute_reshaped_view( - arr: np.ndarray, - window_shape: Tuple[int, ...], - step: Tuple[int, ...], - output_shape: Tuple[int, ...], - target: Optional[np.ndarray] = None, -) -> np.ndarray: - """ - Compute reshaped views of an array, where views correspond to patches. - - Parameters - ---------- - arr : np.ndarray - Array from which the views are extracted. - window_shape : Tuple[int] - Shape of the views. - step : Tuple[int] - Steps between views. - output_shape : Tuple[int] - Shape of the output array. - - Returns - ------- - np.ndarray - Array with views dimension. - """ - rng = np.random.default_rng() - - if target is not None: - arr = np.stack([arr, target], axis=0) - window_shape = (arr.shape[0], *window_shape) - step = (arr.shape[0], *step) - output_shape = (arr.shape[0], -1, arr.shape[2], *output_shape[2:]) - - patches = view_as_windows(arr, window_shape=window_shape, step=step).reshape( - *output_shape - ) - if target is not None: - rng.shuffle(patches, axis=1) - else: - rng.shuffle(patches, axis=0) - return patches - - -def _patches_check_and_update( - arr: np.ndarray, - patch_size: Union[List[int], Tuple[int, ...]], - is_3d_patch: bool, -) -> None: - """ - Check patch size and array compatibility. - - This method validates the patch sizes with respect to the array dimensions: - - The patch sizes must have one dimension fewer than the array (C dimension). - - Chack that patch sizes are smaller than array dimensions. - - Parameters - ---------- - arr : np.ndarray - Input array. - patch_size : Union[List[int], Tuple[int, ...]] - Size of the patches along each dimension of the array, except the first. - is_3d_patch : bool - Whether the patch is 3D or not. - - Raises - ------ - ValueError - If the patch size is not consistent with the array shape (one more array - dimension). - ValueError - If the patch size in Z is larger than the array dimension. - ValueError - If either of the patch sizes in X or Y is larger than the corresponding array - dimension. - """ - if len(patch_size) != len(arr.shape[2:]): - raise ValueError( - f"There must be a patch size for each spatial dimensions " - f"(got {patch_size} patches for dims {arr.shape})." 
- ) - - # Sanity checks on patch sizes versus array dimension - if is_3d_patch and patch_size[0] > arr.shape[-3]: - raise ValueError( - f"Z patch size is inconsistent with image shape " - f"(got {patch_size[0]} patches for dim {arr.shape[1]})." - ) - - if patch_size[-2] > arr.shape[-2] or patch_size[-1] > arr.shape[-1]: - raise ValueError( - f"At least one of YX patch dimensions is inconsistent with image shape " - f"(got {patch_size} patches for dims {arr.shape[-2:]})." - ) - # Update patch size to SC(Z)YX format - return [1, arr.shape[1], *patch_size] - - -# formerly : -# in dataloader.py#L52, 00d536c -def _extract_patches_sequential( - arr: np.ndarray, - axes: str, - patch_size: Union[List[int], Tuple[int]], - target: Optional[np.ndarray] = None, -) -> Generator[np.ndarray, None, None]: - """ - Generate patches from an array in a sequential manner. - - Array dimensions should be C(Z)YX, where C can be a singleton dimension. The patches - are generated sequentially and cover the whole array. - - Parameters - ---------- - arr : np.ndarray - Input image array. - patch_size : Tuple[int] - Patch sizes in each dimension. - - Returns - ------- - Generator[np.ndarray, None, None] - Generator of patches. - """ - is_3d_patch = len(patch_size) == 3 - - # Reshape data to SCZYX - arr, _ = reshape_data(arr, axes) - - # Patches sanity check and update - patch_size = _patches_check_and_update(arr, patch_size, is_3d_patch) - - # Compute overlap - overlaps = _compute_overlap(arr=arr, patch_sizes=patch_size) - - # Create view window and overlaps - window_steps = _compute_patch_steps(patch_sizes=patch_size, overlaps=overlaps) - - output_shape = [-1,] + patch_size[1:] - - # Generate a view of the input array containing pre-calculated number of patches - # in each dimension with overlap. - # Resulting array is resized to (n_patches, C, Z, Y, X) or (n_patches, C, Y, X) - patches = _compute_reshaped_view( - arr, - window_shape=patch_size, - step=window_steps, - output_shape=output_shape, - target=target, - ) - if target is not None: - return ( - patches[0, ...], - patches[1, ...] - ) - else: - return patches, None - - -def _extract_patches_random( - arr: np.ndarray, - axes: str, - patch_size: Union[List[int], Tuple[int]], - target: Optional[np.ndarray] = None, -) -> Generator[np.ndarray, None, None]: - """ - Generate patches from an array in a random manner. - - The method calculates how many patches the image can be divided into and then - extracts an equal number of random patches. - - Parameters - ---------- - arr : np.ndarray - Input image array. - patch_size : Tuple[int] - Patch sizes in each dimension. - - Yields - ------ - Generator[np.ndarray, None, None] - Generator of patches. 
- """ - is_3d_patch = len(patch_size) == 3 - - arr, _ = reshape_data(arr, axes) - # Patches sanity check - patch_size = _patches_check_and_update(arr, patch_size, is_3d_patch) - - rng = np.random.default_rng() - - for sample_idx in range(arr.shape[0]): - sample = arr[sample_idx] - n_patches = np.ceil(np.prod(sample.shape) / np.prod(patch_size)).astype(int) - for _ in range(n_patches): - crop_coords = [ - rng.integers(0, sample.shape[i] - patch_size[1:][i], endpoint=True) - for i in range(len(patch_size[1:])) - ] - patch = ( - sample[ - ( - ..., - *[ - slice(c, c + patch_size[1:][i]) - for i, c in enumerate(crop_coords) - ], - ) - ] - .copy() - .astype(np.float32) - ) - if target is not None: - target_patch = ( - target[ - ( - ..., - *[ - slice(c, c + patch_size[1:][i]) - for i, c in enumerate(crop_coords) - ], - ) - ] - .copy() - .astype(np.float32) - ) - yield np.expand_dims(patch, 0), np.expand_dims(target_patch, 0) - else: - yield np.expand_dims(patch, 0), None - - -def _extract_patches_random_from_chunks( - arr: zarr.Array, - patch_size: Union[List[int], Tuple[int, ...]], - chunk_size: Union[List[int], Tuple[int, ...]], - chunk_limit: Optional[int] = None, -) -> Generator[np.ndarray, None, None]: - """ - Generate patches from an array in a random manner. - - The method calculates how many patches the image can be divided into and then - extracts an equal number of random patches. - - Parameters - ---------- - arr : np.ndarray - Input image array. - patch_size : Tuple[int] - Patch sizes in each dimension. - chunk_size : Tuple[int] - Chunk sizes to load from the. - - Yields - ------ - Generator[np.ndarray, None, None] - Generator of patches. - """ - is_3d_patch = len(patch_size) == 3 - - # Patches sanity check - patch_size = _patches_check_and_update(arr, patch_size, is_3d_patch) - - rng = np.random.default_rng() - num_chunks = chunk_limit if chunk_limit else np.prod(arr._cdata_shape) - - # Iterate over num chunks in the array - for _ in range(num_chunks): - chunk_crop_coords = [ - rng.integers(0, max(0, arr.shape[i] - chunk_size[i]), endpoint=True) - for i in range(len(chunk_size)) - ] - chunk = arr[ - ( - ..., - *[slice(c, c + chunk_size[i]) for i, c in enumerate(chunk_crop_coords)], - ) - ].squeeze() - - # Add a singleton dimension if the chunk does not have a sample dimension - if len(chunk.shape) == len(patch_size): - chunk = np.expand_dims(chunk, axis=0) - # Iterate over num samples (S) - for sample_idx in range(chunk.shape[0]): - spatial_chunk = chunk[sample_idx] - assert len(spatial_chunk.shape) == len( - patch_size - ), "Requested chunk shape is not equal to patch size" - - n_patches = np.ceil( - np.prod(spatial_chunk.shape) / np.prod(patch_size) - ).astype(int) - - # Iterate over the number of patches - for _ in range(n_patches): - patch_crop_coords = [ - rng.integers( - 0, spatial_chunk.shape[i] - patch_size[i], endpoint=True - ) - for i in range(len(patch_size)) - ] - patch = ( - spatial_chunk[ - ( - ..., - *[ - slice(c, c + patch_size[i]) - for i, c in enumerate(patch_crop_coords) - ], - ) - ] - .copy() - .astype(np.float32) - ) - yield patch - - -def _compute_crop_and_stitch_coords_1d( - axis_size: int, tile_size: int, overlap: int -) -> Tuple[List[Tuple[int, int]], ...]: - """ - Compute the coordinates of each tile along an axis, given the overlap. - - Parameters - ---------- - axis_size : int - Length of the axis. - tile_size : int - Size of the tile for the given axis. - overlap : int - Size of the overlap for the given axis. 
- - Returns - ------- - Tuple[Tuple[int]] - Tuple of all coordinates for given axis. - """ - # Compute the step between tiles - step = tile_size - overlap - crop_coords = [] - stitch_coords = [] - overlap_crop_coords = [] - # Iterate over the axis with a certain step - for i in range(0, max(1, axis_size - overlap), step): - # Check if the tile fits within the axis - if i + tile_size <= axis_size: - # Add the coordinates to crop one tile - crop_coords.append((i, i + tile_size)) - # Add the pixel coordinates of the cropped tile in the original image space - stitch_coords.append( - ( - i + overlap // 2 if i > 0 else 0, - i + tile_size - overlap // 2 - if crop_coords[-1][1] < axis_size - else axis_size, - ) - ) - # Add the coordinates to crop the overlap from the prediction. - overlap_crop_coords.append( - ( - overlap // 2 if i > 0 else 0, - tile_size - overlap // 2 - if crop_coords[-1][1] < axis_size - else tile_size, - ) - ) - # If the tile does not fit within the axis, perform the abovementioned - # operations starting from the end of the axis - else: - # if (axis_size - tile_size, axis_size) not in crop_coords: - crop_coords.append((max(0, axis_size - tile_size), axis_size)) - last_tile_end_coord = stitch_coords[-1][1] if stitch_coords else 1 - stitch_coords.append((last_tile_end_coord, axis_size)) - overlap_crop_coords.append( - (tile_size - (axis_size - last_tile_end_coord), tile_size) - ) - break - return crop_coords, stitch_coords, overlap_crop_coords - - -def _extract_tiles( - arr: np.ndarray, - axes: str, - tile_size: Union[List[int], Tuple[int]], - overlaps: Union[List[int], Tuple[int]], -) -> Generator: - """ - Generate tiles from the input array with specified overlap. - - The tiles cover the whole array. - - Parameters - ---------- - arr : np.ndarray - Array of shape (S, (Z), Y, X). - tile_size : Union[List[int], Tuple[int]] - Tile sizes in each dimension, of length 2 or 3. - overlaps : Union[List[int], Tuple[int]] - Overlap values in each dimension, of length 2 or 3. - - Yields - ------ - Generator - Tile generator that yields the tile with corresponding coordinates to stitch - back the tiles together. - """ - arr, _ = reshape_data(arr, axes) - - # Iterate over num samples (S) - for sample_idx in range(arr.shape[0]): - sample = arr[sample_idx] - - # Create an array of coordinates for cropping and stitching all axes. - # Shape: (axes, type_of_coord, tile_num, start/end coord) - crop_and_stitch_coords_list = [ - _compute_crop_and_stitch_coords_1d( - sample.shape[i + 1], tile_size[i], overlaps[i] - ) - for i in range(len(tile_size)) - ] - - # Rearrange crop coordinates from a list of coordinate pairs per axis to a list - # grouped by type. - # For axis of size 35 and patch size of 32 compute_crop_and_stitch_coords_1d - # will output ([(0, 32), (3, 35)], [(0, 20), (20, 35)], [(0, 20), (17, 32)]), - # where the first list is crop coordinates for 1st axis. - all_crop_coords, all_stitch_coords, all_overlap_crop_coords = zip( - *crop_and_stitch_coords_list - ) - - # Iterate over generated coordinate pairs: - for tile_idx, (crop_coords, stitch_coords, overlap_crop_coords) in enumerate( - zip( - itertools.product(*all_crop_coords), - itertools.product(*all_stitch_coords), - itertools.product(*all_overlap_crop_coords), - ) - ): - tile = sample[(..., *[slice(c[0], c[1]) for c in list(crop_coords)])] - - tile = ( - np.expand_dims(tile, 0) if "S" in axes or len(tile.shape) == 2 else tile - ) - # Check if we are at the end of the sample. 
- # To check that we compute the length of the array that contains all the - # tiles - if tile_idx == np.prod([len(axis) for axis in all_crop_coords]) - 1: - last_tile = True - else: - last_tile = False - yield ( - tile.astype(np.float32), - last_tile, - arr.shape[1:], - overlap_crop_coords, - stitch_coords, - ) - - -def prepare_patches_supervised( - train_files: List[Path], - target_files: List[Path], - axes: str, - patch_size: Union[List[int], Tuple[int]], - read_source_func: Optional[Callable] = None, -) -> Tuple[np.ndarray, float, float]: - """ - Iterate over data source and create an array of patches and corresponding targets. - - Returns - ------- - np.ndarray - Array of patches. - """ - train_files.sort() - target_files.sort() - - means, stds, num_samples = 0, 0, 0 - all_patches, all_targets = [], [] - for train_filename, target_filename in zip(train_files, target_files): - sample = read_source_func(train_filename, axes) - target = read_source_func(target_filename, axes) - means += sample.mean() - stds += np.std(sample) - num_samples += 1 - - # generate patches, return a generator - patches, targets = _extract_patches_sequential( - sample, axes, patch_size=patch_size, target=target - ) - - # convert generator to list and add to all_patches - all_patches.append(patches) - all_targets.append(targets) - - result_mean, result_std = means / num_samples, stds / num_samples - - all_patches = np.concatenate(all_patches, axis=0) - all_targets = np.concatenate(all_targets, axis=0) - logger.info(f"Extracted {all_patches.shape[0]} patches from input array.") - - return ( - all_patches, - all_targets, - result_mean, - result_std, - ) - - -def prepare_patches_unsupervised( - train_files: List[Path], - axes: str, - patch_size: Union[List[int], Tuple[int]], - read_source_func: Optional[Callable] = None, -) -> Tuple[np.ndarray, float, float]: - """ - Iterate over data source and create an array of patches. - - Returns - ------- - np.ndarray - Array of patches. - """ - means, stds, num_samples = 0, 0, 0 - all_patches = [] - for filename in train_files: - sample = read_source_func(filename, axes) - means += sample.mean() - stds += np.std(sample) - num_samples += 1 - - # generate patches, return a generator - patches, _ = _extract_patches_sequential(sample, axes, patch_size=patch_size) - - # convert generator to list and add to all_patches - all_patches.append(patches) - - result_mean, result_std = means / num_samples, stds / num_samples - return np.concatenate(all_patches), _, result_mean, result_std - - -def generate_patches_supervised( - sample: Union[np.ndarray, zarr.Array], - axes: str, - patch_extraction_method: ExtractionStrategy, - patch_size: Optional[Union[List[int], Tuple[int]]] = None, - patch_overlap: Optional[Union[List[int], Tuple[int]]] = None, - target: Optional[Union[np.ndarray, zarr.Array]] = None, -) -> Generator[np.ndarray, None, None]: - """ - Creates an iterator with patches and corresponding targets from a sample. - - Parameters - ---------- - sample : np.ndarray - Input array. - patch_extraction_method : ExtractionStrategies - Patch extraction method, as defined in extraction_strategy.ExtractionStrategy. - patch_size : Optional[Union[List[int], Tuple[int]]] - Size of the patches along each dimension of the array, except the first. - patch_overlap : Optional[Union[List[int], Tuple[int]]] - Overlap between patches. - - Returns - ------- - Generator[np.ndarray, None, None] - Generator yielding patches/tiles. 
- - Raises - ------ - ValueError - If overlap is not specified when using tiling. - ValueError - If patches is None. - """ - patches = None - targets = None - - if patch_size is not None: - patches = None - - if patch_extraction_method == ExtractionStrategy.TILED: - if patch_overlap is None: - raise ValueError( - "Overlaps must be specified when using tiling (got None)." - ) - patches = _extract_tiles( - arr=sample, axes=axes, tile_size=patch_size, overlaps=patch_overlap - ) - - elif patch_extraction_method == ExtractionStrategy.SEQUENTIAL: - patches, targets = _extract_patches_sequential( - arr=sample, axes=axes, patch_size=patch_size, target=target - ) - - elif patch_extraction_method == ExtractionStrategy.RANDOM: - # Returns a generator of patches and targets(if present) - patches = _extract_patches_random( - arr=sample, axes=axes, patch_size=patch_size, target=target - ) - - elif patch_extraction_method == ExtractionStrategy.RANDOM_ZARR: - # Returns a generator of patches and targets(if present) - patches = _extract_patches_random_from_chunks( - sample, patch_size=patch_size, chunk_size=sample.chunks - ) - - if patches is None: - raise ValueError("No patch generated") - - return patches, targets - else: - # no patching - return (sample for _ in range(1)), target - - -def generate_patches_unsupervised( - sample: Union[np.ndarray, zarr.Array], - axes: str, - patch_extraction_method: ExtractionStrategy, - patch_size: Optional[Union[List[int], Tuple[int]]] = None, - patch_overlap: Optional[Union[List[int], Tuple[int]]] = None, -) -> Generator[np.ndarray, None, None]: - """ - Creates an iterator over patches from a sample. - - Parameters - ---------- - sample : np.ndarray - Input array. - patch_extraction_method : ExtractionStrategies - Patch extraction method, as defined in extraction_strategy.ExtractionStrategy. - patch_size : Optional[Union[List[int], Tuple[int]]] - Size of the patches along each dimension of the array, except the first. - patch_overlap : Optional[Union[List[int], Tuple[int]]] - Overlap between patches. - - Returns - ------- - Generator[np.ndarray, None, None] - Generator yielding patches/tiles. - - Raises - ------ - ValueError - If overlap is not specified when using tiling. - ValueError - If patches is None. - """ - patches = None - - if patch_extraction_method is not None: - patches = None - - if patch_extraction_method == ExtractionStrategy.TILED: - if patch_overlap is None: - patch_overlap = [48] * len(patch_size)# TODO calculate OL from model - patches = _extract_tiles( - arr=sample, axes=axes, tile_size=patch_size, overlaps=patch_overlap - ) - # TODO split so there's no extraciton strat param - elif patch_extraction_method == ExtractionStrategy.RANDOM: - # Returns a generator of patches and targets(if present) - patches = _extract_patches_random(sample, patch_size=patch_size) - - elif patch_extraction_method == ExtractionStrategy.RANDOM_ZARR: - # Returns a generator of patches and targets(if present) - patches = _extract_patches_random_from_chunks( - sample, patch_size=patch_size, chunk_size=sample.chunks - ) - - else: - raise ValueError("Invalid patch extraction method") - - if patches is None: - raise ValueError("No patch generated") - - return patches - else: - # no patching. 
sample should have channel dimension - return (sample for _ in range(1)) - - -def generate_patches_predict( - sample: np.ndarray, - axes: str, - tile_size: Union[List[int], Tuple[int]], - tile_overlap: Union[List[int], Tuple[int]], -) -> Tuple[np.ndarray, float, float]: - """ - Iterate over data source and create an array of patches. - - Returns - ------- - np.ndarray - Array of patches. - """ - # generate patches, return a generator - patches = _extract_tiles( - arr=sample, axes=axes, tile_size=tile_size, overlaps=tile_overlap - ) - patches_list = list(patches) - if len(patches_list) == 0: - raise ValueError("No patch generated") - - return patches_list diff --git a/src/careamics/dataset/patching/__init__.py b/src/careamics/dataset/patching/__init__.py new file mode 100644 index 00000000..e7755b8e --- /dev/null +++ b/src/careamics/dataset/patching/__init__.py @@ -0,0 +1,6 @@ +from .patch_transform import get_patch_transform, get_patch_transform_predict +from .patching import ( + generate_patches_predict, + generate_patches_supervised, + generate_patches_unsupervised, +) diff --git a/src/careamics/dataset/patching/patch_transform.py b/src/careamics/dataset/patching/patch_transform.py new file mode 100644 index 00000000..003e8b96 --- /dev/null +++ b/src/careamics/dataset/patching/patch_transform.py @@ -0,0 +1,197 @@ +from typing import Callable, List, Union + +import albumentations as Aug + +from careamics.config.support import get_all_transforms +from careamics.config.transform_model import TransformModel + + +# TODO add some explanations on how the additional_targets is used +def get_patch_transform( + patch_transforms: Union[List[TransformModel], Aug.Compose], + with_target: bool, + normalize_mask: bool = True, +) -> Aug.Compose: + # if we passed a Compose, we just return it + if isinstance(patch_transforms, Aug.Compose): + return patch_transforms + + # empty list of transforms is a NoOp + elif len(patch_transforms) == 0: + return Aug.Compose( + [Aug.NoOp()], + additional_targets={}, # TODO this part need be checked again (wrt segmentation) + ) + + # else we have a list of transforms + else: + # retrieve all transforms + all_transforms = get_all_transforms() + + # instantiate all transforms + transforms = [ + all_transforms[transform.name](**transform.parameters) + for transform in patch_transforms + ] + + return Aug.Compose( + transforms, + # TODO add when will be supporting targets + # to apply image aug to the object passed to the transform as + # keyword "target" + # additional_targets={"target": "image"} + # if (with_target and normalize_mask) # TODO check this + # else {}, + ) + + +# TODO clarify this function +def _get_patch_transform( + patch_transforms: Union[List[TransformModel], Aug.Compose], + mean: float, + std: float, + target: bool, + normalize_mask: bool = True, +) -> Aug.Compose: + """Return a pixel manipulation function. + + Used in N2V family of algorithms. + + Parameters + ---------- + patch_transform_type : str + Type of patch transform. + target : bool + Whether the transform is applied to the target(if the target is present). + mode : str + Train or predict mode. + + Returns + ------- + Union[None, Callable] + Patch transform function. + """ + if patch_transforms is None: + return Aug.Compose( + [Aug.NoOp()], + additional_targets={"target": "image"} + if (target and normalize_mask) # TODO why? there is no normalization here? 
+ else {}, + ) + elif isinstance(patch_transforms, list): + patch_transforms[[t["name"] for t in patch_transforms].index("Normalize")][ + "parameters" + ] = { + "mean": mean, + "std": std, + "max_pixel_value": 1, # TODO why? mean/std normalization will not be lead to [-1,1] range + } + # TODO not very readable + return Aug.Compose( + [ + get_all_transforms()[transform["name"]](**transform["parameters"]) + if "parameters" in transform + else get_all_transforms()[transform["name"]]() + for transform in patch_transforms + ], + additional_targets={"target": "image"} + if (target and normalize_mask) + else {}, + ) + elif isinstance(patch_transforms, Aug.Compose): + return Aug.Compose( + [ + t + for t in patch_transforms.transforms[:-1] + if not isinstance(t, Aug.Normalize) + ] + + [ + Aug.Normalize(mean=mean, std=std, max_pixel_value=1), + patch_transforms.transforms[-1] + if patch_transforms.transforms[-1].__class__.__name__ == "ManipulateN2V" + else Aug.NoOp(), + ], + additional_targets={"target": "image"} + if (target and normalize_mask) + else {}, + ) + else: + raise ValueError( + f"Incorrect patch transform type {patch_transforms}. " + f"Please refer to the documentation." # TODO add link to documentation + ) + + +# TODO add tta +def get_patch_transform_predict( + patch_transforms: Union[List, Aug.Compose, None], + mean: float, + std: float, + target: bool, + normalize_mask: bool = True, +) -> Union[None, Callable]: + """Return a pixel manipulation function. + + Used in N2V family of algorithms. + + Parameters + ---------- + patch_transform_type : str + Type of patch transform. + target : bool + Whether the transform is applied to the target(if the target is present). + mode : str + Train or predict mode. + + Returns + ------- + Union[None, Callable] + Patch transform function. + """ + if patch_transforms is None: + return Aug.Compose( + [Aug.NoOp()], + additional_targets={"target": "image"} + if (target and normalize_mask) + else {}, + ) + elif isinstance(patch_transforms, list): + patch_transforms[[t["name"] for t in patch_transforms].index("Normalize")][ + "parameters" + ] = { + "mean": mean, + "std": std, + "max_pixel_value": 1, + } + # TODO not very readable + return Aug.Compose( + [ + get_all_transforms()[transform["name"]](**transform["parameters"]) + if "parameters" in transform + else get_all_transforms()[transform["name"]]() + for transform in patch_transforms + if transform["name"] != "ManipulateN2V" + ], + additional_targets={"target": "image"} + if (target and normalize_mask) + else {}, + ) + elif isinstance(patch_transforms, Aug.Compose): + return Aug.Compose( + [ + t + for t in patch_transforms.transforms[:-1] + if not isinstance(t, Aug.Normalize) + ] + + [ + Aug.Normalize(mean=mean, std=std, max_pixel_value=1), + ], + additional_targets={"target": "image"} + if (target and normalize_mask) + else {}, + ) + else: + raise ValueError( + f"Incorrect patch transform type {patch_transforms}. " + f"Please refer to the documentation." # TODO add link to documentation + ) diff --git a/src/careamics/dataset/patching/patching.py b/src/careamics/dataset/patching/patching.py new file mode 100644 index 00000000..26c2404f --- /dev/null +++ b/src/careamics/dataset/patching/patching.py @@ -0,0 +1,374 @@ +""" +Tiling submodule. + +These functions are used to tile images into patches or tiles. 
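+
+A sketch of the intended prediction flow (hypothetical array and tile sizes):
+
+    tiles = generate_patches_predict(sample, "YX", (256, 256), (48, 48))
+    # each tile: (tile, last_tile, shape, overlap_crop_coords, stitch_coords)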
+""" +from pathlib import Path +from typing import Callable, Generator, List, Optional, Tuple, Union + +import numpy as np +import zarr + +from ...config.support.supported_extraction_strategies import ( + SupportedExtractionStrategy, +) +from ...utils.logging import get_logger +from ..dataset_utils import reshape_array +from .random_patching import extract_patches_random, extract_patches_random_from_chunks +from .sequential_patching import extract_patches_sequential +from .tiled_patching import extract_tiles + +logger = get_logger(__name__) + + +# called by in memory dataset +def prepare_patches_supervised( + train_files: List[Path], + target_files: List[Path], + axes: str, + patch_size: Union[List[int], Tuple[int]], + read_source_func: Optional[Callable] = None, +) -> Tuple[np.ndarray, float, float]: + """ + Iterate over data source and create an array of patches and corresponding targets. + + Returns + ------- + np.ndarray + Array of patches. + """ + train_files.sort() + target_files.sort() + + means, stds, num_samples = 0, 0, 0 + all_patches, all_targets = [], [] + for train_filename, target_filename in zip(train_files, target_files): + try: + sample: np.ndarray = read_source_func(train_filename, axes) + target: np.ndarray = read_source_func(target_filename, axes) + means += sample.mean() + stds += sample.std() + num_samples += 1 + + # reshape array + sample = reshape_array(sample, axes) + target = reshape_array(target, axes) + + # generate patches, return a generator + patches, targets = extract_patches_sequential( + sample, patch_size=patch_size, target=target + ) + + # convert generator to list and add to all_patches + all_patches.append(patches) + all_targets.append(targets) + + except Exception as e: + # emit warning and continue + logger.error(f"Failed to read {train_filename} or {target_filename}: {e}") + + # raise error if no valid samples found + if num_samples == 0: + raise ValueError( + f"No valid samples found in the input data: {train_files} and " + f"{target_files}." + ) + + result_mean, result_std = means / num_samples, stds / num_samples + + all_patches = np.concatenate(all_patches, axis=0) + all_targets = np.concatenate(all_targets, axis=0) + logger.info(f"Extracted {all_patches.shape[0]} patches from input array.") + + return ( + all_patches, + all_targets, + result_mean, + result_std, + ) + + +# called by in memory dataset +def prepare_patches_unsupervised( + train_files: List[Path], + axes: str, + patch_size: Union[List[int], Tuple[int]], + read_source_func: Optional[Callable] = None, +) -> Tuple[np.ndarray, float, float]: + """ + Iterate over data source and create an array of patches. + + Returns + ------- + np.ndarray + Array of patches. 
+ """ + means, stds, num_samples = 0, 0, 0 + all_patches = [] + for filename in train_files: + try: + sample: np.ndarray = read_source_func(filename, axes) + means += sample.mean() + stds += sample.std() + num_samples += 1 + + # reshape array + sample = reshape_array(sample, axes) + + # generate patches, return a generator + patches, _ = extract_patches_sequential(sample, patch_size=patch_size) + + # convert generator to list and add to all_patches + all_patches.append(patches) + except Exception as e: + # emit warning and continue + logger.error(f"Failed to read {filename}: {e}") + + # raise error if no valid samples found + if num_samples == 0: + raise ValueError(f"No valid samples found in the input data: {train_files}.") + + result_mean, result_std = means / num_samples, stds / num_samples + + return np.concatenate(all_patches), _, result_mean, result_std + + +# called on arrays by in memory dataset +def prepare_patches_supervised_array( + data: np.ndarray, + axes: str, + data_target: np.ndarray, + patch_size: Union[List[int], Tuple[int]], +) -> Tuple[np.ndarray, float, float]: + # compute statistics + mean = data.mean() + std = data.std() + + # reshape array + sample = reshape_array(data, axes) + + # generate patches, return a generator + patches, patch_targets = extract_patches_sequential( + sample, patch_size=patch_size, target=data_target + ) + + logger.info(f"Extracted {patches.shape[0]} patches from input array.") + + return ( + patches, + patch_targets, + mean, + std, + ) + + +# called by in memory dataset +def prepare_patches_unsupervised_array( + data: np.ndarray, + axes: str, + patch_size: Union[List[int], Tuple[int]], +) -> Tuple[np.ndarray, float, float]: + """ + Iterate over data source and create an array of patches. + + This method expects an array of shape SC(Z)YX, where S and C can be singleton + dimensions. + + # TODO what dims does it return? + + Returns + ------- + np.ndarray + Array of patches. + """ + # calculate mean and std + mean = data.mean() + std = data.std() + + # reshape array + sample = reshape_array(data, axes) + + # generate patches, return a generator + patches, _ = extract_patches_sequential(sample, patch_size=patch_size) + + return patches, _, mean, std + + +# prediction, both in memory and iterable +def generate_patches_predict( + sample: np.ndarray, + axes: str, + tile_size: Union[List[int], Tuple[int]], + tile_overlap: Union[List[int], Tuple[int]], +) -> Tuple[np.ndarray, float, float]: + """ + Iterate over data source and create an array of patches. + + Returns + ------- + np.ndarray + Array of patches. + """ + # generate patches, return a generator + patches = extract_tiles( + arr=sample, axes=axes, tile_size=tile_size, overlaps=tile_overlap + ) + patches_list = list(patches) + if len(patches_list) == 0: + raise ValueError("No patch generated") + + return patches_list + + +# iterator over files +def generate_patches_supervised( + sample: Union[np.ndarray, zarr.Array], + axes: str, + patch_extraction_method: SupportedExtractionStrategy, + patch_size: Optional[Union[List[int], Tuple[int]]] = None, + patch_overlap: Optional[Union[List[int], Tuple[int]]] = None, + target: Optional[Union[np.ndarray, zarr.Array]] = None, +) -> Generator[np.ndarray, None, None]: + """ + Creates an iterator with patches and corresponding targets from a sample. + + Parameters + ---------- + sample : np.ndarray + Input array. + patch_extraction_method : ExtractionStrategies + Patch extraction method, as defined in extraction_strategy.ExtractionStrategy. 
+ patch_size : Optional[Union[List[int], Tuple[int]]] + Size of the patches along each dimension of the array, except the first. + patch_overlap : Optional[Union[List[int], Tuple[int]]] + Overlap between patches. + + Returns + ------- + Generator[np.ndarray, None, None] + Generator yielding patches/tiles. + + Raises + ------ + ValueError + If overlap is not specified when using tiling. + ValueError + If patches is None. + """ + patches = None + targets = None + + if patch_size is not None: + patches = None + + if patch_extraction_method == SupportedExtractionStrategy.TILED: + if patch_overlap is None: + raise ValueError( + "Overlaps must be specified when using tiling (got None)." + ) + patches = extract_tiles( + arr=sample, axes=axes, tile_size=patch_size, overlaps=patch_overlap + ) + + elif patch_extraction_method == SupportedExtractionStrategy.SEQUENTIAL: + patches, targets = extract_patches_sequential( + arr=sample, patch_size=patch_size, target=target + ) + + elif patch_extraction_method == SupportedExtractionStrategy.RANDOM: + # Returns a generator of patches and targets(if present) + patches = extract_patches_random( + arr=sample, patch_size=patch_size, target=target + ) + + elif patch_extraction_method == SupportedExtractionStrategy.RANDOM_ZARR: + # Returns a generator of patches and targets(if present) + patches = extract_patches_random_from_chunks( + sample, patch_size=patch_size, chunk_size=sample.chunks + ) + + if patches is None: + raise ValueError("No patch generated") + + return patches, targets + else: + # no patching + return (sample for _ in range(1)), target + + +# iterator over files +def generate_patches_unsupervised( + sample: Union[np.ndarray, zarr.Array], + axes: str, + patch_extraction_method: SupportedExtractionStrategy, + patch_size: Optional[Union[List[int], Tuple[int]]] = None, + patch_overlap: Optional[Union[List[int], Tuple[int]]] = None, +) -> Generator[np.ndarray, None, None]: + """ + Creates an iterator over patches from a sample. + + # TODO what dimensions does it return? Is there S? + # TODO if there might be S, then maybe we can split into different functions + + Parameters + ---------- + sample : np.ndarray + Input array. + patch_extraction_method : ExtractionStrategies + Patch extraction method, as defined in extraction_strategy.ExtractionStrategy. + patch_size : Optional[Union[List[int], Tuple[int]]] + Size of the patches along each dimension of the array, except the first. + patch_overlap : Optional[Union[List[int], Tuple[int]]] + Overlap between patches. + + Returns + ------- + Generator[np.ndarray, None, None] + Generator yielding patches/tiles. + + Raises + ------ + ValueError + If overlap is not specified when using tiling. + ValueError + If patches is None. 
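+
+    Examples
+    --------
+    A minimal sketch using random extraction on an SC(Z)YX array (one sample,
+    one channel):
+
+    >>> import numpy as np
+    >>> arr = np.zeros((1, 1, 128, 128), dtype=np.float32)
+    >>> patches = generate_patches_unsupervised(
+    ...     arr,
+    ...     axes="YX",
+    ...     patch_extraction_method=SupportedExtractionStrategy.RANDOM,
+    ...     patch_size=(64, 64),
+    ... )
+    >>> patch, target = next(patches)  # target is None here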
+ """ + # if tiled (patches with overlaps) + if patch_extraction_method == SupportedExtractionStrategy.TILED: + if patch_overlap is None: + patch_overlap = [48] * len(patch_size) # TODO pass overlap instead + + # return a Generator of the following: + # - patch: np.ndarray, dims SC(Z)YX + # - last_tile: bool + # - shape: Tuple[int], shape of a tile, excluding the S dimension + # - overlap_crop_coords: coordinates used to crop the patch during stitching + # - stitch_coords: coordinates used to stitch the tiles back to the full image + patches = extract_tiles( + arr=sample, axes=axes, tile_size=patch_size, overlaps=patch_overlap + ) + + # random extraction + elif patch_extraction_method == SupportedExtractionStrategy.RANDOM: + # return a Generator that yields the following: + # - patch: np.ndarray, dimension C(Z)YX + # - target_patch: np.ndarray, dimension C(Z)YX, or None + patches = extract_patches_random(sample, patch_size=patch_size) + + # zarr specific random extraction + elif patch_extraction_method == SupportedExtractionStrategy.RANDOM_ZARR: + # # Returns a generator of patches and targets(if present) + # patches = extract_patches_random_from_chunks( + # sample, patch_size=patch_size, chunk_size=sample.chunks + # ) + raise NotImplementedError("Random zarr extraction not implemented yet.") + + # no patching, return sample + elif patch_extraction_method == SupportedExtractionStrategy.NONE: + patches = (sample for _ in range(1)) + + # no extraction method + else: + raise ValueError("Invalid patch extraction method.") + + return patches diff --git a/src/careamics/dataset/patching/random_patching.py b/src/careamics/dataset/patching/random_patching.py new file mode 100644 index 00000000..3069783f --- /dev/null +++ b/src/careamics/dataset/patching/random_patching.py @@ -0,0 +1,182 @@ +from typing import Generator, List, Optional, Tuple, Union + +import numpy as np +import zarr + +from .validate_patch_dimension import validate_patch_dimensions + + +# TODO this should not be responsible for reshaping, split into different functions +def extract_patches_random( + arr: np.ndarray, + patch_size: Union[List[int], Tuple[int]], + target: Optional[np.ndarray] = None, +) -> Generator[Tuple[np.ndarray, ...], None, None]: + """ + Generate patches from an array in a random manner. + + The method calculates how many patches the image can be divided into and then + extracts an equal number of random patches. + + It returns a generator that yields the following: + + - patch: np.ndarray, dimension C(Z)YX. + - target_patch: np.ndarray, dimension C(Z)YX, if the target is present, None + otherwise. + + Parameters + ---------- + arr : np.ndarray + Input image array. + patch_size : Tuple[int] + Patch sizes in each dimension. + + Yields + ------ + Generator[np.ndarray, None, None] + Generator of patches. 
+ """ + is_3d_patch = len(patch_size) == 3 + + # patches sanity check + patch_size = validate_patch_dimensions(arr, patch_size, is_3d_patch) + + # random generator + rng = np.random.default_rng() + + # iterate over the number of samples (S or T) + for sample_idx in range(arr.shape[0]): + # get sample array + sample = arr[sample_idx] + + # calculate the number of patches + n_patches = np.ceil(np.prod(sample.shape) / np.prod(patch_size)).astype(int) + + # iterate over the number of patches + for _ in range(n_patches): + # get crop coordinates + crop_coords = [ + rng.integers(0, sample.shape[i] - patch_size[1:][i], endpoint=True) + for i in range(len(patch_size[1:])) + ] + + # extract patch + patch = ( + sample[ + ( + ..., + *[ + slice(c, c + patch_size[1:][i]) + for i, c in enumerate(crop_coords) + ], + ) + ] + .copy() + .astype(np.float32) + ) + + # same for target + if target is not None: + target_patch = ( + target[ + ( + ..., + *[ + slice(c, c + patch_size[1:][i]) + for i, c in enumerate(crop_coords) + ], + ) + ] + .copy() + .astype(np.float32) + ) + # return patch and target patch + yield patch, target_patch + else: + # return patch + yield patch, None + + +def extract_patches_random_from_chunks( + arr: zarr.Array, + patch_size: Union[List[int], Tuple[int, ...]], + chunk_size: Union[List[int], Tuple[int, ...]], + chunk_limit: Optional[int] = None, +) -> Generator[np.ndarray, None, None]: + """ + Generate patches from an array in a random manner. + + The method calculates how many patches the image can be divided into and then + extracts an equal number of random patches. + + Parameters + ---------- + arr : np.ndarray + Input image array. + patch_size : Tuple[int] + Patch sizes in each dimension. + chunk_size : Tuple[int] + Chunk sizes to load from the. + + Yields + ------ + Generator[np.ndarray, None, None] + Generator of patches. 
+ """ + is_3d_patch = len(patch_size) == 3 + + # Patches sanity check + patch_size = validate_patch_dimensions(arr, patch_size, is_3d_patch) + + rng = np.random.default_rng() + num_chunks = chunk_limit if chunk_limit else np.prod(arr._cdata_shape) + + # Iterate over num chunks in the array + for _ in range(num_chunks): + chunk_crop_coords = [ + rng.integers(0, max(0, arr.shape[i] - chunk_size[i]), endpoint=True) + for i in range(len(chunk_size)) + ] + chunk = arr[ + ( + ..., + *[slice(c, c + chunk_size[i]) for i, c in enumerate(chunk_crop_coords)], + ) + ].squeeze() + + # Add a singleton dimension if the chunk does not have a sample dimension + if len(chunk.shape) == len(patch_size): + chunk = np.expand_dims(chunk, axis=0) + # Iterate over num samples (S) + for sample_idx in range(chunk.shape[0]): + spatial_chunk = chunk[sample_idx] + assert len(spatial_chunk.shape) == len( + patch_size + ), "Requested chunk shape is not equal to patch size" + + n_patches = np.ceil( + np.prod(spatial_chunk.shape) / np.prod(patch_size) + ).astype(int) + + # Iterate over the number of patches + for _ in range(n_patches): + patch_crop_coords = [ + rng.integers( + 0, spatial_chunk.shape[i] - patch_size[i], endpoint=True + ) + for i in range(len(patch_size)) + ] + patch = ( + spatial_chunk[ + ( + ..., + *[ + slice(c, c + patch_size[i]) + for i, c in enumerate(patch_crop_coords) + ], + ) + ] + .copy() + .astype(np.float32) + ) + yield patch diff --git a/src/careamics/dataset/patching/sequential_patching.py b/src/careamics/dataset/patching/sequential_patching.py new file mode 100644 index 00000000..f8e95cab --- /dev/null +++ b/src/careamics/dataset/patching/sequential_patching.py @@ -0,0 +1,198 @@ +from typing import Generator, List, Optional, Tuple, Union + +import numpy as np +from skimage.util import view_as_windows + +from .validate_patch_dimension import validate_patch_dimensions + + +def _compute_number_of_patches( + arr: np.ndarray, patch_sizes: Union[List[int], Tuple[int, ...]] +) -> Tuple[int, ...]: + """ + Compute the number of patches that fit in each dimension. + + Array must have one dimension more than the patches (C dimension). + + Parameters + ---------- + arr : np.ndarray + Input array. + patch_sizes : Tuple[int] + Size of the patches. + + Returns + ------- + Tuple[int] + Number of patches in each dimension. + """ + try: + n_patches = [ + np.ceil(arr.shape[i] / patch_sizes[i]).astype(int) + for i in range(len(patch_sizes)) + ] + except IndexError as e: + raise ( + f"Patch size {patch_sizes} is not compatible with array shape {arr.shape}" + ) from e + return tuple(n_patches) + + +def _compute_overlap( + arr: np.ndarray, patch_sizes: Union[List[int], Tuple[int, ...]] +) -> Tuple[int, ...]: + """ + Compute the overlap between patches in each dimension. + + Array must be of dimensions C(Z)YX, and patches must be of dimensions YX or ZYX. + If the array dimensions are divisible by the patch sizes, then the overlap is 0. + Otherwise, it is the result of the division rounded to the upper value. + + Parameters + ---------- + arr : np.ndarray + Input array 3 or 4 dimensions. + patch_sizes : Tuple[int] + Size of the patches. + + Returns + ------- + Tuple[int] + Overlap between patches in each dimension. 
+ """ + n_patches = _compute_number_of_patches(arr, patch_sizes) + + overlap = [ + np.ceil( + np.clip(n_patches[i] * patch_sizes[i] - arr.shape[i], 0, None) + / max(1, (n_patches[i] - 1)) + ).astype(int) + for i in range(len(patch_sizes)) + ] + return tuple(overlap) + + +def _compute_patch_steps( + patch_sizes: Union[List[int], Tuple[int, ...]], overlaps: Tuple[int, ...] +) -> Tuple[int, ...]: + """ + Compute steps between patches. + + Parameters + ---------- + patch_sizes : Tuple[int] + Size of the patches. + overlaps : Tuple[int] + Overlap between patches. + + Returns + ------- + Tuple[int] + Steps between patches. + """ + steps = [ + min(patch_sizes[i] - overlaps[i], patch_sizes[i]) + for i in range(len(patch_sizes)) + ] + return tuple(steps) + + +# TODO why stack the target here and not on a different dimension before calling this function? +def _compute_reshaped_view( + arr: np.ndarray, + window_shape: Tuple[int, ...], + step: Tuple[int, ...], + output_shape: Tuple[int, ...], + target: Optional[np.ndarray] = None, +) -> np.ndarray: + """ + Compute reshaped views of an array, where views correspond to patches. + + Parameters + ---------- + arr : np.ndarray + Array from which the views are extracted. + window_shape : Tuple[int] + Shape of the views. + step : Tuple[int] + Steps between views. + output_shape : Tuple[int] + Shape of the output array. + + Returns + ------- + np.ndarray + Array with views dimension. + """ + rng = np.random.default_rng() + + if target is not None: + arr = np.stack([arr, target], axis=0) + window_shape = (arr.shape[0], *window_shape) + step = (arr.shape[0], *step) + output_shape = (arr.shape[0], -1, arr.shape[2], *output_shape[2:]) + + patches = view_as_windows(arr, window_shape=window_shape, step=step).reshape( + *output_shape + ) + if target is not None: + rng.shuffle(patches, axis=1) + else: + rng.shuffle(patches, axis=0) + return patches + + +def extract_patches_sequential( + arr: np.ndarray, + patch_size: Union[List[int], Tuple[int]], + target: Optional[np.ndarray] = None, +) -> Generator[Tuple[np.ndarray, ...], None, None]: + """ + Generate patches from an array in a sequential manner. + + Array dimensions should be SC(Z)YX, where C can be a singleton dimension. The patches + are generated sequentially and cover the whole array. + + Parameters + ---------- + arr : np.ndarray + Input image array. + patch_size : Tuple[int] + Patch sizes in each dimension. + + Returns + ------- + Generator[Tuple[np.ndarray, ...], None, None] + Generator of patches. + """ + is_3d_patch = len(patch_size) == 3 + + # Patches sanity check and update + patch_size = validate_patch_dimensions(arr, patch_size, is_3d_patch) + + # Compute overlap + overlaps = _compute_overlap(arr=arr, patch_sizes=patch_size) + + # Create view window and overlaps + window_steps = _compute_patch_steps(patch_sizes=patch_size, overlaps=overlaps) + + output_shape = [ + -1, + ] + patch_size[1:] + + # Generate a view of the input array containing pre-calculated number of patches + # in each dimension with overlap. 
+    # Resulting array is resized to (n_patches, C, Z, Y, X) or (n_patches, C, Y, X)
+    patches = _compute_reshaped_view(
+        arr,
+        window_shape=patch_size,
+        step=window_steps,
+        output_shape=output_shape,
+        target=target,
+    )
+
+    if target is not None:
+        # target was concatenated to patches in _compute_reshaped_view
+        return (patches[0, ...], patches[1, ...])
+    else:
+        return patches, None
diff --git a/src/careamics/dataset/patching/tiled_patching.py b/src/careamics/dataset/patching/tiled_patching.py
new file mode 100644
index 00000000..e7031906
--- /dev/null
+++ b/src/careamics/dataset/patching/tiled_patching.py
@@ -0,0 +1,154 @@
+import itertools
+from typing import Generator, List, Tuple, Union
+
+import numpy as np
+
+
+def _compute_crop_and_stitch_coords_1d(
+    axis_size: int, tile_size: int, overlap: int
+) -> Tuple[List[Tuple[int, int]], ...]:
+    """
+    Compute the coordinates of each tile along an axis, given the overlap.
+
+    Parameters
+    ----------
+    axis_size : int
+        Length of the axis.
+    tile_size : int
+        Size of the tile for the given axis.
+    overlap : int
+        Size of the overlap for the given axis.
+
+    Returns
+    -------
+    Tuple[Tuple[int]]
+        Tuple of all coordinates for the given axis.
+    """
+    # Compute the step between tiles
+    step = tile_size - overlap
+    crop_coords = []
+    stitch_coords = []
+    overlap_crop_coords = []
+    # Iterate over the axis with a certain step
+    for i in range(0, max(1, axis_size - overlap), step):
+        # Check if the tile fits within the axis
+        if i + tile_size <= axis_size:
+            # Add the coordinates to crop one tile
+            crop_coords.append((i, i + tile_size))
+            # Add the pixel coordinates of the cropped tile in the original image space
+            stitch_coords.append(
+                (
+                    i + overlap // 2 if i > 0 else 0,
+                    i + tile_size - overlap // 2
+                    if crop_coords[-1][1] < axis_size
+                    else axis_size,
+                )
+            )
+            # Add the coordinates to crop the overlap from the prediction.
+            overlap_crop_coords.append(
+                (
+                    overlap // 2 if i > 0 else 0,
+                    tile_size - overlap // 2
+                    if crop_coords[-1][1] < axis_size
+                    else tile_size,
+                )
+            )
+        # If the tile does not fit within the axis, perform the above-mentioned
+        # operations starting from the end of the axis
+        else:
+            # if (axis_size - tile_size, axis_size) not in crop_coords:
+            crop_coords.append((max(0, axis_size - tile_size), axis_size))
+            last_tile_end_coord = stitch_coords[-1][1] if stitch_coords else 1
+            stitch_coords.append((last_tile_end_coord, axis_size))
+            overlap_crop_coords.append(
+                (tile_size - (axis_size - last_tile_end_coord), tile_size)
+            )
+            break
+    return crop_coords, stitch_coords, overlap_crop_coords
+
+
+# TODO is S in there?
+def extract_tiles(
+    arr: np.ndarray,
+    axes: str,
+    tile_size: Union[List[int], Tuple[int]],
+    overlaps: Union[List[int], Tuple[int]],
+) -> Generator:
+    """
+    Generate tiles from the input array with specified overlap.
+
+    The tiles cover the whole array. The method returns a generator that yields the
+    following:
+
+    - tile: np.ndarray, dimension SC(Z)YX.
+    - last_tile: bool, whether this is the last tile.
+    - shape: Tuple[int], shape of a tile, excluding the S dimension.
+    - overlap_crop_coords: Tuple[int], coordinates used to crop the patch during
+      stitching.
+    - stitch_coords: Tuple[int], coordinates used to stitch the tiles back to the full
+      image.
+
+    Parameters
+    ----------
+    arr : np.ndarray
+        Array of shape (S, (Z), Y, X).
+    axes : str
+        Axes of the input array.
+    tile_size : Union[List[int], Tuple[int]]
+        Tile sizes in each dimension, of length 2 or 3.
+    overlaps : Union[List[int], Tuple[int]]
+        Overlap values in each dimension, of length 2 or 3.
+
+    Yields
+    ------
+    Generator
+        Tile generator that yields the tile with corresponding coordinates to stitch
+        back the tiles together.
+    """
+    # Iterate over num samples (S)
+    for sample_idx in range(arr.shape[0]):
+        sample = arr[sample_idx]
+
+        # Create an array of coordinates for cropping and stitching all axes.
+        # Shape: (axes, type_of_coord, tile_num, start/end coord)
+        crop_and_stitch_coords_list = [
+            _compute_crop_and_stitch_coords_1d(
+                sample.shape[i + 1], tile_size[i], overlaps[i]
+            )
+            for i in range(len(tile_size))
+        ]
+
+        # Rearrange crop coordinates from a list of coordinate pairs per axis to a list
+        # grouped by type.
+        # For an axis of size 35, a tile size of 32 and an overlap of 24,
+        # _compute_crop_and_stitch_coords_1d
+        # will output ([(0, 32), (3, 35)], [(0, 20), (20, 35)], [(0, 20), (17, 32)]),
+        # where the first list is crop coordinates for 1st axis.
+        all_crop_coords, all_stitch_coords, all_overlap_crop_coords = zip(
+            *crop_and_stitch_coords_list
+        )
+
+        # Iterate over generated coordinate pairs:
+        for tile_idx, (crop_coords, stitch_coords, overlap_crop_coords) in enumerate(
+            zip(
+                itertools.product(*all_crop_coords),
+                itertools.product(*all_stitch_coords),
+                itertools.product(*all_overlap_crop_coords),
+            )
+        ):
+            tile = sample[(..., *[slice(c[0], c[1]) for c in list(crop_coords)])]
+
+            tile = (
+                np.expand_dims(tile, 0) if "S" in axes or len(tile.shape) == 2 else tile
+            )
+            # Check if we are at the end of the sample by comparing the tile index
+            # with the total number of tiles across all axes
+            if tile_idx == np.prod([len(axis) for axis in all_crop_coords]) - 1:
+                last_tile = True
+            else:
+                last_tile = False
+            yield (
+                tile.astype(np.float32),
+                last_tile,
+                arr.shape[1:],
+                overlap_crop_coords,
+                stitch_coords,
+            )
diff --git a/src/careamics/dataset/patching/validate_patch_dimension.py b/src/careamics/dataset/patching/validate_patch_dimension.py
new file mode 100644
index 00000000..2e389825
--- /dev/null
+++ b/src/careamics/dataset/patching/validate_patch_dimension.py
@@ -0,0 +1,58 @@
+from typing import List, Tuple, Union
+
+import numpy as np
+
+
+def validate_patch_dimensions(
+    arr: np.ndarray,
+    patch_size: Union[List[int], Tuple[int, ...]],
+    is_3d_patch: bool,
+) -> List[int]:
+    """
+    Check patch size and array compatibility.
+
+    This method validates the patch sizes with respect to the array dimensions:
+    - The patch sizes must have two dimensions fewer than the array (S and C
+      dimensions).
+    - Check that patch sizes are smaller than the array dimensions.
+
+    Parameters
+    ----------
+    arr : np.ndarray
+        Input array.
+    patch_size : Union[List[int], Tuple[int, ...]]
+        Size of the patches along each dimension of the array, except the first two
+        (S and C).
+    is_3d_patch : bool
+        Whether the patch is 3D or not.
+
+    Returns
+    -------
+    List[int]
+        Patch size expanded to the SC(Z)YX format, i.e. [1, C, *patch_size].
+
+    Raises
+    ------
+    ValueError
+        If the patch size is not consistent with the array shape (the array is
+        expected to have two more dimensions than the patch).
+    ValueError
+        If the patch size in Z is larger than the array dimension.
+    ValueError
+        If either of the patch sizes in X or Y is larger than the corresponding array
+        dimension.
+    """
+    if len(patch_size) != len(arr.shape[2:]):
+        raise ValueError(
+            f"There must be a patch size for each spatial dimension "
+            f"(got {patch_size} patches for dims {arr.shape})."
+ ) + + # Sanity checks on patch sizes versus array dimension + if is_3d_patch and patch_size[0] > arr.shape[-3]: + raise ValueError( + f"Z patch size is inconsistent with image shape " + f"(got {patch_size[0]} patches for dim {arr.shape[1]})." + ) + + if patch_size[-2] > arr.shape[-2] or patch_size[-1] > arr.shape[-1]: + raise ValueError( + f"At least one of YX patch dimensions is larger than the corresponding " + f"image dimension (got {patch_size} patches for dims {arr.shape[-2:]})." + ) + + # Update patch size to SC(Z)YX format + return [1, arr.shape[1], *patch_size] diff --git a/src/careamics/dataset/prepare_dataset.py b/src/careamics/dataset/prepare_dataset.py deleted file mode 100644 index 8f2f9d0d..00000000 --- a/src/careamics/dataset/prepare_dataset.py +++ /dev/null @@ -1,264 +0,0 @@ -""" -Dataset preparation module. - -Methods to set up the datasets for training, validation and prediction. -""" -from pathlib import Path -from typing import Callable, List, Optional, Union - -import numpy as np - -from ..config.data import DataModel -from ..utils import check_external_array_validity, check_tiling_validity -from .extraction_strategy import ExtractionStrategy -from .in_memory_dataset import InMemoryDataset, InMemoryPredictionDataset -from .iterable_dataset import IterableDataset -from .zarr_dataset import ZarrDataset - - -# TODO what is the difference between train and val datasets?? -# TODO it could be from memory as well here, yet it only takes a str (and not even a Path) -def get_train_dataset( - data_config: DataModel, - train_path: str, - train_target_path: Optional[str] = None, - read_source_func: Optional[Callable] = None, -) -> Union[IterableDataset, InMemoryDataset, ZarrDataset]: - """ - Create training dataset. - - Depending on the configuration, this methods return either a TiffDataset or an - InMemoryDataset. - - Parameters - ---------- - config : Data - Configuration. - train_path : Union[str, Path] - Path to training data. - - Returns - ------- - Union[TiffDataset, InMemoryDataset] - Dataset. 
- """ - if data_config.in_memory: - dataset = InMemoryDataset( - data_path=train_path, - data_format=data_config.extension, - axes=data_config.axes, - mean=data_config.mean, - std=data_config.std, - patch_size=data_config.patch_size, - patch_transform=data_config.transforms, - target_path=train_target_path, - target_format=data_config.data_format, - read_source_func=read_source_func, - ) - elif data_config.extension in ["tif", "tiff"]: - dataset = IterableDataset( - data_path=train_path, - data_format=data_config.extension, - axes=data_config.axes, - mean=data_config.mean, - std=data_config.std, - patch_extraction_method=ExtractionStrategy.RANDOM, - patch_size=data_config.patch_size, - patch_transform=data_config.transforms, - target_path=train_target_path, - target_format=data_config.extension, - ) - # elif config.data.data_format == "zarr": - # if ".zarray" in os.listdir(train_path): - # zarr_source = zarr.open(train_path, mode="r") - # else: - # source = zarr.DirectoryStore(train_path) - # cache = zarr.LRUStoreCache(source, max_size=None) - # zarr_source = zarr.group(store=cache, overwrite=False) - - # dataset = ZarrDataset( - # data_source=zarr_source, - # axes=config.data.axes, - # patch_extraction_method=ExtractionStrategy.RANDOM_ZARR, - # patch_size=config.training.patch_size, - # mean=config.data.mean, - # std=config.data.std, - # patch_transform=default_manipulate, - # patch_transform_params={ - # "mask_pixel_percentage": config.algorithm.masked_pixel_percentage, - # "roi_size": config.algorithm.roi_size, - # }, - # ) - return dataset - - -def get_validation_dataset( - data_config: DataModel, - val_path: str, - val_target_path: Optional[str] = None, - read_source_func: Optional[Callable] = None, -) -> Union[InMemoryDataset, ZarrDataset]: - """ - Create validation dataset. - - Validation dataset is kept in memory. - - Parameters - ---------- - config : Data - Configuration. - val_path : Union[str, Path] - Path to validation data. - - Returns - ------- - TiffDataset - In memory dataset. - """ - # TODO what about iterable dataset for validation?? 
- if data_config.extension in ["tif", "tiff"]: - dataset = InMemoryDataset( - data_path=val_path, - data_format=data_config.extension, - axes=data_config.axes, - mean=data_config.mean, - std=data_config.std, - patch_size=data_config.patch_size, - patch_transform=data_config.transforms, - target_path=val_target_path, - target_format=data_config.data_format, - read_source_func=read_source_func, - ) - # elif data_config.data_format == "zarr": - # if ".zarray" in os.listdir(val_path): - # zarr_source = zarr.open(val_path, mode="r") - # else: - # source = zarr.DirectoryStore(val_path) - # cache = zarr.LRUStoreCache(source, max_size=None) - # zarr_source = zarr.group(store=cache, overwrite=False) - - # dataset = ZarrDataset( - # data_source=zarr_source, - # axes=data_config.axes, - # patch_extraction_method=ExtractionStrategy.RANDOM_ZARR, - # patch_size=data_config.patch_size, - # num_patches=10, - # mean=data_config.mean, - # std=data_config.std, - # patch_transform=default_manipulate, - # patch_transform_params={ - # "mask_pixel_percentage": data_config.masked_pixel_percentage, - # "roi_size": data_config.roi_size, - # }, - # ) - - return dataset - - -def get_prediction_dataset( - data_config: DataModel, - pred_source: Union[str, Path, np.ndarray], - *, - tile_shape: Optional[List[int]] = None, - overlaps: Optional[List[int]] = None, - axes: Optional[str] = None, - read_source_func: Optional[Callable] = None, -) -> Union[IterableDataset, ZarrDataset]: - """ - Create prediction dataset. - - To use tiling, both `tile_shape` and `overlaps` must be specified, have same - length, be divisible by 2 and greater than 0. Finally, the overlaps must be - smaller than the tiles. - - By default, axes are extracted from the configuration. To use images with - different axes, set the `axes` parameter. Note that the difference between - configuration and parameter axes must be S or T, but not any of the spatial - dimensions (e.g. 2D vs 3D). - - Parameters - ---------- - config : Data - Configuration. - pred_path : Union[str, Path] - Path to prediction data. - tile_shape : Optional[List[int]], optional - 2D or 3D shape of the tiles, by default None. - overlaps : Optional[List[int]], optional - 2D or 3D overlaps between tiles, by default None. - axes : Optional[str], optional - Axes of the data, by default None. - - Returns - ------- - TiffDataset - Dataset. 
- """ - use_tiling = False # default value - - # Validate tiles and overlaps - if tile_shape is not None and overlaps is not None: - check_tiling_validity(tile_shape, overlaps) - - # Use tiling - use_tiling = True - - # Extraction strategy - if use_tiling: - patch_extraction_method = ExtractionStrategy.TILED - else: - patch_extraction_method = None - - # Create dataset - if isinstance(pred_source, np.ndarray): - check_external_array_validity(pred_source, axes, use_tiling) - - dataset = InMemoryPredictionDataset( - array=pred_source, - axes=axes if axes is not None else data_config.axes, - tile_size=tile_shape, - tile_overlap=overlaps, - mean=data_config.mean, - std=data_config.std, - read_source_func=read_source_func, - ) - elif isinstance(pred_source, str) or isinstance(pred_source, Path): - if data_config.extension in ["tif", "tiff"]: - dataset = IterableDataset( - data_path=pred_source, - data_format=data_config.extension, - axes=axes if axes is not None else data_config.axes, - mean=data_config.mean, - std=data_config.std, - patch_extraction_method=patch_extraction_method, - patch_size=tile_shape, - patch_overlap=overlaps, - patch_transform=None, - read_source_func=read_source_func, - ) - # elif data_config.data_format == "zarr": - # if ".zarray" in os.listdir(pred_source): - # zarr_source = zarr.open(pred_source, mode="r") - # else: - # source = zarr.DirectoryStore(pred_source) - # cache = zarr.LRUStoreCache(source, max_size=None) - # zarr_source = zarr.group(store=cache, overwrite=False) - - # dataset = ZarrDataset( - # data_source=zarr_source, - # axes=axes if axes is not None else data_config.axes, - # patch_extraction_method=ExtractionStrategy.RANDOM_ZARR, - # patch_size=tile_shape, - # num_patches=10, - # mean=data_config.mean, - # std=data_config.std, - # patch_transform=default_manipulate, - # patch_transform_params={ - # # TODO these parameters have disappeared from the config - # "mask_pixel_percentage": data_config.algorithm.masked_pixel_percentage, - # "roi_size": data_config.algorithm.roi_size, - # }, - # mode="predict", - # ) - - return dataset diff --git a/src/careamics/dataset/zarr_dataset.py b/src/careamics/dataset/zarr_dataset.py index fc45a9cc..5f279e90 100644 --- a/src/careamics/dataset/zarr_dataset.py +++ b/src/careamics/dataset/zarr_dataset.py @@ -8,10 +8,12 @@ from careamics.utils import RunningStats from careamics.utils.logging import get_logger +from ..config.support.supported_extraction_strategies import SupportedExtractionStrategy from ..utils import normalize -from .dataset_utils import read_zarr -from .extraction_strategy import ExtractionStrategy -from .patching import generate_patches_supervised, generate_patches_unsupervised +from .dataset_utils.dataset_utils import read_zarr +from .patching.patching import ( + generate_patches_unsupervised, +) logger = get_logger(__name__) @@ -50,7 +52,7 @@ def __init__( self, data_source: Union[zarr.Group, zarr.Array], axes: str, - patch_extraction_method: Union[ExtractionStrategy, None], + patch_extraction_method: Union[SupportedExtractionStrategy, None], patch_size: Optional[Union[List[int], Tuple[int]]] = None, num_patches: Optional[int] = None, mean: Optional[float] = None, @@ -97,7 +99,7 @@ def _generate_patches(self): np.ndarray Patch. 
""" - patches = generate_patches( + patches = generate_patches_unsupervised( self.sample, self.patch_extraction_method, self.patch_size, diff --git a/src/careamics/lightning_module.py b/src/careamics/lightning_module.py index cb8c5031..7a228291 100644 --- a/src/careamics/lightning_module.py +++ b/src/careamics/lightning_module.py @@ -1,17 +1,9 @@ -from pathlib import Path -from typing import Any, Callable, List, Optional, Union +from typing import Any, Optional, Union -import numpy as np import pytorch_lightning as L -from albumentations import Compose -from pytorch_lightning.loops.fetchers import _DataLoaderIterDataFetcher -from pytorch_lightning.loops.utilities import _no_grad_context -from pytorch_lightning.trainer import call -from pytorch_lightning.utilities.types import _PREDICT_OUTPUT -from torch import nn, optim -from torch.utils.data import DataLoader +from torch import nn -from careamics.config import AlgorithmModel, DataModel +from careamics.config import AlgorithmModel from careamics.config.support import ( SupportedAlgorithm, SupportedArchitecture, @@ -19,116 +11,24 @@ SupportedOptimizer, SupportedScheduler, ) -from careamics.dataset.dataset_utils import ( - data_type_validator, - list_files, - validate_files, -) -from careamics.dataset.in_memory_dataset import ( - InMemoryDataset, - InMemoryPredictionDataset, -) -from careamics.dataset.iterable_dataset import ( - IterableDataset, - IterablePredictionDataset, -) -from careamics.losses import create_loss_function -from careamics.models.model_factory import model_registry -from careamics.prediction import stitch_prediction -from careamics.utils import get_ram_size - - -class CAREamicsFiring(L.loops._PredictionLoop): - """Predict loop for tiles-based prediction.""" - - # def _predict_step(self, batch, batch_idx, dataloader_idx, dataloader_iter): - # self.model.predict_step(batch, batch_idx) - - @_no_grad_context - def run(self) -> Optional[_PREDICT_OUTPUT]: - self.setup_data() - if self.skip: - return None - self.reset() - self.on_run_start() - data_fetcher = self._data_fetcher - assert data_fetcher is not None - while True: - try: - if isinstance(data_fetcher, _DataLoaderIterDataFetcher): - dataloader_iter = next(data_fetcher) - # hook's batch_idx and dataloader_idx arguments correctness cannot be guaranteed in this setting - batch = data_fetcher._batch - batch_idx = data_fetcher._batch_idx - dataloader_idx = data_fetcher._dataloader_idx - else: - dataloader_iter = None - batch, batch_idx, dataloader_idx = next(data_fetcher) - self.batch_progress.is_last_batch = data_fetcher.done - # run step hooks - self._predict_step(batch, batch_idx, dataloader_idx, dataloader_iter) - except StopIteration: - # this needs to wrap the `*_step` call too (not just `next`) for `dataloader_iter` support - break - finally: - self._restarting = False - return self.on_run_end() - -def predict_tiled_simple( - predictions: list, -) -> Union[np.ndarray, List[np.ndarray]]: - """ - Predict using tiling. +from careamics.losses import loss_factory +from careamics.models.model_factory import model_factory +from careamics.utils.torch_utils import get_optimizer, get_scheduler - Parameters - ---------- - pred_loader : DataLoader - Prediction dataloader. - progress_bar : ProgressBar - Progress bar. - tta : bool, optional - Whether to use test time augmentation, by default True. - Returns - ------- - Union[np.ndarray, List[np.ndarray]] - Predicted image, or list of predictions if the images have different sizes. 
+class CAREamicsKiln(L.LightningModule): + """CAREamics internal Lightning module class. - Warns - ----- - UserWarning - If the samples have different shapes, the prediction then returns a list. + This class is configured using an AlgorithmModel instance, parameterizing the deep + learning model, and defining training and validation steps. """ - prediction = [] - tiles = [] - stitching_data = [] - - for _i, (_tile, *auxillary) in enumerate(predictions): - # Unpack auxillary data into last tile indicator and data, required to - # stitch tiles together - if auxillary: - last_tile, *stitching_data = auxillary - - if last_tile: - # Stitch tiles together if sample is finished - predicted_sample = stitch_prediction(tiles, stitching_data) - prediction.append(predicted_sample) - tiles.clear() - stitching_data.clear() - - try: - return np.stack(prediction) - except ValueError: - return prediction - -class CAREamicsKiln(L.LightningModule): def __init__(self, algorithm_config: AlgorithmModel) -> None: super().__init__() # create model and loss function - self.model: nn.Module = model_registry(algorithm_config.model) - self.loss_func = create_loss_function(algorithm_config.loss) + self.model: nn.Module = model_factory(algorithm_config.model) + self.loss_func = loss_factory(algorithm_config.loss) # save optimizer and lr_scheduler names and parameters self.optimizer_name = algorithm_config.optimizer.name @@ -160,38 +60,72 @@ def predict_step(self, batch, batch_idx) -> Any: def configure_optimizers(self) -> Any: # instantiate optimizer - optimizer_func = getattr(optim, self.optimizer_name) + optimizer_func = get_optimizer(self.optimizer_name) optimizer = optimizer_func(self.model.parameters(), **self.optimizer_params) # and scheduler - scheduler_func = getattr(optim.lr_scheduler, self.lr_scheduler_name) + scheduler_func = get_scheduler(self.lr_scheduler_name) scheduler = scheduler_func(optimizer, **self.lr_scheduler_params) return { "optimizer": optimizer, "lr_scheduler": scheduler, - "monitor": "val_loss", # otherwise one gets a MisconfigurationException + "monitor": "val_loss", # otherwise triggers MisconfigurationException } -# TODO consider using a Literal[...] instead of the enums here? class CAREamicsModule(CAREamicsKiln): + """Class defining the API for CAREamics Lightning layer. + + This class exposes parameters used to create an AlgorithmModel instance, triggering + parameters validation. + + Parameters + ---------- + algorithm : Union[SupportedAlgorithm, str] + Algorithm to use for training (see SupportedAlgorithm). + loss : Union[SupportedLoss, str] + Loss function to use for training (see SupportedLoss). + architecture : Union[SupportedArchitecture, str] + Model architecture to use for training (see SupportedArchitecture). + model_parameters : dict, optional + Model parameters to use for training, by default {}. Model parameters are + defined in the relevant `torch.nn.Module` class, or Pyddantic model (see + `careamics.config.architectures`). + optimizer : Union[SupportedOptimizer, str], optional + Optimizer to use for training, by default "Adam" (see SupportedOptimizer). + optimizer_parameters : dict, optional + Optimizer parameters to use for training, as defined in `torch.optim`, by + default {}. + lr_scheduler : Union[SupportedScheduler, str], optional + Learning rate scheduler to use for training, by default "ReduceLROnPlateau" + (see SupportedScheduler). 
+ lr_scheduler_parameters : dict, optional + Learning rate scheduler parameters to use for training, as defined in + `torch.optim`, by default {}. + """ + def __init__( self, algorithm: Union[SupportedAlgorithm, str], loss: Union[SupportedLoss, str], architecture: Union[SupportedArchitecture, str], - model_parameters: dict = {}, + model_parameters: Optional[dict] = None, optimizer: Union[SupportedOptimizer, str] = "Adam", - optimizer_parameters: dict = {}, + optimizer_parameters: Optional[dict] = None, lr_scheduler: Union[SupportedScheduler, str] = "ReduceLROnPlateau", - lr_scheduler_parameters: dict = {}, + lr_scheduler_parameters: Optional[dict] = None, ) -> None: - + # create a AlgorithmModel compatible dictionary + if lr_scheduler_parameters is None: + lr_scheduler_parameters = {} + if optimizer_parameters is None: + optimizer_parameters = {} + if model_parameters is None: + model_parameters = {} algorithm_configuration = { "algorithm": algorithm, "loss": loss, - "model": {"architecture": architecture}, "optimizer": { "name": optimizer, "parameters": optimizer_parameters, @@ -199,229 +133,13 @@ def __init__( "lr_scheduler": { "name": lr_scheduler, "parameters": lr_scheduler_parameters, - } + }, } + model_configuration = {"architecture": architecture} + model_configuration.update(model_parameters) - # add model parameters - algorithm_configuration["model"].update(model_parameters) + # add model parameters to algorithm configuration + algorithm_configuration["model"] = model_configuration + # call the parent init using an AlgorithmModel instance super().__init__(AlgorithmModel(**algorithm_configuration)) - - -class CAREamicsWood(L.LightningDataModule): - def __init__( - self, - data_config: DataModel, - train_path: Union[Path, str], - val_path: Union[Path, str], - train_target_path: Optional[Union[Path, str]] = None, - val_target_path: Optional[Union[Path, str]] = None, - read_source_func: Optional[Callable] = None, - ) -> None: - super().__init__() - - self.data_config = data_config - self.train_path = train_path - self.val_path = val_path - self.data_type = data_config.data_type - self.train_target_path = train_target_path - self.val_target_path = val_target_path - self.read_source_func = read_source_func - self.batch_size = data_config.batch_size - self.num_workers = data_config.num_workers - self.pin_memory = data_config.pin_memory - - def prepare_data(self) -> None: - data_type_validator(self.data_type, self.read_source_func) - self.train_files, self.train_data_size = list_files( - self.train_path, self.data_type - ) - self.val_files, _ = list_files(self.val_path, self.data_type) - - if self.train_target_path is not None: - self.train_target_files, _ = list_files( - self.train_target_path, self.data_type - ) - validate_files(self.data_files, self.target_files) - - def setup(self, stage: Optional[str] = None) -> None: - if self.data_type == "zarr": - pass - elif self.data_type == "array": - self.train_dataset = InMemoryDataset( - files=self.train_files, - config=self.data_config, - target_files=self.train_target_files - if self.train_target_path - else None, - read_source_func=self.read_source_func, - ) - self.val_dataset = InMemoryDataset( - files=self.val_files, - config=self.data_config, - target_files=self.val_target_files if self.val_target_path else None, - read_source_func=self.read_source_func, - ) - else: - if self.train_data_size > get_ram_size() * 0.8: - self.train_dataset = IterableDataset( - files=self.train_files, - config=self.data_config, - 
target_files=self.train_target_files - if self.train_target_path - else None, - read_source_func=self.read_source_func, - ) - self.val_dataset = IterableDataset( - files=self.val_files, - config=self.data_config, - target_files=self.val_target_files - if self.val_target_path - else None, - read_source_func=self.read_source_func, - ) - - else: - self.train_dataset = InMemoryDataset( - files=self.train_files, - config=self.data_config, - target_files=self.train_target_files - if self.train_target_path - else None, - read_source_func=self.read_source_func, - ) - self.val_dataset = InMemoryDataset( - files=self.val_files, - config=self.data_config, - target_files=self.val_target_files - if self.val_target_path - else None, - read_source_func=self.read_source_func, - ) - - def train_dataloader(self) -> Any: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - pin_memory=self.pin_memory, - ) - - def val_dataloader(self) -> Any: - return DataLoader( - self.val_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - pin_memory=self.pin_memory, - ) - - -class CAREamicsClay(L.LightningDataModule): - def __init__( - self, - data_config: DataModel, - pred_path: Union[Path, str], - read_source_func: Optional[Callable] = None, - ) -> None: - super().__init__() - - self.data_config = data_config - self.pred_path = pred_path - self.data_type = data_config.data_type - self.read_source_func = read_source_func - self.batch_size = data_config.batch_size - self.num_workers = data_config.num_workers - self.pin_memory = data_config.pin_memory - - def prepare_data(self) -> None: - data_type_validator(self.data_type, self.read_source_func) - self.pred_files, _ = list_files(self.pred_path, self.data_type) - - def setup(self, stage: Optional[str] = None) -> None: - if self.data_type == "Zarr": - pass - elif self.data_type == "Array": - self.predict_dataset = InMemoryPredictionDataset( - files=self.pred_files, - config=self.data_config, - read_source_func=self.read_source_func, - ) - else: - self.predict_dataset = IterablePredictionDataset( - files=self.pred_files, - config=self.data_config, - read_source_func=self.read_source_func, - ) - - def predict_dataloader(self) -> Any: - return DataLoader( - self.predict_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - pin_memory=self.pin_memory, - ) - - -class CAREamicsTrainDataModule(CAREamicsWood): - def __init__( - self, - train_path: Union[str, Path], - val_path: Union[str, Path], - data_type: str, - patch_size: List[int], - axes: str, - batch_size: int, - transforms: Optional[Union[List, Compose]] = None, - train_target_path: Optional[Union[str, Path]] = None, - val_target_path: Optional[Union[str, Path]] = None, - read_source_func: Optional[Callable] = None, - data_loader_params: Optional[dict] = None, - **kwargs, - ) -> None: - data_loader_params = data_loader_params if data_loader_params else {} - data_config = { - "data_type": data_type.lower(), - "patch_size": patch_size, - "axes": axes, - "transforms": transforms, - "batch_size": batch_size, - **data_loader_params, - } - super().__init__( - data_config=DataModel(**data_config), - train_path=train_path, - val_path=val_path, - train_target_path=train_target_path, - val_target_path=val_target_path, - read_source_func=read_source_func, - ) - - -class CAREamicsPredictDataModule(CAREamicsClay): - def __init__( - self, - pred_path: Union[str, Path], - data_type: str, - tile_size: List[int], - axes: str, - batch_size: int, - 
transforms: Optional[Union[List, Compose]] = None, - read_source_func: Optional[Callable] = None, - data_loader_params: Optional[dict] = None, - **kwargs, - ) -> None: - data_loader_params = data_loader_params if data_loader_params else {} - - data_config = { - "data_type": data_type, - "patch_size": tile_size, - "axes": axes, - "transforms": transforms, - "batch_size": batch_size, - **data_loader_params, - } - super().__init__( - data_config=DataModel(**data_config), - pred_path=pred_path, - read_source_func=read_source_func, - ) diff --git a/src/careamics/lightning_prediction.py b/src/careamics/lightning_prediction.py new file mode 100644 index 00000000..ac261069 --- /dev/null +++ b/src/careamics/lightning_prediction.py @@ -0,0 +1,135 @@ +from typing import List, Optional, Union + +import numpy as np +import pytorch_lightning as L +from pytorch_lightning.loops.fetchers import _DataLoaderIterDataFetcher +from pytorch_lightning.loops.utilities import _no_grad_context +from pytorch_lightning.trainer import call +from pytorch_lightning.utilities.types import _PREDICT_OUTPUT + +from careamics.prediction import stitch_prediction +from careamics.utils import denormalize + + +class CAREamicsFiring(L.loops._PredictionLoop): + """Predict loop for tiles-based prediction.""" + + # def _predict_step(self, batch, batch_idx, dataloader_idx, dataloader_iter): + # self.model.predict_step(batch, batch_idx) + + def _on_predict_epoch_end(self) -> Optional[_PREDICT_OUTPUT]: + """Calls ``on_predict_epoch_end`` hook. + + Returns + ------- + the results for all dataloaders + + """ + trainer = self.trainer + call._call_callback_hooks(trainer, "on_predict_epoch_end") + call._call_lightning_module_hook(trainer, "on_predict_epoch_end") + + if self.return_predictions: + return self.predicted_array + return None + + @_no_grad_context + def run(self) -> Optional[_PREDICT_OUTPUT]: + self.setup_data() + if self.skip: + return None + self.reset() + self.on_run_start() + data_fetcher = self._data_fetcher + assert data_fetcher is not None + + self.predicted_array = [] + self.tiles = [] + self.stitching_data = [] + + while True: + try: + if isinstance(data_fetcher, _DataLoaderIterDataFetcher): + dataloader_iter = next(data_fetcher) + # hook's batch_idx and dataloader_idx arguments correctness cannot + # be guaranteed in this setting + batch = data_fetcher._batch + batch_idx = data_fetcher._batch_idx + dataloader_idx = data_fetcher._dataloader_idx + else: + dataloader_iter = None + batch, batch_idx, dataloader_idx = next(data_fetcher) + self.batch_progress.is_last_batch = data_fetcher.done + # run step hooks + self._predict_step(batch, batch_idx, dataloader_idx, dataloader_iter) + + # Stitching tiles together + last_tile, *data = self.predictions[batch_idx][1] + self.tiles.append(self.predictions[batch_idx][0]) + self.stitching_data.append(data) + if last_tile: + predicted_sample = stitch_prediction( + self.tiles, self.stitching_data + ) + # TODO replace with Albu class + denormalized_sample = denormalize( + predicted_sample, + self._data_source.instance.predict_dataset.mean, + self._data_source.instance.predict_dataset.std, + ) + self.predicted_array.append(denormalized_sample) + self.tiles.clear() + self.stitching_data.clear() + except StopIteration: + break + finally: + self._restarting = False + return self.on_run_end() + + +def predict_tiled_simple( + predictions: list, +) -> Union[np.ndarray, List[np.ndarray]]: + """ + Predict using tiling. + + Parameters + ---------- + pred_loader : DataLoader + Prediction dataloader. 
+ progress_bar : ProgressBar + Progress bar. + tta : bool, optional + Whether to use test time augmentation, by default True. + + Returns + ------- + Union[np.ndarray, List[np.ndarray]] + Predicted image, or list of predictions if the images have different sizes. + + Warns + ----- + UserWarning + If the samples have different shapes, the prediction then returns a list. + """ + prediction = [] + tiles = [] + stitching_data = [] + + for _i, (_tile, *auxillary) in enumerate(predictions): + # Unpack auxillary data into last tile indicator and data, required to + # stitch tiles together + if auxillary: + last_tile, *stitching_data = auxillary + + if last_tile: + # Stitch tiles together if sample is finished + predicted_sample = stitch_prediction(tiles, stitching_data) + prediction.append(predicted_sample) + tiles.clear() + stitching_data.clear() + + try: + return np.stack(prediction) + except ValueError: + return prediction diff --git a/src/careamics/ligthning_datamodule.py b/src/careamics/ligthning_datamodule.py new file mode 100644 index 00000000..235a6bdf --- /dev/null +++ b/src/careamics/ligthning_datamodule.py @@ -0,0 +1,525 @@ +from pathlib import Path +from typing import Any, Callable, List, Optional, Tuple, Union + +import numpy as np +import pytorch_lightning as L +from albumentations import Compose +from torch.utils.data import DataLoader + +from careamics.config import DataModel +from careamics.config.support import SupportedData +from careamics.dataset.dataset_utils import ( + get_files_size, + get_read_func, + list_files, + reshape_array, + validate_source_target_files, +) +from careamics.dataset.in_memory_dataset import ( + InMemoryDataset, + InMemoryPredictionDataset, +) +from careamics.dataset.iterable_dataset import ( + IterableDataset, + IterablePredictionDataset, +) +from careamics.utils import get_ram_size + + +# TODO must be compatible with no validation being present +class CAREamicsWood(L.LightningDataModule): + def __init__( + self, + data_config: DataModel, + train_data: Union[Path, str, np.ndarray], + val_data: Optional[Union[Path, str, np.ndarray]] = None, + train_data_target: Optional[Union[Path, str, np.ndarray]] = None, + val_data_target: Optional[Union[Path, str, np.ndarray]] = None, + read_source_func: Optional[Callable] = None, + extension_filter: str = "", + val_percentage: float = 0.1, + val_minimum_split: int = 5, + use_in_memory: bool = True, + ) -> None: + """LightningDataModule for CAREamics training, including training and validation + datasets. + + The data module can be used with Path, str or numpy arrays. In the case of + numpy arrays, it loads and computes all the patches in memory. For Path and str + inputs, it calculates the total file size and estimate whether it can fit in + memory. If it does not, it iterates through the files. This behaviour can be + deactivated by setting `use_in_memory` to False, in which case it will + always use the iterating dataset to train on a Path or str. + + The data can be either a folder containing images or a single file. + + Validation can be omitted, in which case the validation data is extracted from + the training data. The percentage of the training data to use for validation, + as well as the minimum number of patches or files to split from the training + data can be set using `val_percentage` and `val_minimum_split`, respectively. 
+
+        To read custom data types, you can set `data_type` to `custom` in
+        `data_config` and provide a function that returns a numpy array from a path
+        as the `read_source_func` parameter. The function will receive a Path object
+        and an axes string as arguments, the axes being derived from the
+        `data_config`.
+        # TODO is this necessary to pass the axes?
+
+        You can also provide a `fnmatch` and `Path.rglob` compatible expression (e.g.
+        "*.czi") to filter the file extensions using `extension_filter`.
+
+        Parameters
+        ----------
+        data_config : DataModel
+            Pydantic model for CAREamics data configuration.
+        train_data : Union[Path, str, np.ndarray]
+            Training data, can be a path to a folder, a file or a numpy array.
+        val_data : Optional[Union[Path, str, np.ndarray]], optional
+            Validation data, can be a path to a folder, a file or a numpy array, by
+            default None.
+        train_data_target : Optional[Union[Path, str, np.ndarray]], optional
+            Training target data, can be a path to a folder, a file or a numpy array,
+            by default None.
+        val_data_target : Optional[Union[Path, str, np.ndarray]], optional
+            Validation target data, can be a path to a folder, a file or a numpy
+            array, by default None.
+        read_source_func : Optional[Callable], optional
+            Function to read the source data, by default None. Only used for the
+            `custom` data type (see DataModel).
+        extension_filter : str, optional
+            Filter for file extensions, by default "". Only used for the `custom`
+            data type (see DataModel).
+        val_percentage : float, optional
+            Percentage of the training data to use for validation, by default 0.1.
+            Only used if `val_data` is None.
+        val_minimum_split : int, optional
+            Minimum number of patches or files to split from the training data for
+            validation, by default 5. Only used if `val_data` is None.
+        use_in_memory : bool, optional
+            Use an in-memory dataset whenever possible, by default True.
+
+        Raises
+        ------
+        NotImplementedError
+            Raised if target data is provided.
+        ValueError
+            If the input types are mixed (e.g. Path and np.ndarray).
+        ValueError
+            If the data type is `custom` and no `read_source_func` is provided.
+        ValueError
+            If the data type is `array` and the input is not a numpy array.
+        ValueError
+            If the data type is `tiff` and the input is neither a Path nor a str.
+        """
+        super().__init__()
+
+        if train_data_target is not None:
+            raise NotImplementedError(
+                "Training with target data is not yet implemented."
+            )
+
+        # check coherence of input types (no mixed types)
+        inputs = [train_data, val_data, train_data_target, val_data_target]
+        types_set = {type(i) for i in inputs}
+        if len(types_set) > 2:  # None + expected type
+            raise ValueError(
+                f"Inputs for `train_data`, `val_data`, `train_data_target` and "
+                f"`val_data_target` must be of the same type or None. Got "
+                f"{types_set}."
+            )
+
+        # check that a read source function is provided for custom types
+        if data_config.data_type == SupportedData.CUSTOM and read_source_func is None:
+            raise ValueError(
+                f"Data type {SupportedData.CUSTOM} is not allowed without "
+                f"specifying a `read_source_func`."
+            )
+
+        # and that arrays are passed, if array type specified
+        elif data_config.data_type == SupportedData.ARRAY and not isinstance(
+            train_data, np.ndarray
+        ):
+            raise ValueError(
+                f"Expected array input (see configuration.data.data_type), but got "
+                f"{type(train_data)} instead."
+ ) + + # and that Path or str are passed, if tiff file type specified + elif data_config.data_type == SupportedData.TIFF and ( + not isinstance(train_data, Path) and not isinstance(train_data, str) + ): + raise ValueError( + f"Expected Path or str input (see configuration.data.data_type), " + f"but got {type(train_data)} instead." + ) + + # configuration + self.data_config = data_config + self.data_type = data_config.data_type + self.batch_size = data_config.batch_size + self.num_workers = data_config.num_workers + self.pin_memory = data_config.pin_memory + self.use_in_memory = use_in_memory + + # data + self.train_data = train_data + self.val_data = val_data + + self.train_data_target = train_data_target + self.val_data_target = val_data_target + self.val_percentage = val_percentage + self.val_minimum_split = val_minimum_split + + # read source function corresponding to the requested type + if data_config.data_type == SupportedData.CUSTOM: + self.read_source_func = read_source_func + else: + self.read_source_func = get_read_func(data_config.data_type) + self.extension_filter = extension_filter + + def prepare_data(self) -> None: + """Hook used to prepare the data before calling `setup` and creating + the dataloader. + + Here, we only need to examine the data if it was provided as a str or a Path. + """ + # if the data is a Path or a str + if not isinstance(self.train_data, np.ndarray): + # list training files + self.train_files = list_files( + self.train_data, self.data_type, self.extension_filter + ) + self.train_files_size = get_files_size(self.train_files) + + # list validation files + if self.val_data is not None: + self.val_files = list_files( + self.val_data, self.data_type, self.extension_filter + ) + + # same for target data + if self.train_data_target is not None: + self.train_target_files = list_files( + self.train_data_target, self.data_type, self.extension_filter + ) + + # verify that they match the training data + validate_source_target_files(self.train_files, self.train_target_files) + + if self.val_data_target is not None: + self.val_target_files = list_files( + self.val_data_target, self.data_type, self.extension_filter + ) + + # verify that they match the validation data + validate_source_target_files(self.val_files, self.val_target_files) + # else: + # # reshape array + # self.train_data = reshape_array(self.train_data, self.data_config.axes) + + # # validation + # if self.val_data is not None: + # self.val_data = reshape_array(self.val_data, self.data_config.axes) + + # # target arrays + # if self.train_data_target is not None: + # self.train_data_target = reshape_array( + # self.train_data_target, self.data_config.axes + # ) + + # if self.val_data_target is not None: + # self.val_data_target = reshape_array( + # self.val_data_target, self.data_config.axes + # ) + + def setup(self, *args, **kwargs) -> None: + """Hook called at the beginning of fit (train + validate), validate, test, or + predict. 
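+
+        A minimal usage sketch (names are hypothetical; this hook is normally
+        invoked by the PyTorch Lightning ``Trainer`` rather than called directly):
+
+        >>> # data_module = CAREamicsWood(data_config=config, train_data="train/")
+        >>> # trainer = Trainer(max_epochs=1)
+        >>> # trainer.fit(model, datamodule=data_module)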
+        """
+        # if numpy array
+        if self.data_type == SupportedData.ARRAY:
+            # train dataset
+            self.train_dataset = InMemoryDataset(
+                data_config=self.data_config,
+                data=self.train_data,
+                data_target=self.train_data_target,
+            )
+
+            # validation dataset
+            if self.val_data is not None:
+                # create its own dataset
+                self.val_dataset = InMemoryDataset(
+                    data_config=self.data_config,
+                    data=self.val_data,
+                    data_target=self.val_data_target,
+                )
+            else:
+                # extract validation from the training patches
+                self.val_dataset = self.train_dataset.split_dataset(
+                    percentage=self.val_percentage,
+                    minimum_patches=self.val_minimum_split,
+                )
+
+        # else we read files
+        else:
+            # Heuristic: if the file size is smaller than 80% of the RAM, we run
+            # the training in memory, otherwise we switch to an iterable dataset.
+            # The switch is deactivated if use_in_memory is False.
+            if self.use_in_memory and self.train_files_size < get_ram_size() * 0.8:
+                # train dataset
+                self.train_dataset = InMemoryDataset(
+                    data_config=self.data_config,
+                    data=self.train_files,
+                    data_target=self.train_target_files
+                    if self.train_data_target
+                    else None,
+                    read_source_func=self.read_source_func,
+                )
+
+                # validation dataset (`val_files` only exists if `val_data` is set)
+                if self.val_data is not None:
+                    self.val_dataset = InMemoryDataset(
+                        data_config=self.data_config,
+                        data=self.val_files,
+                        data_target=self.val_target_files
+                        if self.val_data_target
+                        else None,
+                        read_source_func=self.read_source_func,
+                    )
+                else:
+                    # split dataset
+                    self.val_dataset = self.train_dataset.split_dataset(
+                        percentage=self.val_percentage,
+                        minimum_patches=self.val_minimum_split,
+                    )
+
+            # else if the data is too large, load file by file during training
+            else:
+                # create training dataset
+                self.train_dataset = IterableDataset(
+                    data_config=self.data_config,
+                    src_files=self.train_files,
+                    target_files=self.train_target_files
+                    if self.train_data_target
+                    else None,
+                    read_source_func=self.read_source_func,
+                )
+
+                # create validation dataset
+                if self.val_data is not None:
+                    # create its own dataset
+                    self.val_dataset = IterableDataset(
+                        data_config=self.data_config,
+                        src_files=self.val_files,
+                        target_files=self.val_target_files
+                        if self.val_data_target
+                        else None,
+                        read_source_func=self.read_source_func,
+                    )
+                elif len(self.train_files) <= self.val_minimum_split:
+                    raise ValueError(
+                        f"Not enough files to extract a validation set of at least "
+                        f"{self.val_minimum_split} files, got {len(self.train_files)} "
+                        f"files."
+ ) + else: + # extract validation from the training patches + self.val_dataset = self.train_dataset.split_dataset( + percentage=self.val_percentage, + minimum_files=self.val_minimum_split, + ) + + def train_dataloader(self) -> Any: + return DataLoader( + self.train_dataset, + batch_size=self.batch_size, + num_workers=self.num_workers, + pin_memory=self.pin_memory, + ) + + def val_dataloader(self) -> Any: + return DataLoader( + self.val_dataset, + batch_size=self.batch_size, + num_workers=self.num_workers, + pin_memory=self.pin_memory, + ) + + +class CAREamicsClay(L.LightningDataModule): + def __init__( + self, + data_config: DataModel, + pred_data: Union[Path, str, np.ndarray], + tile_size: Union[List[int], Tuple[int]], + tile_overlap: Union[List[int], Tuple[int]], + read_source_func: Optional[Callable] = None, + extension_filter: str = "", + ) -> None: + super().__init__() + + # check that a read source function is provided for custom types + if data_config.data_type == SupportedData.CUSTOM and read_source_func is None: + raise ValueError( + f"Data type {SupportedData.CUSTOM} is not allowed without " + f"specifying a `read_source_func`." + ) + + # and that arrays are passed, if array type specified + elif data_config.data_type == SupportedData.ARRAY and not isinstance( + pred_data, np.ndarray + ): + raise ValueError( + f"Expected array input (see configuration.data.data_type), but got " + f"{type(pred_data)} instead." + ) + + # and that Path or str are passed, if tiff file type specified + elif data_config.data_type == SupportedData.TIFF and not ( + isinstance(pred_data, Path) or isinstance(pred_data, str) + ): + raise ValueError( + f"Expected Path or str input (see configuration.data.data_type), " + f"but got {type(pred_data)} instead." + ) + + # configuration data + self.data_config = data_config + self.data_type = data_config.data_type + self.batch_size = data_config.batch_size + self.num_workers = data_config.num_workers + self.pin_memory = data_config.pin_memory + + self.pred_data = pred_data + self.tile_size = tile_size + self.tile_overlap = tile_overlap + + # read source function + if data_config.data_type == SupportedData.CUSTOM: + self.read_source_func = read_source_func + else: + self.read_source_func = get_read_func(data_config.data_type) + self.extension_filter = extension_filter + + def prepare_data(self) -> None: + # if the data is a Path or a str + if not isinstance(self.pred_data, np.ndarray): + self.pred_files = list_files( + self.pred_data, self.data_type, self.extension_filter + ) + else: + # reshape array + self.pred_data = reshape_array(self.pred_data, self.data_config.axes) + + def setup(self, stage: Optional[str] = None) -> None: + # if numpy array + if self.data_type == SupportedData.ARRAY: + # prediction dataset + self.predict_dataset = InMemoryPredictionDataset( + data_config=self.data_config, + data=self.pred_data, + tile_size=self.tile_size, + tile_overlap=self.tile_overlap, + ) + else: + self.predict_dataset = IterablePredictionDataset( + files=self.pred_files, + data_config=self.data_config, + read_source_func=self.read_source_func, + tile_size=self.tile_size, + tile_overlap=self.tile_overlap, + ) + + def predict_dataloader(self) -> Any: + return DataLoader( + self.predict_dataset, + batch_size=self.batch_size, + num_workers=self.num_workers, + pin_memory=self.pin_memory, + ) + + +class CAREamicsTrainDataModule(CAREamicsWood): + def __init__( + self, + train_path: Union[str, Path], + val_path: Union[str, Path], + data_type: Union[str, SupportedData], + 
patch_size: List[int], + axes: str, + batch_size: int, + transforms: Optional[Union[List, Compose]] = None, + train_target_path: Optional[Union[str, Path]] = None, + val_target_path: Optional[Union[str, Path]] = None, + read_source_func: Optional[Callable] = None, + extension_filter: str = "", + val_percentage: float = 0.1, + val_minimum_patches: int = 5, + num_workers: int = 0, + pin_memory: bool = False, + **kwargs, + ) -> None: + data_config = { + "mode": "train", + "data_type": data_type, + "patch_size": patch_size, + "axes": axes, + "batch_size": batch_size, + "num_workers": num_workers, + "pin_memory": pin_memory, + } + + # if transforms are passed (otherwise it will use the default ones) + if transforms is not None: + data_config["transforms"] = transforms + + super().__init__( + data_config=DataModel(**data_config), + train_data=train_path, + val_data=val_path, + train_data_target=train_target_path, + val_data_target=val_target_path, + read_source_func=read_source_func, + extension_filter=extension_filter, + val_percentage=val_percentage, + val_minimum_split=val_minimum_patches, + ) + + +class CAREamicsPredictDataModule(CAREamicsClay): + def __init__( + self, + pred_path: Union[str, Path], + data_type: Union[str, SupportedData], + tile_size: List[int], + axes: str, + batch_size: int, + transforms: Optional[Union[List, Compose]] = None, + read_source_func: Optional[Callable] = None, + extension_filter: str = "", + num_workers: int = 0, + pin_memory: bool = False, + **kwargs, + ) -> None: + data_config = { + "mode": "predict", + "data_type": data_type, + "patch_size": tile_size, + "axes": axes, + "batch_size": batch_size, + "num_workers": num_workers, + "pin_memory": pin_memory, + } + + # TODO different default for prediction transforms?? Should not have + # ManipulateN2V + + # if transforms are passed (otherwise it will use the default ones) + if transforms is not None: + data_config["transforms"] = transforms + + super().__init__( + data_config=DataModel(**data_config), + pred_data=pred_path, + tile_size=tile_size, + tile_overlap=(48, 48), + read_source_func=read_source_func, + extension_filter=extension_filter, + ) diff --git a/src/careamics/losses/__init__.py b/src/careamics/losses/__init__.py index b960791e..82c8d7f3 100644 --- a/src/careamics/losses/__init__.py +++ b/src/careamics/losses/__init__.py @@ -1,7 +1,6 @@ """Losses module.""" -from .loss_factory import create_loss_function as create_loss_function -from .loss_factory import create_noise_model as create_noise_model -from .noise_models import GaussianMixtureNoiseModel as GaussianMixtureNoiseModel -from .noise_models import HistogramNoiseModel as HistogramNoiseModel +from .loss_factory import loss_factory as loss_factory +from .noise_model_factory import noise_model_factory as noise_model_factory +from .noise_models import GaussianMixtureNoiseModel, HistogramNoiseModel diff --git a/src/careamics/losses/loss_factory.py b/src/careamics/losses/loss_factory.py index 77366e92..4ca7b79a 100644 --- a/src/careamics/losses/loss_factory.py +++ b/src/careamics/losses/loss_factory.py @@ -3,24 +3,21 @@ This module contains a factory function for creating loss functions. 
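+
+A minimal usage sketch (imports follow `careamics.losses.__init__`):
+
+    >>> from careamics.losses import loss_factory
+    >>> from careamics.config.support import SupportedLoss
+    >>> n2v_loss = loss_factory(SupportedLoss.N2V)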
""" -from typing import Callable, Type, Union +from typing import Callable -from ..config import Configuration from ..config.support import SupportedLoss -from ..config.noise_models import NoiseModelType -from .losses import dice_loss, mae_loss, mse_loss, n2v_loss, pn2v_loss -from .noise_models import GaussianMixtureNoiseModel, HistogramNoiseModel +from .losses import mae_loss, mse_loss, n2v_loss # TODO add tests # TODO add custom? -def create_loss_function(loss_type: SupportedLoss) -> Callable: - """Create loss function based on Configuration. +def loss_factory(loss: SupportedLoss) -> Callable: + """Return loss function. Parameters ---------- - config : Configuration - Configuration. + loss: SupportedLoss + Requested loss. Returns ------- @@ -32,60 +29,20 @@ def create_loss_function(loss_type: SupportedLoss) -> Callable: NotImplementedError If the loss is unknown. """ - if loss_type == SupportedLoss.N2V: + if loss == SupportedLoss.N2V: return n2v_loss # elif loss_type == SupportedLoss.PN2V: # return pn2v_loss - elif loss_type == SupportedLoss.MAE: + elif loss == SupportedLoss.MAE: return mae_loss - elif loss_type == SupportedLoss.MSE: + elif loss == SupportedLoss.MSE: return mse_loss # elif loss_type == SupportedLoss.DICE: # return dice_loss else: - raise NotImplementedError(f"Loss {loss_type} is not yet supported.") - - -def create_noise_model( - config: Configuration, -) -> Type[Union[HistogramNoiseModel, GaussianMixtureNoiseModel, None]]: - """Create loss model based on Configuration. - - Parameters - ---------- - config : Configuration - Configuration. - - Returns - ------- - Noise model - - Raises - ------ - NotImplementedError - If the noise model is unknown. - """ - noise_model_type = ( - config.algorithm.noise_model.model_type - if config.algorithm.noise_model - else None - ) - - if noise_model_type == NoiseModelType.HIST: - return HistogramNoiseModel - - elif noise_model_type == NoiseModelType.GMM: - return GaussianMixtureNoiseModel - - elif noise_model_type is None: - return None - - else: - raise NotImplementedError( - f"Noise model {noise_model_type} is not yet supported." - ) + raise NotImplementedError(f"Loss {loss} is not yet supported.") diff --git a/src/careamics/losses/losses.py b/src/careamics/losses/losses.py index 83b2c829..01c94238 100644 --- a/src/careamics/losses/losses.py +++ b/src/careamics/losses/losses.py @@ -3,10 +3,11 @@ This submodule contains the various losses used in CAREamics. """ -from typing import Type import torch -from segmentation_models_pytorch.losses import DiceLoss # TODO if we are only using the DiceLoss, can we just implement it? + +# TODO if we are only using the DiceLoss, can we just implement it? +# from segmentation_models_pytorch.losses import DiceLoss from torch.nn import L1Loss, MSELoss from .noise_models import HistogramNoiseModel @@ -26,7 +27,9 @@ def mse_loss(samples: torch.Tensor, labels: torch.Tensor) -> torch.Tensor: def n2v_loss( - samples: torch.Tensor, labels: torch.Tensor, masks: torch.Tensor + manipulated_patches: torch.Tensor, + original_patches: torch.Tensor, + masks: torch.Tensor, ) -> torch.Tensor: """ N2V Loss function described in A Krull et al 2018. @@ -45,7 +48,7 @@ def n2v_loss( torch.Tensor Loss value. 
""" - errors = (labels - samples) ** 2 + errors = (original_patches - manipulated_patches) ** 2 # Average over pixels and batch loss = torch.sum(errors * masks) / torch.sum(masks) return loss @@ -75,9 +78,9 @@ def pn2v_loss( samples: torch.Tensor, labels: torch.Tensor, masks: torch.Tensor, - noise_model: Type[HistogramNoiseModel], -): - """Probabilistic N2V loss function described in A Krull et al 2019.""" + noise_model: HistogramNoiseModel, +) -> torch.Tensor: + """Probabilistic N2V loss function described in A Krull et al., CVF (2019).""" likelihoods = noise_model.likelihood(labels, samples) likelihoods_avg = torch.log(torch.mean(likelihoods, dim=0, keepdim=True)[0, ...]) diff --git a/src/careamics/losses/noise_model_factory.py b/src/careamics/losses/noise_model_factory.py new file mode 100644 index 00000000..fdab1182 --- /dev/null +++ b/src/careamics/losses/noise_model_factory.py @@ -0,0 +1,40 @@ +from typing import Type, Union + +from ..config.noise_models import NoiseModel, NoiseModelType +from .noise_models import GaussianMixtureNoiseModel, HistogramNoiseModel + + +def noise_model_factory( + noise_config: NoiseModel, +) -> Type[Union[HistogramNoiseModel, GaussianMixtureNoiseModel, None]]: + """Create loss model based on Configuration. + + Parameters + ---------- + config : Configuration + Configuration. + + Returns + ------- + Noise model + + Raises + ------ + NotImplementedError + If the noise model is unknown. + """ + noise_model_type = noise_config.model_type if noise_config else None + + if noise_model_type == NoiseModelType.HIST: + return HistogramNoiseModel + + elif noise_model_type == NoiseModelType.GMM: + return GaussianMixtureNoiseModel + + elif noise_model_type is None: + return None + + else: + raise NotImplementedError( + f"Noise model {noise_model_type} is not yet supported." + ) diff --git a/src/careamics/losses/noise_models.py b/src/careamics/losses/noise_models.py index 2158999e..b5486510 100644 --- a/src/careamics/losses/noise_models.py +++ b/src/careamics/losses/noise_models.py @@ -1,8 +1,3 @@ -############################################ -# The Noise Model -############################################ - - from abc import ABC, abstractmethod import numpy as np @@ -13,6 +8,7 @@ logger = get_logger(__name__) +# TODO here "Model" clashes a bit with the naming convention of the Pydantic Models class NoiseModel(ABC): """Base class for noise models.""" @@ -42,7 +38,7 @@ class HistogramNoiseModel(NoiseModel): """ def __init__(self, **kwargs): - a = kwargs + pass def instantiate(self, bins, min_value, max_value, observation, signal): """Creates a nD histogram from 'observation' and 'signal'. 
diff --git a/src/careamics/models/__init__.py b/src/careamics/models/__init__.py index cb2f6bae..09d5eb23 100644 --- a/src/careamics/models/__init__.py +++ b/src/careamics/models/__init__.py @@ -1,4 +1,4 @@ """Models package.""" -from .model_factory import create_model as create_model +from .model_factory import model_factory from .unet import UNet as UNet diff --git a/src/careamics/models/activation.py b/src/careamics/models/activation.py index 58b803cb..c102fbc9 100644 --- a/src/careamics/models/activation.py +++ b/src/careamics/models/activation.py @@ -33,4 +33,3 @@ def get_activation(activation: Union[SupportedActivation, str]) -> Callable: return nn.Identity() else: raise ValueError(f"Activation {activation} not supported.") - diff --git a/src/careamics/models/layers.py b/src/careamics/models/layers.py index 661d64f9..19168f5e 100644 --- a/src/careamics/models/layers.py +++ b/src/careamics/models/layers.py @@ -3,7 +3,7 @@ This submodule contains layers used in the CAREamics models. """ -from typing import Optional, Tuple, Union +from typing import List, Optional, Tuple, Union import torch import torch.nn as nn @@ -155,22 +155,26 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return x -def _unpack_kernel_size(kernel_size: Union[Tuple[int], int], dim: int) -> Tuple[int]: +def _unpack_kernel_size( + kernel_size: Union[Tuple[int, ...], int], dim: int +) -> Tuple[int, ...]: """Unpack kernel_size to a tuple of ints. - Inspired by Kornia implementation. + Inspired by Kornia implementation. TODO: link """ if isinstance(kernel_size, int): - kernel_dims = [kernel_size for _ in range(dim)] + kernel_dims = tuple([kernel_size for _ in range(dim)]) else: kernel_dims = kernel_size return kernel_dims -def _compute_zero_padding(kernel_size: Union[Tuple[int], int], dim: int) -> Tuple[int]: +def _compute_zero_padding( + kernel_size: Union[Tuple[int, ...], int], dim: int +) -> Tuple[int, ...]: """Utility function that computes zero padding tuple.""" kernel_dims = _unpack_kernel_size(kernel_size, dim) - return [(kd - 1) // 2 for kd in kernel_dims] + return tuple([(kd - 1) // 2 for kd in kernel_dims]) def get_pascal_kernel_1d( @@ -180,9 +184,9 @@ def get_pascal_kernel_1d( device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> torch.Tensor: - """Generate Yang Hui triangle (Pascal's triangle) by a given number. + """Generate Yang Hui triangle (Pascal's triangle) for a given number. - Inspired by Kornia implementation. + Inspired by Kornia implementation. 
TODO link Parameters ---------- @@ -210,8 +214,8 @@ def get_pascal_kernel_1d( >>> get_pascal_kernel_1d(6) tensor([ 1., 5., 10., 10., 5., 1.]) """ - pre: list[float] = [] - cur: list[float] = [] + pre: List[float] = [] + cur: List[float] = [] for i in range(kernel_size): cur = [1.0] * (i + 1) @@ -230,7 +234,7 @@ def get_pascal_kernel_1d( return out -def get_pascal_kernel_nd( +def _get_pascal_kernel_nd( kernel_size: Union[Tuple[int, int], int], norm: bool = True, dim: int = 2, @@ -368,7 +372,7 @@ class MaxBlurPool(nn.Module): def __init__( self, dim: int, - kernel_size: Union[Tuple[int], int], + kernel_size: Union[Tuple[int, int], int], stride: int = 2, max_pool_size: int = 2, ceil_mode: bool = False, @@ -379,24 +383,24 @@ def __init__( self.stride = stride self.max_pool_size = max_pool_size self.ceil_mode = ceil_mode - self.kernel = get_pascal_kernel_nd(kernel_size, norm=True, dim=self.dim) + self.kernel = _get_pascal_kernel_nd(kernel_size, norm=True, dim=self.dim) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass of the function.""" self.kernel = torch.as_tensor(self.kernel, device=x.device, dtype=x.dtype) if self.dim == 2: return _max_blur_pool_by_kernel2d( - x, - self.kernel.repeat((x.size(1), 1, 1, 1)), - self.stride, - self.max_pool_size, - self.ceil_mode, - ) + x, + self.kernel.repeat((x.size(1), 1, 1, 1)), + self.stride, + self.max_pool_size, + self.ceil_mode, + ) else: return _max_blur_pool_by_kernel3d( - x, - self.kernel.repeat((x.size(1), 1, 1, 1, 1)), - self.stride, - self.max_pool_size, - self.ceil_mode, - ) + x, + self.kernel.repeat((x.size(1), 1, 1, 1, 1)), + self.stride, + self.max_pool_size, + self.ceil_mode, + ) diff --git a/src/careamics/models/model_factory.py b/src/careamics/models/model_factory.py index d8bdfbbc..5409fbb8 100644 --- a/src/careamics/models/model_factory.py +++ b/src/careamics/models/model_factory.py @@ -3,14 +3,11 @@ Model creation factory functions. """ -from pathlib import Path -from typing import Dict, Optional, Tuple, Union +from typing import Union import torch -from ..bioimage import import_bioimage_model -from ..config import Configuration -from ..config.architectures import UNetModel +from ..config.architectures import CustomModel, UNetModel, VAEModel, get_custom_model from ..config.support import SupportedArchitecture from ..utils.logging import get_logger from .unet import UNet @@ -18,16 +15,17 @@ logger = get_logger(__name__) -# TODO rename model factory -def model_registry(model_configuration: UNetModel) -> torch.nn.Module: +def model_factory( + model_configuration: Union[UNetModel, VAEModel, CustomModel] +) -> torch.nn.Module: """ - Model factory. + Deep learning model factory. - Supported models are defined in careamics.config.architectures.Architectures. + Supported models are defined in careamics.config.SupportedArchitecture. Parameters ---------- - model_configuration : UNetModel + model_configuration : Union[UNetModel, VAEModel] Model configuration Returns @@ -38,193 +36,16 @@ def model_registry(model_configuration: UNetModel) -> torch.nn.Module: Raises ------ NotImplementedError - If the requested model is not implemented. + If the requested architecture is not implemented. 
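+
+    Examples
+    --------
+    A minimal sketch; the `UNetModel` configuration mirrors the one used in the
+    tests:
+
+    >>> from careamics.config.architectures import UNetModel
+    >>> config = UNetModel(architecture="UNet", num_channels_init=16)
+    >>> model = model_factory(config)  # torch.nn.Module (UNet)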
""" if model_configuration.architecture == SupportedArchitecture.UNET: - return UNet( - **dict(model_configuration) - ) + return UNet(**dict(model_configuration)) + elif model_configuration.architecture == SupportedArchitecture.CUSTOM: + assert isinstance(model_configuration, CustomModel) + model = get_custom_model(model_configuration.name) + + return model(**model_configuration.parameters) else: raise NotImplementedError( f"Model {model_configuration.architecture} is not implemented or unknown." ) - -# TODO needs to go? -# TODO: split into two functions -def create_model( - *, - model_path: Optional[Union[str, Path]] = None, - config: Optional[Configuration] = None, - device: Optional[torch.device] = None, -) -> torch.nn.Module: - """ - Instantiate a model from a checkpoint or configuration. - - If both checkpoint and configuration are provided, the checkpoint is used. - - Parameters - ---------- - model_path : Optional[Union[str, Path]], optional - Path to a checkpoint, by default None. - config : Optional[Configuration], optional - Configuration, by default None. - device : Optional[torch.device], optional - Torch device, by default None. - - Returns - ------- - torch.nn.Module - Instantiated model. - - Raises - ------ - ValueError - If the checkpoint path is invalid. - ValueError - If the checkpoint is invalid. - ValueError - If neither checkpoint nor configuration are provided. - """ - if model_path is not None: - # Create model from checkpoint - model_path = Path(model_path) - if not model_path.exists() or model_path.suffix not in [".pth", ".zip"]: - raise ValueError( - f"Invalid model path: {model_path}. Current working dir: \ - {Path.cwd()!s}" - ) - - if model_path.suffix == ".zip": - model_path = import_bioimage_model(model_path) - - # Load checkpoint - checkpoint = torch.load(model_path, map_location=device) - - # Load the configuration - if "config" in checkpoint: - config = Configuration(**checkpoint["config"]) - algo_config = config.algorithm - model_config = algo_config.model.parameters - model_name = algo_config.model.architecture - else: - raise ValueError("Invalid checkpoint format, no configuration found.") - - # Create model - model = model_registry(model_config) - model.to(device) - # Load the model state dict - if "model_state_dict" in checkpoint: - model.load_state_dict(checkpoint["model_state_dict"]) - logger.info("Loaded model state dict") - else: - raise ValueError("Invalid checkpoint format") - - # Load the optimizer and scheduler - optimizer, scheduler = get_optimizer_and_scheduler( - config, model, state_dict=checkpoint - ) - scaler = get_grad_scaler(config, state_dict=checkpoint) - - elif config is not None: - # Create model from configuration - algo_config = config.algorithm - model_config = algo_config.model.parameters - model_name = algo_config.model.architecture - - # Create model - model = model_registry(model_name, algo_config.get_conv_dim(), model_config) - model.to(device) - optimizer, scheduler = get_optimizer_and_scheduler(config, model) - scaler = get_grad_scaler(config) - logger.info("Engine initialized from configuration") - - else: - raise ValueError("Either config or model_path must be provided") - return model, optimizer, scheduler, scaler, config - - -def get_optimizer_and_scheduler( - config: Configuration, model: torch.nn.Module, state_dict: Optional[Dict] = None -) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LRScheduler]: - """ - Create optimizer and learning rate schedulers. 
- - If a checkpoint state dictionary is provided, the optimizer and scheduler are - instantiated to the same state as the checkpoint's optimizer and scheduler. - - Parameters - ---------- - config : Configuration - Configuration. - model : torch.nn.Module - Model. - state_dict : Optional[Dict], optional - Checkpoint state dictionary, by default None. - - Returns - ------- - Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LRScheduler] - Optimizer and scheduler. - """ - # retrieve optimizer name and parameters from config - optimizer_name = config.training.optimizer.name - optimizer_params = config.training.optimizer.parameters - - # then instantiate it - optimizer_func = getattr(torch.optim, optimizer_name) - optimizer = optimizer_func(model.parameters(), **optimizer_params) - - # same for learning rate scheduler - scheduler_name = config.training.lr_scheduler.name - scheduler_params = config.training.lr_scheduler.parameters - scheduler_func = getattr(torch.optim.lr_scheduler, scheduler_name) - scheduler = scheduler_func(optimizer, **scheduler_params) - - # load state from ther checkpoint if available - if state_dict is not None: - if "optimizer_state_dict" in state_dict: - optimizer.load_state_dict(state_dict["optimizer_state_dict"]) - logger.info("Loaded optimizer state dict") - else: - logger.warning( - "No optimizer state dict found in checkpoint. Optimizer not loaded." - ) - if "scheduler_state_dict" in state_dict: - scheduler.load_state_dict(state_dict["scheduler_state_dict"]) - logger.info("Loaded LR scheduler state dict") - else: - logger.warning( - "No LR scheduler state dict found in checkpoint. " - "LR scheduler not loaded." - ) - return optimizer, scheduler - - -def get_grad_scaler( - config: Configuration, state_dict: Optional[Dict] = None -) -> torch.cuda.amp.GradScaler: - """ - Instantiate gradscaler. - - If a checkpoint state dictionary is provided, the scaler is instantiated to the - same state as the checkpoint's scaler. - - Parameters - ---------- - config : Configuration - Configuration. - state_dict : Optional[Dict], optional - Checkpoint state dictionary, by default None. - - Returns - ------- - torch.cuda.amp.GradScaler - Instantiated gradscaler. - """ - use = config.training.amp.use - scaling = config.training.amp.init_scale - scaler = torch.cuda.amp.GradScaler(init_scale=scaling, enabled=use) - if state_dict is not None and "scaler_state_dict" in state_dict: - scaler.load_state_dict(state_dict["scaler_state_dict"]) - logger.info("Loaded GradScaler state dict") - return scaler diff --git a/src/careamics/models/unet.py b/src/careamics/models/unet.py index 4486529d..f3a41fd3 100644 --- a/src/careamics/models/unet.py +++ b/src/careamics/models/unet.py @@ -8,9 +8,9 @@ import torch import torch.nn as nn -from .layers import Conv_Block, MaxBlurPool -from .activation import get_activation from ..config.support import SupportedActivation +from .activation import get_activation +from .layers import Conv_Block, MaxBlurPool class UnetEncoder(nn.Module): @@ -68,9 +68,6 @@ def __init__( """ super().__init__() - # TODO: what's this commented line? - # pooling_op = "MaxBlurPool" if n2v2 else "MaxPool" - self.pooling = ( getattr(nn, f"MaxPool{conv_dim}d")(kernel_size=pool_kernel) if not n2v2 @@ -217,8 +214,8 @@ def forward(self, *features: List[torch.Tensor]) -> torch.Tensor: torch.Tensor Output of the decoder. 
""" - x = features[0] - skip_connections = features[1:][::-1] + x: torch.Tensor = features[0] + skip_connections: torch.Tensor = features[1:][::-1] x = self.bottleneck(x) diff --git a/src/careamics/prediction/prediction_utils.py b/src/careamics/prediction/prediction_utils.py index fa3a462e..ffe9419c 100644 --- a/src/careamics/prediction/prediction_utils.py +++ b/src/careamics/prediction/prediction_utils.py @@ -48,7 +48,7 @@ def stitch_prediction( # Insert cropped tile into predicted image using stitch coordinates predicted_image[ (..., *[slice(c[0], c[1]) for c in stitch_coords]) - ] = cropped_tile + ] = cropped_tile.to(torch.float32) return predicted_image diff --git a/src/careamics/transforms/__init__.py b/src/careamics/transforms/__init__.py index 4132fe81..5b50ea1c 100644 --- a/src/careamics/transforms/__init__.py +++ b/src/careamics/transforms/__init__.py @@ -1,5 +1,9 @@ -"""""" +"""Transforms that are used to augment the data.""" -from .manipulate_n2v import ManipulateN2V -from .normalize_without_target import NormalizeWithoutTarget \ No newline at end of file +__all__ = ["N2VManipulateUniform", "N2VManipulateMedian", "NDFlip", "XYRandomRotate90"] + + +from .manipulate_n2v import N2VManipulateMedian, N2VManipulateUniform +from .nd_flip import NDFlip +from .xy_random_rotate90 import XYRandomRotate90 diff --git a/src/careamics/transforms/manipulate_n2v.py b/src/careamics/transforms/manipulate_n2v.py index f1cb0196..7eae2595 100644 --- a/src/careamics/transforms/manipulate_n2v.py +++ b/src/careamics/transforms/manipulate_n2v.py @@ -1,10 +1,70 @@ -from albumentations import ImageOnlyTransform +from typing import Optional, Tuple, Union + import numpy as np +from albumentations import ImageOnlyTransform -from .pixel_manipulation import default_manipulate +from careamics.config.support import SupportedPixelManipulation +from .pixel_manipulation import median_manipulate, uniform_manipulate -class ManipulateN2V(ImageOnlyTransform): + +# TODO add median vs random replace +class N2VManipulateUniform(ImageOnlyTransform): + """ + Default augmentation for the N2V model. + + This transform expects S(Z)YXC dimensions. + + # TODO add more details, in paritcular what happens to channels and Z in the masking + + Parameters + ---------- + mask_pixel_percentage : float + Approximate percentage of pixels to be masked. + roi_size : int + Size of the ROI the new pixel value is sampled from, by default 11. + """ + + def __init__( + self, + roi_size: int = 11, + masked_pixel_percentage: float = 0.2, + strategy: Union[ + str, SupportedPixelManipulation + ] = SupportedPixelManipulation.UNIFORM, + struct_mask: Optional[np.ndarray] = None, + ): + super().__init__(p=1) + self.masked_pixel_percentage = masked_pixel_percentage + self.roi_size = roi_size + self.strategy = strategy + self.struct_mask = struct_mask + + def apply(self, patch: np.ndarray, **kwargs: dict) -> np.ndarray: + """Apply the transform to the image. + + Parameters + ---------- + image : np.ndarray + Image or image patch, 2D or 3D, shape (y, x, c) or (z, y, x, c). 
+        """
+        if self.strategy == SupportedPixelManipulation.UNIFORM:
+            masked, mask = uniform_manipulate(
+                patch=patch,
+                mask_pixel_percentage=self.masked_pixel_percentage,
+                subpatch_size=self.roi_size,
+                struct_mask_params=self.struct_mask,  # TODO add remove center param
+            )
+        else:
+            raise ValueError(f"Strategy {self.strategy} not supported.")
+
+        return masked, patch, mask
+
+    def get_transform_init_args_names(self) -> Tuple[str, ...]:
+        return ("roi_size", "masked_pixel_percentage", "strategy", "struct_mask")
+
+
+class N2VManipulateMedian(ImageOnlyTransform):
     """
     Default augmentation for the N2V model.
 
@@ -35,8 +95,7 @@ def apply(self, image, **params):
         image : np.ndarray
            Image or image patch, 2D or 3D, shape (c, y, x) or (c, z, y, x).
         """
-        masked, original, mask = default_manipulate(
+        masked, mask = median_manipulate(
             image, self.masked_pixel_percentage, self.roi_size, self.struct_mask
         )
-        return masked, original, mask
-
+        return masked, image, mask
diff --git a/src/careamics/transforms/nd_flip.py b/src/careamics/transforms/nd_flip.py
new file mode 100644
index 00000000..b58bc597
--- /dev/null
+++ b/src/careamics/transforms/nd_flip.py
@@ -0,0 +1,68 @@
+from typing import Any, Dict, Tuple
+
+import numpy as np
+from albumentations import DualTransform
+
+
+class NDFlip(DualTransform):
+    """Flip ND arrays along a single spatial axis.
+
+    Randomly flips one of the spatial axes: Y or X for 2D patches, and also Z
+    for 3D patches (unless `flip_z` is False). The channel axis is not flipped.
+
+    This transform expects (Z)YXC dimensions.
+    """
+
+    def __init__(self, p: float = 0.5, is_3D: bool = False, flip_z: bool = True):
+        super().__init__(p=p)
+
+        self.is_3D = is_3D
+        self.flip_z = flip_z
+
+        # "flippable" axes
+        if is_3D:
+            self.axis_indices = [0, 1, 2] if flip_z else [1, 2]
+        else:
+            self.axis_indices = [0, 1]
+
+    def get_params(self, **kwargs: Any) -> Dict[str, int]:
+        return {"flip_axis": np.random.choice(self.axis_indices)}
+
+    def apply(self, patch: np.ndarray, flip_axis: int, **kwargs: Any) -> np.ndarray:
+        """Apply the transform to the image.
+
+        Parameters
+        ----------
+        patch : np.ndarray
+            Image or image patch, 2D or 3D, shape (y, x, c) or (z, y, x, c).
+        flip_axis : int
+            Axis along which to flip the patch.
+        """
+        if len(patch.shape) == 3 and self.is_3D:
+            raise ValueError(
+                "Incompatible patch shape and dimensionality. ZYXC patch shape "
+                "expected, but got YXC shape."
+            )
+
+        return np.ascontiguousarray(np.flip(patch, axis=flip_axis))
+
+    def apply_to_mask(
+        self, mask: np.ndarray, flip_axis: int, **kwargs: Any
+    ) -> np.ndarray:
+        """Apply the transform to the mask.
+
+        Parameters
+        ----------
+        mask : np.ndarray
+            Mask or mask patch, 2D or 3D, shape (y, x, c) or (z, y, x, c).
+        """
+        if len(mask.shape) == 3 and self.is_3D:
+            raise ValueError(
+                "Incompatible mask shape and dimensionality. ZYXC patch shape "
+                "expected, but got YXC shape."
+            )
+
+        return np.ascontiguousarray(np.flip(mask, axis=flip_axis))
+
+    def get_transform_init_args_names(self, **kwargs) -> Tuple[str, ...]:
+        return ("is_3D", "flip_z")
diff --git a/src/careamics/transforms/normalize_without_target.py b/src/careamics/transforms/normalize_without_target.py
deleted file mode 100644
index b16ea79d..00000000
--- a/src/careamics/transforms/normalize_without_target.py
+++ /dev/null
@@ -1,36 +0,0 @@
-""" module."""
-import albumentations as Aug
-
-
-class NormalizeWithoutTarget(Aug.DualTransform):
-    """
-    Normalize the image with a mask.
-
-    Parameters
-    ----------
-    mean : float
-        Mean value.
-    std : float
-        Standard deviation.
-    """
-
-    def __init__(
-        self,
-        mean: float,
-        std: float,
-        max_pixel_value=1,
-        always_apply=False,
-        p=1.0,
-    ):
-        super().__init__(always_apply, p)
-        self.mean = mean
-        self.std = std
-        self.max_pixel_value = max_pixel_value
-
-    def apply(self, image, **params):
-        """Apply the transform to the mask."""
-        return Aug.functional.normalize(image, self.mean, self.std, self.max_pixel_value)
-
-    def apply_to_mask(self, target, **params):
-        """Apply the transform to the mask."""
-        return Aug.functional.normalize(target, self.mean, self.std, self.max_pixel_value)
diff --git a/src/careamics/transforms/pixel_manipulation.py b/src/careamics/transforms/pixel_manipulation.py
index 87bf9f5f..872b0a7a 100644
--- a/src/careamics/transforms/pixel_manipulation.py
+++ b/src/careamics/transforms/pixel_manipulation.py
@@ -4,12 +4,12 @@
 Pixel manipulation is used in N2V and similar algorithm to replace the value of
 masked pixels.
 """
-from typing import Optional, Tuple
+from typing import List, Optional, Tuple
 
 import numpy as np
 
 
-def _apply_struct_mask(patch, coords, mask):
+def _apply_struct_mask(
+    patch: np.ndarray, coords: np.ndarray, mask_params: List[int]
+) -> np.ndarray:
     """Applies structN2V mask to patch.
 
     Each point in coords corresponds to the center of the mask.
@@ -20,24 +20,30 @@
     patch : np.ndarray
         Patch to be manipulated.
     coords : np.ndarray
-        Coordinates of the pixels to be manipulated.
-    mask : np.ndarray
-        Mask to be applied.
+        Coordinates of the ROI (subpatch) centers.
+    mask_params : list
+        Axis and span across center for the structN2V mask.
     """
-    mask = np.array(mask)
-    ndim = mask.ndim
+    struct_axis, struct_span = mask_params
+    # Create a mask array
+    mask = np.expand_dims(np.ones(struct_span), axis=list(range(len(patch.shape) - 1)))
+    # Move the struct axis to the first position for indexing
+    mask = np.moveaxis(mask, 0, struct_axis)
     center = np.array(mask.shape) // 2
-    # leave the center value alone
+    # exclude the center (the masked pixel itself)
     mask[tuple(center.T)] = 0
 
     # displacements from center
     dx = np.indices(mask.shape)[:, mask == 1] - center[:, None]
 
     # combine all coords (ndim, npts,) with all displacements (ncoords,ndim,)
     mix = dx.T[..., None] + coords.T[None]
-    mix = mix.transpose([1, 0, 2]).reshape([ndim, -1]).T
+    mix = mix.transpose([1, 0, 2]).reshape([mask.ndim, -1]).T
 
     # stay within patch boundary
-    mix = mix.clip(min=np.zeros(ndim), max=np.array(patch.shape) - 1).astype(np.uint8)
+    # TODO this will fail if center is on the edge!
+    mix = mix.clip(min=np.zeros(mask.ndim), max=np.array(patch.shape) - 1).astype(
+        np.uint8
+    )
 
     # replace neighbouring pixels with random values from flat dist
-    patch[tuple(mix.T)] = np.random.rand(mix.shape[0]) * 4 - 2
+    patch[tuple(mix.T)] = np.random.uniform(patch.min(), patch.max(), size=mix.shape[0])
     return patch
 
 
@@ -91,13 +97,15 @@
     """
     rng = np.random.default_rng()
 
-    # Define the approximate distance between masked pixels
-    mask_pixel_distance = np.round((100 / mask_pixel_perc) ** (1 / len(shape))).astype(
-        np.int32
-    )
+    # Define the approximate distance between masked pixels.
Subtract 1 from the shape
+    # to account for the channel dimension
+    mask_pixel_distance = np.round(
+        (100 / mask_pixel_perc) ** (1 / (len(shape) - 1))
+    ).astype(np.int32)
 
     # Define a grid of coordinates for each axis in the input patch and the step size
     pixel_coords = []
+    steps = []
    for axis_size in shape:
         # make sure axis size is evenly divisible by box size
         num_pixels = int(np.ceil(axis_size / mask_pixel_distance))
@@ -106,13 +114,14 @@
         )  # explain
         pixel_coords.append(axis_pixel_coords.T)
+        steps.append(step)
 
     # Create a meshgrid of coordinates for each axis in the input patch
     coordinate_grid_list = np.meshgrid(*pixel_coords)
     coordinate_grid = np.array(coordinate_grid_list).reshape(len(shape), -1).T
 
     grid_random_increment = rng.integers(
-        _odd_jitter_func(float(step), rng)
+        _odd_jitter_func(float(max(steps)), rng)
         * np.ones_like(coordinate_grid).astype(np.int32)
         - 1,
         size=coordinate_grid.shape,
@@ -123,12 +132,72 @@
     return coordinate_grid
 
 
-# TODO channels: masking the same pixel across channels?
-def default_manipulate(
+def _create_subpatch_center_mask(
+    subpatch: np.ndarray, center_coords: np.ndarray
+) -> np.ndarray:
+    """Create a mask with the center of the subpatch masked.
+
+    Parameters
+    ----------
+    subpatch : np.ndarray
+        Subpatch to be manipulated.
+    center_coords : np.ndarray
+        Coordinates of the original center before possible crop.
+
+    Returns
+    -------
+    np.ndarray
+        Mask with the center of the subpatch masked.
+    """
+    mask = np.ones(subpatch.shape)
+    mask[tuple(center_coords.T)] = 0
+    return np.ma.make_mask(mask)
+
+
+def _create_subpatch_struct_mask(
+    subpatch: np.ndarray, center_coords: np.ndarray, struct_mask_params: List[int]
+) -> np.ndarray:
+    """Create a structN2V mask for the subpatch.
+
+    Parameters
+    ----------
+    subpatch : np.ndarray
+        Subpatch to be manipulated.
+    center_coords : np.ndarray
+        Coordinates of the original center before possible crop.
+    struct_mask_params : list
+        Axis and span across center for the structN2V mask.
+
+    Returns
+    -------
+    np.ndarray
+        StructN2V mask for the subpatch.
+    """
+    # Create a mask with the center of the subpatch masked
+    mask_placeholder = np.ones(subpatch.shape)
+    struct_axis, struct_span = struct_mask_params
+    # reshape to move the struct axis to the first position
+    mask_reshaped = np.moveaxis(mask_placeholder, struct_axis, 0)
+    # create the mask index for the struct axis
+    mask_index = slice(
+        max(0, center_coords.take(struct_axis) - (struct_span - 1) // 2),
+        min(
+            1 + center_coords.take(struct_axis) + (struct_span - 1) // 2,
+            subpatch.shape[struct_axis],
+        ),
+    )
+    mask_reshaped[struct_axis][mask_index] = 0
+    # reshape back to the original shape
+    mask = np.moveaxis(mask_reshaped, 0, struct_axis)
+    return np.ma.make_mask(mask)
+
+
+def uniform_manipulate(
     patch: np.ndarray,
     mask_pixel_percentage: float,
-    roi_size: int = 11,
-    struct_mask: Optional[np.ndarray] = None,
+    subpatch_size: int = 11,
+    remove_center: bool = True,
+    struct_mask_params: Optional[List[int]] = None,
 ) -> Tuple[np.ndarray, ...]:
     """
     Manipulate pixel in a patch, i.e. replace the masked value.
@@ -139,104 +208,130 @@
     patch : np.ndarray
         Image patch, 2D or 3D, shape (y, x) or (z, y, x).
-    mask_pixel_percentage : floar
+    mask_pixel_percentage : float
         Approximate percentage of pixels to be masked.
-    roi_size : int
-        Size of the ROI the new pixel value is sampled from, by default 11.
-    augmentations : Callable, optional
-        Augmentations to apply, by default None.
+    subpatch_size : int
+        Size of the subpatch the new pixel value is sampled from, by default 11.
+    remove_center : bool
+        Whether to remove the center pixel from the subpatch, by default True. See
+        uniform with/without central pixel in the documentation. #TODO add link
+    struct_mask_params : Optional[List[int]]
+        Axis and span across center for the structN2V mask.
 
     Returns
     -------
     Tuple[np.ndarray]
-        Tuple containing the manipulated patch, the original patch and the mask.
+        Tuple containing the manipulated patch and the mask.
     """
-    #TODO this assumes patch has no channel dimension. Is this correct?
-    patch = patch.squeeze()
-    original_patch = patch.copy()
-
-    # TODO: struct mask could be generated from roi center and later removed from grid as well
     # Get the coordinates of the pixels to be replaced
-    roi_centers = _get_stratified_coords(mask_pixel_percentage, patch.shape)
+    transformed_patch = patch.copy()
+
+    subpatch_centers = _get_stratified_coords(mask_pixel_percentage, patch.shape)
 
     rng = np.random.default_rng()
 
-    # Generate coordinate grid for ROI
-    roi_span_full = np.arange(-np.floor(roi_size / 2), np.ceil(roi_size / 2)).astype(
-        np.int32
-    )
+    # Generate coordinate grid for subpatch
+    roi_span_full = np.arange(
+        -np.floor(subpatch_size / 2), np.ceil(subpatch_size / 2)
+    ).astype(np.int32)
 
-    # Remove the center pixel from the grid
-    roi_span_wo_center = roi_span_full[roi_span_full != 0]
+    # Remove the center pixel from the grid if needed
+    roi_span = roi_span_full[roi_span_full != 0] if remove_center else roi_span_full
 
     # Randomly select coordinates from the grid
-    random_increment = rng.choice(roi_span_wo_center, size=roi_centers.shape)
+    random_increment = rng.choice(roi_span, size=subpatch_centers.shape)
 
     # Clip the coordinates to the patch size
-    # TODO: just to check, shouldn't the maximum be roi_center+patch.shape/2? rather than path.shape
-    replacement_coords: np.ndarray = np.clip(
-        roi_centers + random_increment,
+    replacement_coords = np.clip(
+        subpatch_centers + random_increment,
         0,
         [patch.shape[i] - 1 for i in range(len(patch.shape))],
     )
 
-    # Get the replacement pixels from all rois
+    # Get the replacement pixels from all subpatches
     replacement_pixels = patch[tuple(replacement_coords.T.tolist())]
 
     # Replace the original pixels with the replacement pixels
-    patch[tuple(roi_centers.T.tolist())] = replacement_pixels
-
-    # Create corresponding mask
-    mask = np.where(patch != original_patch, 1, 0).astype(np.uint8)
+    transformed_patch[tuple(subpatch_centers.T.tolist())] = replacement_pixels
+    mask = np.where(transformed_patch != patch, 1, 0).astype(np.uint8)
 
-    if struct_mask is not None:
-        patch = _apply_struct_mask(patch, roi_centers, struct_mask)
+    if struct_mask_params is not None:
+        transformed_patch = _apply_struct_mask(
+            transformed_patch, subpatch_centers, struct_mask_params
+        )
 
-    # Expand the dimensions of the arrays to return the channel dimension
-    #TODO Should it be done here? done at all?
     return (
-        np.expand_dims(patch, 0),
-        np.expand_dims(original_patch, 0), # TODO is this necessary to return the original patch?
-        np.expand_dims(mask, 0),
+        transformed_patch,
+        mask,
     )
 
 
-# TODO: fix
-# TODO: create tests
-# TODO: find an optimized way in np without for loop
-# TODO: add struct mask
+
 def median_manipulate(
     patch: np.ndarray,
     mask_pixel_percentage: float,
-    roi_size: int = 11,
-    struct_mask: Optional[np.ndarray] = None,
+    subpatch_size: int = 11,
+    struct_mask_params: Optional[List[int]] = None,
 ) -> Tuple[np.ndarray, ...]:
     """
-    works on the assumption that it is 2D or 3D image
-    """
-    #TODO this assumes patch has no channel dimension. Is this correct?
-    patch = patch.squeeze()
-    mask = np.zeros_like(patch)
-    original_patch = patch.copy()
+    Manipulate pixels in a patch, i.e. replace the masked values.
+
+    Parameters
+    ----------
+    patch : np.ndarray
+        Image patch, 2D or 3D, shape (y, x) or (z, y, x).
+    mask_pixel_percentage : float
+        Approximate percentage of pixels to be masked.
+    subpatch_size : int
+        Size of the subpatch the new pixel value is sampled from, by default 11.
+    struct_mask_params : Optional[List[int]]
+        Axis and span across center for the structN2V mask.
 
+    Returns
+    -------
+    Tuple[np.ndarray]
+        Tuple containing the manipulated patch and the mask.
+    """
+    transformed_patch = patch.copy()
     # Get the coordinates of the pixels to be replaced
-    roi_centers = _get_stratified_coords(mask_pixel_percentage, patch.shape)
+    subpatch_centers = _get_stratified_coords(mask_pixel_percentage, patch.shape)
+
+    # Generate coordinate grid for subpatch
+    roi_span = np.array(
+        [-np.floor(subpatch_size / 2), np.ceil(subpatch_size / 2)]
+    ).astype(np.int32)
 
-    for center in roi_centers:
-        min_coord = [max(0, c - roi_size // 2) for c in center]
-        max_coord = [min(s, c + roi_size // 2 + 1) for s, c in zip(patch.shape, center)]
+    subpatch_crops_span_full = subpatch_centers[np.newaxis, ...].T + roi_span
 
-        coords = [
-            slice(min_coord[i], max_coord[i])
-            for i in range(patch.ndim)
+    # Dimensions n dims, n centers, (min, max)
+    subpatch_crops_span_clipped = np.clip(
+        subpatch_crops_span_full,
+        a_min=np.zeros_like(patch.shape)[:, np.newaxis, np.newaxis],
+        a_max=np.array(patch.shape)[:, np.newaxis, np.newaxis] - 1,
+    )
+
+    for idx in range(subpatch_crops_span_clipped.shape[1]):
+        subpatch_coords = subpatch_crops_span_clipped[:, idx, ...]
+        idxs = [
+            slice(x[0], x[1]) if x[1] - x[0] > 0 else slice(0, 1)
+            for x in subpatch_coords
         ]
+        subpatch = patch[tuple(idxs)]
+        if struct_mask_params is None:
+            subpatch_mask = _create_subpatch_center_mask(
+                subpatch, subpatch_centers[idx]
+            )
+        else:
+            subpatch_mask = _create_subpatch_struct_mask(
+                subpatch, subpatch_centers[idx], struct_mask_params
+            )
+        transformed_patch[tuple(subpatch_centers[idx].tolist())] = np.median(
+            subpatch[subpatch_mask]
+        )
 
-        # extract roi around center
-        roi = patch[tuple(coords)]
+    mask = np.where(transformed_patch != patch, 1, 0).astype(np.uint8)
 
-        # replace center pixel by median
-        patch[tuple(center)] = np.median(roi)
-        mask[tuple(center)] = 1
+    if struct_mask_params is not None:
+        transformed_patch = _apply_struct_mask(
+            transformed_patch, subpatch_centers, struct_mask_params
+        )
 
     return (
-        np.expand_dims(patch, 0),
-        np.expand_dims(original_patch, 0),
-        np.expand_dims(mask, 0),
+        transformed_patch,
+        mask,
     )
-
-
diff --git a/src/careamics/transforms/xy_random_rotate90.py b/src/careamics/transforms/xy_random_rotate90.py
new file mode 100644
index 00000000..99eccdea
--- /dev/null
+++ b/src/careamics/transforms/xy_random_rotate90.py
@@ -0,0 +1,64 @@
+from typing import Any, Dict, Tuple
+
+import numpy as np
+from albumentations import DualTransform
+
+
+class XYRandomRotate90(DualTransform):
+    """Applies random 90 degree rotations in the YX plane.
+
+    This transform expects (Z)YXC dimensions.
+    """
+
+    def __init__(self, p: float = 0.5, is_3D: bool = False):
+        super().__init__(p=p)
+
+        self.is_3D = is_3D
+
+        # rotation axes
+        if is_3D:
+            self.axes = (1, 2)
+        else:
+            self.axes = (0, 1)
+
+    def get_params(self, **kwargs: Any) -> Dict[str, int]:
+        return {"n_rotations": np.random.randint(1, 4)}
+
+    def apply(self, patch: np.ndarray, n_rotations: int, **kwargs: Any) -> np.ndarray:
+        """Apply the transform to the image.
+
+        Parameters
+        ----------
+        patch : np.ndarray
+            Image or image patch, 2D or 3D, shape (y, x, c) or (z, y, x, c).
+        n_rotations : int
+            Number of 90 degree rotations to apply.
+        """
+        if len(patch.shape) == 3 and self.is_3D:
+            raise ValueError(
+                "Incompatible patch shape and dimensionality. ZYXC patch shape "
+                "expected, but got YXC shape."
+            )
+
+        return np.ascontiguousarray(np.rot90(patch, k=n_rotations, axes=self.axes))
+
+    def apply_to_mask(
+        self, mask: np.ndarray, n_rotations: int, **kwargs: Any
+    ) -> np.ndarray:
+        """Apply the transform to the mask.
+
+        Parameters
+        ----------
+        mask : np.ndarray
+            Mask or mask patch, 2D or 3D, shape (y, x, c) or (z, y, x, c).
+        """
+        if len(mask.shape) != 4 and self.is_3D:
+            raise ValueError(
+                "Incompatible mask shape and dimensionality. ZYXC patch shape "
+                "expected, but got YXC shape."
+            )
+
+        return np.ascontiguousarray(np.rot90(mask, k=n_rotations, axes=self.axes))
+
+    def get_transform_init_args_names(self) -> Tuple[str]:
+        return ("is_3D",)
diff --git a/src/careamics/utils/__init__.py b/src/careamics/utils/__init__.py
index 936dd0eb..ce2e31ea 100644
--- a/src/careamics/utils/__init__.py
+++ b/src/careamics/utils/__init__.py
@@ -5,22 +5,26 @@
     "denormalize",
     "normalize",
-    "RunningStats"
+    "RunningStats",
     "get_device",
-    "check_external_array_validity",
     "check_axes_validity",
     "check_tiling_validity",
     "cwd",
     "compile_model",
     "MetricTracker",
     "get_ram_size",
+    "method_dispatch",
+    "check_path_exists",
+    "BaseEnum",
 ]
 
+from .base_enum import BaseEnum
 from .context import cwd
+from .method_dispatch import method_dispatch
 from .metrics import MetricTracker
-from .misc import get_ram_size
 from .normalization import RunningStats, denormalize, normalize
+from .path_utils import check_path_exists
+from .ram import get_ram_size
 from .validators import (
     check_axes_validity,
-    check_external_array_validity,
     check_tiling_validity,
 )
diff --git a/src/careamics/utils/base_enum.py b/src/careamics/utils/base_enum.py
new file mode 100644
index 00000000..37e2ed65
--- /dev/null
+++ b/src/careamics/utils/base_enum.py
@@ -0,0 +1,23 @@
+from enum import Enum, EnumMeta
+
+
+class _ContainerEnum(EnumMeta):
+    def __contains__(cls, item) -> bool:
+        try:
+            cls(item)
+        except ValueError:
+            return False
+        return True
+
+    @classmethod
+    def has_value(cls, value):
+        return value in cls._value2member_map_
+
+
+class BaseEnum(Enum, metaclass=_ContainerEnum):
+    """Base Enum class, allowing checking if a value is in the enum.
+
+    >>> "value" in BaseEnum
+    """
+
+    pass
diff --git a/src/careamics/utils/method_dispatch.py b/src/careamics/utils/method_dispatch.py
new file mode 100644
index 00000000..155d67a6
--- /dev/null
+++ b/src/careamics/utils/method_dispatch.py
@@ -0,0 +1,55 @@
+import inspect
+from functools import singledispatch, update_wrapper
+from typing import Callable
+
+
+def method_dispatch(method: Callable) -> Callable:
+    """Single dispatch decorator for instance methods.
+
+    Since functools.singledispatch does not support instance methods, as the dispatch
+    is based on the first argument (`self` for instance methods), this decorator
+    uses singledispatch to dispatch the call based on the second argument.
+
+    (Barely) adapted from Zero Piraeus:
+    https://stackoverflow.com/a/24602374
+
+    Parameters
+    ----------
+    method : Callable
+        Decorated method.
+
+    Returns
+    -------
+    Callable
+        Wrapper around the method that dispatches the call based on the second argument.
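+
+    Examples
+    --------
+    A minimal sketch (class and method names are illustrative):
+
+    >>> class Reader:
+    ...     @method_dispatch
+    ...     def read(self, source):
+    ...         raise NotImplementedError
+    ...     @read.register(str)
+    ...     def _read_str(self, source):
+    ...         return f"string source: {source}"
+    >>> Reader().read("image.tif")
+    'string source: image.tif'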
+    """
+    # create single dispatch from the function
+    dispatcher = singledispatch(method)
+
+    # define a wrapper to dispatch the function based on the second argument
+    def wrapper(*args, **kw):
+        if len(args) + len(kw) < 2:
+            raise ValueError(f"Missing argument to {method}.")
+
+        # if the second argument was passed as a keyword argument, we use its class
+        # for the dispatch
+        if len(args) == 1:
+            # get signature of the method
+            parameter = list(inspect.signature(method).parameters)[1]
+
+            # raise error if the parameter is not in the keyword arguments
+            if parameter not in kw:
+                raise ValueError(f"Missing argument {parameter} to {method}.")
+
+            return dispatcher.dispatch(kw[parameter].__class__)(*args, **kw)
+
+        # else dispatch using the second argument
+        return dispatcher.dispatch(args[1].__class__)(*args, **kw)
+
+    # copy the original method's registered methods to the wrapper
+    wrapper.register = dispatcher.register
+
+    # update wrapper to look like the original method
+    update_wrapper(wrapper, method)
+
+    return wrapper
diff --git a/src/careamics/utils/path_utils.py b/src/careamics/utils/path_utils.py
new file mode 100644
index 00000000..61bb744a
--- /dev/null
+++ b/src/careamics/utils/path_utils.py
@@ -0,0 +1,24 @@
+from pathlib import Path
+from typing import Union
+
+
+def check_path_exists(path: Union[str, Path]) -> Path:
+    """Check if a path exists. If not, raise an error.
+
+    Note that it returns `path` as a Path object.
+
+    Parameters
+    ----------
+    path : Union[str, Path]
+        Path to check.
+
+    Returns
+    -------
+    Path
+        Path as a Path object.
+    """
+    path = Path(path)
+    if not path.exists():
+        raise FileNotFoundError(f"Data path {path} is incorrect or does not exist.")
+
+    return path
diff --git a/src/careamics/utils/misc.py b/src/careamics/utils/ram.py
similarity index 73%
rename from src/careamics/utils/misc.py
rename to src/careamics/utils/ram.py
index a4f662d5..2a26c781 100644
--- a/src/careamics/utils/misc.py
+++ b/src/careamics/utils/ram.py
@@ -10,4 +10,4 @@ def get_ram_size() -> int:
     int
         RAM size in mbytes.
     """
-    return psutil.virtual_memory().total / 1024 ** 2
+    return psutil.virtual_memory().total / 1024**2
diff --git a/src/careamics/utils/torch_utils.py b/src/careamics/utils/torch_utils.py
index 80f261ab..1cc0fb01 100644
--- a/src/careamics/utils/torch_utils.py
+++ b/src/careamics/utils/torch_utils.py
@@ -4,18 +4,21 @@
 These functions are used to control certain aspects and behaviours of PyTorch.
 """
 import inspect
-from typing import Dict, Tuple
+from typing import Dict, Union
 
 import torch
 
+from careamics.config.support import SupportedOptimizer, SupportedScheduler
+
 from ..utils.logging import get_logger
 
-logger = get_logger(__name__) # TODO are logger still needed?
+logger = get_logger(__name__)  # TODO is the logger still needed?
+
 
 def filter_parameters(
     func: type,
     user_params: dict,
-) -> Tuple[dict, dict]:
+) -> dict:
     """
     Filter parameters according to the function signature.
@@ -40,6 +43,26 @@
     return {key: user_params[key] for key in params_to_be_used}
 
 
+def get_optimizer(name: str) -> torch.optim.Optimizer:
+    """
+    Return the optimizer class given its name.
+
+    Parameters
+    ----------
+    name : str
+        Optimizer name.
+
+    Returns
+    -------
+    torch.optim.Optimizer
+        Optimizer class.
+    """
+    if name not in SupportedOptimizer:
+        raise NotImplementedError(f"Optimizer {name} is not yet supported.")
+
+    return getattr(torch.optim, name)
+
+
 def get_optimizers() -> Dict[str, str]:
     """
     Return the list of all optimizers available in torch.optim.
@@ -57,6 +80,31 @@
     return optims
 
 
+def get_scheduler(
+    name: str,
+) -> Union[
+    torch.optim.lr_scheduler.LRScheduler,
+    torch.optim.lr_scheduler.ReduceLROnPlateau,
+]:
+    """
+    Return the scheduler class given its name.
+
+    Parameters
+    ----------
+    name : str
+        Scheduler name.
+
+    Returns
+    -------
+    Union[LRScheduler, ReduceLROnPlateau]
+        Scheduler class.
+    """
+    if name not in SupportedScheduler:
+        raise NotImplementedError(f"Scheduler {name} is not yet supported.")
+
+    return getattr(torch.optim.lr_scheduler, name)
+
+
 def get_schedulers() -> Dict[str, str]:
     """
     Return the list of all schedulers available in torch.optim.lr_scheduler.
diff --git a/src/careamics/utils/validators.py b/src/careamics/utils/validators.py
index a897f82c..a8ac8481 100644
--- a/src/careamics/utils/validators.py
+++ b/src/careamics/utils/validators.py
@@ -5,8 +5,6 @@
 """
 from typing import List
 
-import numpy as np
-
 AXES = "STCZYX"
 
 
@@ -39,6 +37,9 @@ def check_axes_validity(axes: str) -> bool:
             f"Invalid axes {axes}. Must contain at least 2 and at most 6 axes."
         )
 
+    if "YX" not in _axes:
+        raise ValueError(f"Invalid axes {axes}. Must contain YX (in that order).")
+
     # all characters must be in REF_AXES = 'STCZYX'
     if not all(s in AXES for s in _axes):
         raise ValueError(f"Invalid axes {axes}. Must be a combination of {AXES}.")
@@ -65,34 +66,6 @@
     return True
 
 
-def check_external_array_validity(
-    array: np.ndarray, axes: str, use_tiling: bool
-) -> None:
-    """
-    Check that the numpy array is compatible with the axes.
-
-    Parameters
-    ----------
-    array : np.ndarray
-        Numpy array.
-    axes : str
-        Valid axes (see check_axes_validity).
-    """
-    if use_tiling is False:
-        if len(array.shape) - 1 != len(axes):
-            raise ValueError(
-                f"Array has {len(array.shape)} dimensions, but axes are {len(axes)}."
-                f"When not tiling, externally provided arrays must have extra"
-                f" dimensions for batch and channel to be compatible with the"
-                f" batchnorm layers."
-            )
-    else:
-        if len(array.shape) != len(axes):
-            raise ValueError(
-                f"Array has {len(array.shape)} dimensions, but axes are {len(axes)}."
-            )
-
-
 def check_tiling_validity(tile_shape: List[int], overlaps: List[int]) -> None:
     """
     Check that the tiling parameters are valid.
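diff --git a/examples/transforms_sketch.py b/examples/transforms_sketch.py
new file mode 100644
--- /dev/null
+++ b/examples/transforms_sketch.py
@@ -0,0 +1,20 @@
+"""Illustrative sketch (not a shipped example) of the new ND transforms.
+
+`NDFlip` and `XYRandomRotate90` are albumentations transforms expecting
+(Z)YXC patches; the shape below is an assumption for a 2D, single-channel
+patch.
+"""
+import numpy as np
+
+from careamics.transforms import NDFlip, XYRandomRotate90
+
+# dummy YXC patch: 64x64 pixels, one channel
+patch = np.random.rand(64, 64, 1).astype(np.float32)
+
+# p=1.0 ensures the transforms are always applied
+flip = NDFlip(p=1.0, is_3D=False)
+rotate = XYRandomRotate90(p=1.0, is_3D=False)
+
+flipped = flip(image=patch)["image"]
+rotated = rotate(image=flipped)["image"]
+print(flipped.shape, rotated.shape)  # both remain (64, 64, 1)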
diff --git a/tests/bioimage/test_io.py b/tests/bioimage/test_io.py
index a6c06d6d..afdc6ab4 100644
--- a/tests/bioimage/test_io.py
+++ b/tests/bioimage/test_io.py
@@ -1,15 +1,3 @@
-from pathlib import Path
-
-import bioimageio.spec.shared.raw_nodes as nodes
-import pytest
-import torch
-from bioimageio.core import load_resource_description
-
-from careamics.bioimage.io import get_default_model_specs
-from careamics.config import Configuration, save_configuration
-from careamics.models import create_model
-from careamics.utils import cwd
-
 # @pytest.mark.parametrize("name", ["Noise2Void"])
 # @pytest.mark.parametrize("is_3D", [True, False])
 # def test_default_model_specs(name, is_3D):
diff --git a/tests/config/architectures/test_custom_model.py b/tests/config/architectures/test_custom_model.py
new file mode 100644
index 00000000..c7801907
--- /dev/null
+++ b/tests/config/architectures/test_custom_model.py
@@ -0,0 +1,76 @@
+import pytest
+from torch import nn, ones
+
+from careamics.config.architectures import CustomModel, get_custom_model, register_model
+from careamics.config.support import SupportedArchitecture
+
+
+@register_model(name="linear")
+class LinearModel(nn.Module):
+    def __init__(self, in_features, out_features):
+        super().__init__()
+
+        self.in_features = in_features
+        self.out_features = out_features
+        self.weight = nn.Parameter(ones(in_features, out_features))
+        self.bias = nn.Parameter(ones(out_features))
+
+    def forward(self, input):
+        return (input @ self.weight) + self.bias
+
+
+@register_model(name="not_a_model")
+class NotAModel:
+    def __init__(self, id):
+        self.id = id
+
+    def forward(self, input):
+        return input
+
+
+def test_linear_model():
+    """Test that the model can be retrieved and instantiated."""
+    model = get_custom_model("linear")
+    model(in_features=10, out_features=5)
+
+
+def test_not_a_model():
+    """Test that a non-model class can be retrieved and instantiated."""
+    model = get_custom_model("not_a_model")
+    model(3)
+
+
+def test_custom_model():
+    """Test that the custom model can be instantiated."""
+    # prepare model dictionary
+    model_dict = {
+        "architecture": SupportedArchitecture.CUSTOM.value,
+        "name": "linear",
+        "parameters": {"in_features": 10, "out_features": 5},
+    }
+
+    # create Pydantic model
+    pydantic_model = CustomModel(**model_dict)
+
+    # instantiate model
+    model_class = get_custom_model(pydantic_model.name)
+    model = model_class(**pydantic_model.parameters)
+
+    assert isinstance(model, LinearModel)
+    assert model.in_features == 10
+    assert model.out_features == 5
+
+
+def test_custom_model_wrong_class():
+    """Test that the Pydantic custom model raises an error if the model is not a
+    torch.nn.Module subclass."""
+    # prepare model dictionary
+    model_dict = {
+        "architecture": "Custom",
+        "name": "not_a_model",
+        "parameters": {"id": 3},
+    }
+
+    # create Pydantic model
+    with pytest.raises(ValueError):
+        CustomModel(**model_dict)
diff --git a/tests/config/architectures/test_register_model.py b/tests/config/architectures/test_register_model.py
new file mode 100644
index 00000000..e41cf4b0
--- /dev/null
+++ b/tests/config/architectures/test_register_model.py
@@ -0,0 +1,45 @@
+import pytest
+
+from careamics.config.architectures import (
+    clear_custom_models,
+    get_custom_model,
+    register_model,
+)
+
+
+# register a model
+@register_model(name="mymodel")
+class MyModel:
+    model_name: str
+    model_id: int
+
+
+def test_register_model():
+    """Test the register_model decorator."""
+
+    # get custom model
+    model = get_custom_model("mymodel")
+
+    # 
check if it is a subclass of MyModel + assert issubclass(model, MyModel) + + +def test_wrong_model(): + """Test that an error is raised if an unknown model is requested.""" + get_custom_model("mymodel") + + with pytest.raises(ValueError): + get_custom_model("unknown_model") + + +def test_clear_custom_models(): + """Test that the custom models are cleared.""" + # retrieve model + get_custom_model("mymodel") + + # clear custom models + clear_custom_models() + + # request the model again + with pytest.raises(ValueError): + get_custom_model("mymodel") diff --git a/tests/config/architectures/test_unet_model.py b/tests/config/architectures/test_unet_model.py index a56d5ac4..c0fd4804 100644 --- a/tests/config/architectures/test_unet_model.py +++ b/tests/config/architectures/test_unet_model.py @@ -1,7 +1,7 @@ import pytest from careamics.config.architectures import UNetModel -from careamics.config.support import SupportedArchitecture, SupportedActivation +from careamics.config.support import SupportedActivation def test_instantiation(): @@ -28,29 +28,19 @@ def test_architecture_missing(): @pytest.mark.parametrize("num_channels_init", [8, 16, 32, 96, 128]) -def test_num_channels_init( - num_channels_init: int -): +def test_num_channels_init(num_channels_init: int): """Test that UNetModel accepts num_channels_init as an even number and minimum 8.""" - model_params = { - "architecture": "UNet", - "num_channels_init": num_channels_init - } + model_params = {"architecture": "UNet", "num_channels_init": num_channels_init} # instantiate model UNetModel(**model_params) @pytest.mark.parametrize("num_channels_init", [2, 17, 127]) -def test_wrong_num_channels_init( - num_channels_init: int -): +def test_wrong_num_channels_init(num_channels_init: int): """Test that wrong num_channels_init causes an error.""" - model_params = { - "architecture": "UNet", - "num_channels_init": num_channels_init - } + model_params = {"architecture": "UNet", "num_channels_init": num_channels_init} with pytest.raises(ValueError): UNetModel(**model_params) @@ -58,11 +48,11 @@ def test_wrong_num_channels_init( def test_activations(): """Test that UNetModel accepts all activations.""" - for act in SupportedActivation: + for act in SupportedActivation: model_params = { "architecture": "UNet", "num_channels_init": 16, - "final_activation": act.value + "final_activation": act.value, } # instantiate model @@ -72,22 +62,22 @@ def test_activations(): def test_all_activations_are_supported(): """Test that all activations defined in the Literal are supported.""" # list of supported activations - activations = [act for act in SupportedActivation] + activations = list(SupportedActivation) # Algorithm json schema schema = UNetModel.schema() # check that all activations are supported - for act in schema["properties"]["final_activation"]['enum']: - assert act in activations - + for act in schema["properties"]["final_activation"]["enum"]: + assert act in activations + def test_activation_wrong_values(): """Test that wrong values are not accepted.""" model_params = { "architecture": "UNet", "num_channels_init": 16, - "final_activation": "wrong" + "final_activation": "wrong", } with pytest.raises(ValueError): @@ -96,11 +86,7 @@ def test_activation_wrong_values(): def test_parameters_wrong_values_by_assigment(): """Test that wrong values are not accepted through assignment.""" - model_params = { - "architecture": "UNet", - "num_channels_init": 16, - "depth": 2 - } + model_params = {"architecture": "UNet", "num_channels_init": 16, "depth": 2} model = 
UNetModel(**model_params)
 
     # depth
@@ -118,8 +104,8 @@ def test_model_dump():
     """Test that default values are excluded from model dump."""
     model_params = {
         "architecture": "UNet",
-        "num_channels_init": 16, # non-default value
-        "final_activation": "ReLU", # non-default value
+        "num_channels_init": 16,  # non-default value
+        "final_activation": "ReLU",  # non-default value
     }
 
     model = UNetModel(**model_params)
diff --git a/tests/config/support/test_supported_data.py b/tests/config/support/test_supported_data.py
new file mode 100644
index 00000000..44df40e0
--- /dev/null
+++ b/tests/config/support/test_supported_data.py
@@ -0,0 +1,76 @@
+from fnmatch import fnmatch
+from pathlib import Path
+
+import numpy as np
+import pytest
+import tifffile
+
+from careamics.config.support import SupportedData
+
+
+def test_extension_tiff_fnmatch(tmp_path: Path):
+    """Test that the TIFF extension is compatible with fnmatch."""
+    path = tmp_path / "test.tif"
+
+    # test as str
+    assert fnmatch(str(path), SupportedData.get_extension(SupportedData.TIFF))
+
+    # test as Path
+    assert fnmatch(path, SupportedData.get_extension(SupportedData.TIFF))
+
+
+def test_extension_tiff_rglob(tmp_path: Path):
+    """Test that the TIFF extension is compatible with Path.rglob."""
+    # create text file
+    text_path = tmp_path / "test.txt"
+    text_path.write_text("test")
+
+    # create image
+    path = tmp_path / "test.tif"
+    image = np.ones((10, 10))
+    tifffile.imwrite(path, image)
+
+    # search for files
+    files = list(tmp_path.rglob(SupportedData.get_extension(SupportedData.TIFF)))
+    assert len(files) == 1
+    assert files[0] == path
+
+
+def test_extension_custom_fnmatch(tmp_path: Path):
+    """Test that the custom extension is compatible with fnmatch."""
+    path = tmp_path / "test.czi"
+
+    # test as str
+    assert fnmatch(str(path), SupportedData.get_extension(SupportedData.CUSTOM))
+
+    # test as Path
+    assert fnmatch(path, SupportedData.get_extension(SupportedData.CUSTOM))
+
+
+def test_extension_custom_rglob(tmp_path: Path):
+    """Test that the custom extension is compatible with Path.rglob."""
+    # create text file
+    text_path = tmp_path / "test.txt"
+    text_path.write_text("test")
+
+    # create image
+    path = tmp_path / "test.npy"
+    image = np.ones((10, 10))
+    np.save(path, image)
+
+    # search for files
+    files = list(tmp_path.rglob(SupportedData.get_extension(SupportedData.CUSTOM)))
+    assert len(files) == 2
+    assert set(files) == {path, text_path}
+
+
+def test_extension_array_error():
+    """Test that the array extension raises NotImplementedError."""
+    with pytest.raises(NotImplementedError):
+        SupportedData.get_extension(SupportedData.ARRAY)
+
+
+def test_extension_any_error():
+    """Test that an unknown extension raises a ValueError."""
+    with pytest.raises(ValueError):
+        SupportedData.get_extension("some random")
diff --git a/tests/config/support/test_supported_optimizers.py b/tests/config/support/test_supported_optimizers.py
index abb2a50d..8682fc33 100644
--- a/tests/config/support/test_supported_optimizers.py
+++ b/tests/config/support/test_supported_optimizers.py
@@ -1,8 +1,8 @@
 from torch import optim
 
 from careamics.config.support.supported_optimizers import (
+    SupportedOptimizer,
     SupportedScheduler,
-    SupportedOptimizer
 )
diff --git a/tests/config/support/test_supported_transforms.py b/tests/config/support/test_supported_transforms.py
new file mode 100644
index 00000000..1c687148
--- /dev/null
+++ b/tests/config/support/test_supported_transforms.py
@@ -0,0 +1,7 @@
+from careamics.config.support import SupportedTransform, 
get_all_transforms + + +def test_supported_transforms_in_accepted_transforms(): + """Test that all the supported transforms are in the accepted transforms.""" + for transform in SupportedTransform: + assert transform in get_all_transforms() diff --git a/tests/config/test_algorithm.py b/tests/config/test_algorithm.py index 9d23e868..795636e6 100644 --- a/tests/config/test_algorithm.py +++ b/tests/config/test_algorithm.py @@ -1,6 +1,6 @@ import pytest -from careamics.config.algorithm import AlgorithmModel +from careamics.config.algorithm_model import AlgorithmModel from careamics.config.support import ( SupportedAlgorithm, SupportedArchitecture, @@ -8,35 +8,17 @@ ) -# from careamics.config.noise_models import NoiseModel - - -# def test_algorithm_noise_model(): -# d = { -# "model_type": "hist", -# "parameters": {"min_value": 324, "max_value": 3465}, -# } -# NoiseModel(**d) - - -def test_supported_algorithm(minimum_algorithm): - """Test that all supported algorithms are accepted by the AlgorithmModel.""" - for algo in SupportedAlgorithm: - minimum_algorithm["algorithm"] = algo.value - AlgorithmModel(**minimum_algorithm) - - def test_all_algorithms_are_supported(): """Test that all algorithms defined in the Literal are supported.""" # list of supported algorithms - algorithms = [algo for algo in SupportedAlgorithm] + algorithms = list(SupportedAlgorithm) - # Algorithm json schema + # Algorithm json schema to extract the literal value schema = AlgorithmModel.model_json_schema() # check that all algorithms are supported - for algo in schema["properties"]["algorithm"]['enum']: - assert algo in algorithms + for algo in schema["properties"]["algorithm"]["enum"]: + assert algo in algorithms def test_supported_losses(minimum_algorithm): @@ -49,56 +31,36 @@ def test_supported_losses(minimum_algorithm): def test_all_losses_are_supported(): """Test that all losses defined in the Literal are supported.""" # list of supported losses - losses = [loss for loss in SupportedLoss] + losses = list(SupportedLoss) # Algorithm json schema schema = AlgorithmModel.model_json_schema() # check that all losses are supported - for loss in schema["properties"]["loss"]['enum']: - assert loss in losses + for loss in schema["properties"]["loss"]["enum"]: + assert loss in losses def test_model_discriminator(minimum_algorithm): """Test that discriminator permits correct assignment.""" for model_name in SupportedArchitecture: - - minimum_algorithm["model"]["architecture"] = model_name.value - - algo = AlgorithmModel(**minimum_algorithm) - assert algo.model.architecture == model_name.value - - -def test_wrong_values_by_assigment(minimum_algorithm: dict): - """Test that wrong values are not accepted through assignment.""" - algo = AlgorithmModel(**minimum_algorithm) - - # loss - with pytest.raises(ValueError): - algo.loss = "ms-meh" - assert algo.loss == minimum_algorithm["loss"] - - # model - with pytest.raises(ValueError): - algo.model.architecture = "YouNet" - - # optimizer - with pytest.raises(ValueError): - algo.optimizer = "I'd rather not to." - - # lr_scheduler - with pytest.raises(ValueError): - algo.lr_scheduler = "Why don't YOU schedule it for once?" 
- - - -def test_model_dump(minimum_algorithm: dict): - """Test that default values are excluded from model dump with - `exclude_defaults=True`.""" - algo = AlgorithmModel(**minimum_algorithm) - - # dump model - model_dict = algo.model_dump(exclude_defaults=True) - - # check that default values are excluded except the architecture - assert len(model_dict) == 5 \ No newline at end of file + # TODO change once VAE are implemented + if model_name.value == "UNet": + minimum_algorithm["model"]["architecture"] = model_name.value + + algo = AlgorithmModel(**minimum_algorithm) + assert algo.model.architecture == model_name.value + + +@pytest.mark.parametrize( + "algorithm, loss, model", + [ + ("n2v", "n2v", {"architecture": "UNet", "n2v2": False}), + ("n2v2", "n2v", {"architecture": "UNet", "n2v2": True}), + ("structn2v", "n2v", {"architecture": "UNet", "n2v2": False}), + ("custom", "mae", {"architecture": "UNet", "n2v2": True}), + ], +) +def test_algorithm_constraints(algorithm: str, loss: str, model: dict): + """Test that constraints are passed for each algorithm.""" + AlgorithmModel(algorithm=algorithm, loss=loss, model=model) diff --git a/tests/config/test_config.py b/tests/config/test_config.py index 51a91f00..1a296ce0 100644 --- a/tests/config/test_config.py +++ b/tests/config/test_config.py @@ -7,10 +7,11 @@ load_configuration, save_configuration, ) +from careamics.config.support import SupportedTransform @pytest.mark.parametrize("name", ["Sn4K3", "C4_M e-L"]) -def test_config_valid_names(minimum_configuration: dict, name: str): +def test_valid_names(minimum_configuration: dict, name: str): """Test valid names (letters, numbers, spaces, dashes and underscores).""" minimum_configuration["experiment_name"] = name myconf = Configuration(**minimum_configuration) @@ -18,7 +19,7 @@ def test_config_valid_names(minimum_configuration: dict, name: str): @pytest.mark.parametrize("name", ["", " ", "#", "/", "^", "%", ",", ".", "a=b"]) -def test_config_invalid_names(minimum_configuration: dict, name: str): +def test_invalid_names(minimum_configuration: dict, name: str): """Test that invalid names raise an error.""" minimum_configuration["experiment_name"] = name with pytest.raises(ValueError): @@ -26,7 +27,7 @@ def test_config_invalid_names(minimum_configuration: dict, name: str): @pytest.mark.parametrize("path", ["", "tmp"]) -def test_config_valid_working_directory( +def test_valid_working_directory( tmp_path: Path, minimum_configuration: dict, path: str ): """Test valid working directory. @@ -39,7 +40,7 @@ def test_config_valid_working_directory( assert myconf.working_directory == path -def test_config_invalid_working_directory(tmp_path: Path, minimum_configuration: dict): +def test_invalid_working_directory(tmp_path: Path, minimum_configuration: dict): """Test that invalid working directory raise an error. Since its parent does not exist, this case is invalid. @@ -57,9 +58,11 @@ def test_config_invalid_working_directory(tmp_path: Path, minimum_configuration: def test_3D_algorithm_and_data_compatibility(minimum_configuration: dict): - """Test that errors are raised if algithm `is_3D` and data axes are incompatible.""" + """Test that errors are raised if algorithm `is_3D` and data axes are + incompatible. 
+ """ # 3D but no Z in axes - minimum_configuration["algorithm"]["model"]["parameters"]["conv_dims"] = 3 + minimum_configuration["algorithm"]["model"]["conv_dims"] = 3 with pytest.raises(ValueError): Configuration(**minimum_configuration) @@ -88,83 +91,45 @@ def test_set_3D(minimum_configuration: dict): conf.set_3D(False, "ZYX") -def test_wrong_values_by_assignment(complete_config: dict): - """Test that wrong values raise an error when assigned.""" - config = Configuration(**complete_config) - - # experiment name - config.experiment_name = "My name is Inigo Montoya" - with pytest.raises(ValueError): - config.experiment_name = "¯\\_(ツ)_/¯" - - # working directory - config.working_directory = complete_config["working_directory"] - with pytest.raises(ValueError): - config.working_directory = "o/o" - - # data - config.data = complete_config["data"] - with pytest.raises((ValueError, TypeError)): - # TODO Yet again, validation isn't happening !!!! - config.data = "I am not a data model" - - # algorithm - config.algorithm = complete_config["algorithm"] +def test_algorithm_and_data_compatibility(minimum_configuration: dict): + """Test that the default data transforms are comaptible with n2v.""" + minimum_configuration["algorithm"]["algorithm"] = "n2v" + Configuration(**minimum_configuration) + + +def test_algorithm_and_data_incompatibility(minimum_configuration: dict): + """Test that errors are corrected if the data transforms are incompatible with + the algorithm.""" + minimum_configuration["algorithm"]["algorithm"] = "n2v" + + # missing ManipulateN2V + minimum_configuration["data"]["transforms"] = [{"name": SupportedTransform.NDFLIP}] + config = Configuration(**minimum_configuration) + assert len(config.data.transforms) == 2 + assert config.data.transforms[-1].name == SupportedTransform.MANIPULATE_N2V + + # ManipulateN2V not the last transform + minimum_configuration["data"]["transforms"] = [ + { + "name": SupportedTransform.MANIPULATE_N2V, + "parameters": { + "roi_size": 15, + }, + }, + {"name": SupportedTransform.NDFLIP}, + ] + config = Configuration(**minimum_configuration) + assert len(config.data.transforms) == 2 + assert config.data.transforms[-1].name == SupportedTransform.MANIPULATE_N2V + assert config.data.transforms[-1].parameters["roi_size"] == 15 + + # multiple ManipulateN2V raises an error + minimum_configuration["data"]["transforms"] = [ + {"name": SupportedTransform.MANIPULATE_N2V}, + {"name": SupportedTransform.MANIPULATE_N2V}, + ] with pytest.raises(ValueError): - config.algorithm = None - - # training - config.training = complete_config["training"] - with pytest.raises(ValueError): - config.training = "Hubert Blaine Wolfeschlegelsteinhausenbergerdorff Sr." - - # TODO Because algorithm is a sub-model of Configuration, and the validation is - # done at the level of the Configuration, this does not cause any error, although - # it should. 
- config.algorithm.is_3D = True - - -def test_minimum_configuration(minimum_configuration: dict): - """Test that we can instantiate a minimum config.""" - dictionary = Configuration(**minimum_configuration).model_dump() - assert dictionary == minimum_configuration - - -def test_complete_config(complete_config: dict): - """Test that we can instantiate a minimum config.""" - dictionary = Configuration(**complete_config).model_dump() - assert dictionary == complete_config - - -def test_config_to_dict_with_default_optionals(complete_config: dict): - """Test that the exclude optional options in model dump gives a full configuration, - including the default optional values. - - Note that None values are always excluded. - """ - # Algorithm default optional parameters - complete_config["algorithm"]["masking_strategy"]["strategy_type"] = "default" - complete_config["algorithm"]["masking_strategy"]["parameters"][ - "masked_pixel_percentage" - ] = 0.2 - complete_config["algorithm"]["model"]["parameters"] = { - "depth": 2, - "num_channels_init": 32, - } - - # Training default optional parameters - complete_config["training"]["optimizer"]["parameters"] = {} - complete_config["training"]["lr_scheduler"]["parameters"] = {} - complete_config["training"]["use_wandb"] = True - complete_config["training"]["num_workers"] = 0 - complete_config["training"]["amp"] = { - "use": True, - "init_scale": 1024, - } - - # instantiate config - myconf = Configuration(**complete_config) - assert myconf.model_dump(exclude_optionals=False) == complete_config + Configuration(**minimum_configuration) def test_config_to_yaml(tmp_path: Path, minimum_configuration: dict): diff --git a/tests/config/test_data.py b/tests/config/test_data.py index 9cabb222..ed5b6e0e 100644 --- a/tests/config/test_data.py +++ b/tests/config/test_data.py @@ -1,12 +1,14 @@ import pytest +from albumentations import Compose -from careamics.config.data import DataModel +from careamics.config.data_model import DataModel +from careamics.config.support import SupportedTransform, get_all_transforms @pytest.mark.parametrize("ext", ["nd2", "jpg", "png ", "zarr", "npy"]) def test_wrong_extensions(minimum_data: dict, ext: str): """Test that supported model raises ValueError for unsupported extensions.""" - minimum_data["data_format"] = ext + minimum_data["data_type"] = ext # instantiate DataModel model with pytest.raises(ValueError): @@ -24,21 +26,6 @@ def test_mean_std_non_negative(minimum_data: dict, mean, std): assert data_model.std == std -def test_mean_std_negative(minimum_data: dict): - """Test that negative mean and std are not accepted.""" - minimum_data["mean"] = -1 - minimum_data["std"] = 10.4 - - with pytest.raises(ValueError): - DataModel(**minimum_data) - - minimum_data["mean"] = 10.4 - minimum_data["std"] = -1 - - with pytest.raises(ValueError): - DataModel(**minimum_data) - - def test_mean_std_both_specified_or_none(minimum_data: dict): """Test an error is raised if std is specified but mean is None.""" # No error if both are None @@ -77,13 +64,8 @@ def test_patch_size(minimum_data: dict): assert data_model.patch_size == [12, 12, 12] -@pytest.mark.parametrize("patch_size", - [ - [12], - [0, 12, 12], - [12, 12, 13], - [12, 12, 12, 12] - ] +@pytest.mark.parametrize( + "patch_size", [[12], [0, 12, 12], [12, 12, 13], [12, 12, 12, 12]] ) def test_wrong_patch_size(minimum_data: dict, patch_size): """Test that wrong patch sizes are not accepted (zero or odd, dims 1 or > 3).""" @@ -93,46 +75,63 @@ def test_wrong_patch_size(minimum_data: dict, patch_size): 
DataModel(**minimum_data) -# TODO transforms validation tests - -def test_wrong_values_by_assigment(minimum_data: dict): - """Test that wrong values are not accepted through assignment.""" - data_model = DataModel(**minimum_data) +def test_passing_supported_transforms(minimum_data: dict): + """Test that list of supported transforms can be passed.""" + minimum_data["transforms"] = [ + {"name": SupportedTransform.NDFLIP}, + {"name": SupportedTransform.MANIPULATE_N2V}, + ] + DataModel(**minimum_data) - # in memory - data_model.in_memory = False - with pytest.raises(ValueError): - data_model.in_memory = "Trues" - # data format - data_model.extension = "tiff" - with pytest.raises(ValueError): - data_model.extension = "png" +def test_passing_empty_transforms(minimum_data: dict): + """Test that empty list of transforms can be passed.""" + minimum_data["transforms"] = [] + DataModel(**minimum_data) - # axes - data_model.axes = "SZYX" - with pytest.raises(ValueError): - data_model.axes = "-YX" - # mean - data_model.mean = 12 +def test_passing_incorrect_element(minimum_data: dict): + """Test that incorrect element in the list of transforms raises an error.""" + minimum_data["transforms"] = [ + {"name": get_all_transforms()[SupportedTransform.NDFLIP]()}, + ] with pytest.raises(ValueError): - data_model.mean = -1 + DataModel(**minimum_data) - # std - data_model.std = 3.6 - with pytest.raises(ValueError): - data_model.std = -1 - # patch size - data_model.patch_size = [12, 12, 12] - with pytest.raises(ValueError): - data_model.patch_size = [12] +def test_passing_compose_transform(minimum_data: dict): + """Test that Compose transform can be passed.""" + minimum_data["transforms"] = Compose( + [ + get_all_transforms()[SupportedTransform.NDFLIP](), + get_all_transforms()[SupportedTransform.MANIPULATE_N2V](), + ] + ) + DataModel(**minimum_data) - # TODO transforms +def test_3D_and_transforms(minimum_data: dict): + """Test that NDFlip is corrected if the data is 3D.""" + minimum_data["transforms"] = [ + { + "name": SupportedTransform.NDFLIP.value, + "parameters": { + "is_3D": True, + "flip_z": True, + }, + }, + { + "name": SupportedTransform.XY_RANDOM_ROTATE90.value, + "parameters": { + "is_3D": True, + }, + }, + ] + data = DataModel(**minimum_data) + assert data.transforms[0].parameters["is_3D"] is False + assert data.transforms[1].parameters["is_3D"] is False -def test_data_to_dict_minimum(minimum_data: dict): - """Test that export to dict does not include optional values.""" - data_minimum = DataModel(**minimum_data).model_dump() - assert data_minimum == minimum_data + # change to 3D + data.axes = "ZYX" + data.transforms[0].parameters["is_3D"] = True + data.transforms[1].parameters["is_3D"] = True diff --git a/tests/config/test_optimizers.py b/tests/config/test_optimizers.py index 12ad5740..ba91e152 100644 --- a/tests/config/test_optimizers.py +++ b/tests/config/test_optimizers.py @@ -1,10 +1,10 @@ import pytest +from careamics.config.optimizer_models import LrSchedulerModel, OptimizerModel from careamics.config.support.supported_optimizers import ( + SupportedOptimizer, SupportedScheduler, - SupportedOptimizer ) -from careamics.config.optimizers import OptimizerModel, LrSchedulerModel @pytest.mark.parametrize( @@ -49,7 +49,7 @@ def test_optimizer_parameters(optimizer_name: SupportedOptimizer, parameters: di def test_sgd_missing_parameter(): """Test that SGD optimizer fails if `lr` is not provided. - + Note: The SGD optimizer requires the `lr` parameter. 
""" with pytest.raises(ValueError): @@ -57,15 +57,16 @@ def test_sgd_missing_parameter(): # test that it works if lr is provided optimizer = OptimizerModel( - name=SupportedOptimizer.SGD.value, - parameters={"lr": 0.1} + name=SupportedOptimizer.SGD.value, parameters={"lr": 0.1} ) assert optimizer.parameters == {"lr": 0.1} def test_optimizer_wrong_values_by_assignments(): """Test that wrong values cause an error during assignment.""" - optimizer = OptimizerModel(name=SupportedOptimizer.Adam.value, parameters={"lr": 0.08}) + optimizer = OptimizerModel( + name=SupportedOptimizer.Adam.value, parameters={"lr": 0.08} + ) # name optimizer.name = SupportedOptimizer.SGD.value @@ -92,17 +93,6 @@ def test_optimizer_to_dict_optional(): assert optim_minimum == config -def test_optimizer_to_dict_default_optional(): - """ "Test that export to dict does not include default optional value.""" - config = { - "name": "Adam", - "parameters": {}, - } - - optim_minimum = OptimizerModel(**config).model_dump(exclude_defaults=True) - assert "parameters" not in optim_minimum.keys() - - @pytest.mark.parametrize( "lr_scheduler_name, parameters", [ @@ -154,54 +144,3 @@ def test_scheduler_missing_parameter(): name=SupportedScheduler.StepLR.value, parameters={"step_size": "5"} ) assert lr_scheduler.parameters == {"step_size": "5"} - - -def test_scheduler_wrong_values_by_assignments(): - """Test that wrong values cause an error during assignment.""" - scheduler = LrSchedulerModel( - name=SupportedScheduler.ReduceLROnPlateau.value, parameters={"factor": 0.3} - ) - - # name - scheduler.name = SupportedScheduler.ReduceLROnPlateau.value - with pytest.raises(ValueError): - # this fails because the step parameter is missing - scheduler.name = SupportedScheduler.StepLR.value - - with pytest.raises(ValueError): - scheduler.name = "Schedule it yourself!" 
- - # parameters - scheduler.name = SupportedScheduler.ReduceLROnPlateau.value - scheduler.parameters = {"factor": 0.1} - with pytest.raises(ValueError): - scheduler.parameters = "factor = 0.3" - - -def test_scheduler_to_dict_optional(): - """ "Test that export to dict includes optional values.""" - scheduler_config = { - "name": "ReduceLROnPlateau", - "parameters": { - "mode": "max", - "factor": 0.3, - }, - } - - scheduler_complete = LrSchedulerModel(**scheduler_config).model_dump() - assert scheduler_complete == scheduler_config - - -def test_scheduler_to_dict_default_optional(): - """ "Test that export to dict does not include optional value.""" - scheduler_config = { - "name": "ReduceLROnPlateau", - "parameters": {}, - } - - scheduler_complete = LrSchedulerModel(**scheduler_config).model_dump( - exclude_defaults=True - ) - assert "parameters" not in scheduler_complete.keys() - - diff --git a/tests/config/test_training.py b/tests/config/test_training.py index 392bd0a6..b64bf8fb 100644 --- a/tests/config/test_training.py +++ b/tests/config/test_training.py @@ -1,8 +1,6 @@ import pytest -from pydantic import conlist - -from careamics.config.training import AMP, Training +from careamics.config.training_model import AMP, Training @pytest.mark.parametrize("init_scale", [512, 1024, 65536]) diff --git a/tests/config/test_transform.py b/tests/config/test_transform.py deleted file mode 100644 index 1df501fe..00000000 --- a/tests/config/test_transform.py +++ /dev/null @@ -1,91 +0,0 @@ -import pytest - -from careamics.config.transform import TransformModel, ALL_TRANSFORMS - - -def test_all_transforms(): - """Test that all transforms are can be instantiated.""" - for name, func in ALL_TRANSFORMS.items(): - print(name) - func() - - -@pytest.mark.parametrize("name, parameters", - [ - ("flip", {}), - ("flip", {"p": 0.5}), - ("DefaultManipulateN2V", {"masked_pixel_percentage": 0.2, "roi_size": 11}), - ("DefaultManipulateN2V", {}), - ] -) -def test_transform(name, parameters): - TransformModel(name=name, parameters=parameters) - - -@pytest.mark.parametrize("name, parameters", - [ - ("flippy", {"p": 0.5}), - ("flip", {"ps": 0.5}), - ] -) -def test_transform_wrong_values(name, parameters): - with pytest.raises(ValueError): - TransformModel(name=name, parameters=parameters) - - -# TODO: tests for the ManipulateN2V transforms - - -@pytest.mark.parametrize("roi_size", [5, 9, 15]) -def test_parameters_roi_size(roi_size: int): - """Test that Algorithm accepts roi_size as an even number within the - range [3, 21].""" - # complete_config["algorithm"]["masking_strategy"]["parameters"][ - # "roi_size" - # ] = roi_size - # algorithm = Algorithm(**complete_config["algorithm"]) - # assert algorithm.masking_strategy.parameters["roi_size"] == roi_size - # TODO - pass - -@pytest.mark.parametrize("roi_size", [2, 4, 23]) -def test_parameters_wrong_roi_size(roi_size: int): - """Test that wrong num_channels_init cause an error.""" - # complete_config["algorithm"]["masking_strategy"]["parameters"][ - # "roi_size" - # ] = roi_size - # with pytest.raises(ValueError): - # Algorithm(**complete_config["algorithm"]) - # TODO - pass - - -@pytest.mark.parametrize("masked_pixel_percentage", [0.1, 0.2, 5, 20]) -def test_masked_pixel_percentage(masked_pixel_percentage: float): - """Test that Algorithm accepts the minimum configuration.""" - # algorithm = complete_config["algorithm"] - # algorithm["masking_strategy"]["parameters"][ - # "masked_pixel_percentage" - # ] = masked_pixel_percentage - - # algo = Algorithm(**algorithm) - # 
assert ( - # algo.masking_strategy.parameters["masked_pixel_percentage"] - # == masked_pixel_percentage - # ) - # TODO - pass - - -@pytest.mark.parametrize("masked_pixel_percentage", [0.01, 21]) -def test_wrong_masked_pixel_percentage( - masked_pixel_percentage: float -): - """Test that Algorithm accepts the minimum configuration.""" - # algorithm = complete_config["algorithm"]["masking_strategy"]["parameters"] - # algorithm["masked_pixel_percentage"] = masked_pixel_percentage - - # with pytest.raises(ValueError): - # Algorithm(**algorithm) - # TODO - pass \ No newline at end of file diff --git a/tests/config/test_transform_model.py b/tests/config/test_transform_model.py new file mode 100644 index 00000000..28c6425a --- /dev/null +++ b/tests/config/test_transform_model.py @@ -0,0 +1,41 @@ +import pytest + +from careamics.config.support import SupportedTransform +from careamics.config.transform_model import TransformModel + + +@pytest.mark.parametrize( + "name, parameters", + [ + (SupportedTransform.NDFLIP, {}), + (SupportedTransform.XY_RANDOM_ROTATE90, {}), + (SupportedTransform.NORMALIZE, {"mean": 1.0, "std": 1.0}), + (SupportedTransform.MANIPULATE_N2V, {}), + ], +) +def test_official_transforms(name, parameters): + """Test that officially supported transforms are accepted.""" + TransformModel(name=name, parameters=parameters) + + +def test_nonexisting_transform(): + """Test that non-existing transforms are not accepted.""" + with pytest.raises(ValueError): + TransformModel(name="OptimusPrime") + + +def test_filtering_unknown_parameters(): + """Test that unknown parameters are filtered out.""" + parameters = {"some_param": 42, "p": 1.0} + + # create transform model + transform = TransformModel(name=SupportedTransform.NDFLIP, parameters=parameters) + + # check parameters + assert transform.parameters == {"p": 1.0} + + +def test_missing_parameters(): + """Test that missing parameters trigger an error.""" + with pytest.raises(ValueError): + TransformModel(name="RandomCrop", parameters={}) diff --git a/tests/conftest.py b/tests/conftest.py index 2c63eea3..17e1d93d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,16 +1,20 @@ -import copy import tempfile from pathlib import Path -from typing import Callable, Tuple +from typing import Callable, Generator, Tuple import numpy as np import pytest import tifffile from careamics.config import Configuration -from careamics.config.algorithm import AlgorithmModel, LrSchedulerModel, OptimizerModel -from careamics.config.data import DataModel -from careamics.config.training import Training +from careamics.config.algorithm_model import ( + AlgorithmModel, + LrSchedulerModel, + OptimizerModel, +) +from careamics.config.data_model import DataModel +from careamics.config.support import SupportedData +from careamics.config.training_model import Training # TODO add details about where each of these fixture is used (e.g. 
smoke test) @@ -36,17 +40,11 @@ def minimum_algorithm() -> dict: """ # create dictionary algorithm = { - "algorithm": "n2v", + "algorithm": "custom", "loss": "n2v", "model": { "architecture": "UNet", }, - "optimizer": { - "name": "Adam", - }, - "lr_scheduler": { - "name": "ReduceLROnPlateau" - }, } return algorithm @@ -63,8 +61,7 @@ def minimum_data() -> dict: """ # create dictionary data = { - "in_memory": True, - "data_format": "tif", + "data_type": SupportedData.TIFF.value, "patch_size": [64, 64], "axes": "SYX", } @@ -89,13 +86,11 @@ def minimum_training() -> dict: return training + @pytest.fixture def minimum_configuration( - tmp_path: Path, - minimum_algorithm: dict, - minimum_data: dict, - minimum_training: dict - ) -> dict: + tmp_path: Path, minimum_algorithm: dict, minimum_data: dict, minimum_training: dict +) -> dict: """Create a minimum configuration. Parameters @@ -126,68 +121,6 @@ def minimum_configuration( return configuration -@pytest.fixture -def complete_configuration(minimum_config: dict) -> dict: - """Create a complete configuration. - - This configuration should not be used for testing an Engine. - - Parameters - ---------- - minimum_config : dict - A minimum configuration. - - Returns - ------- - dict - A complete configuration example. - """ - # add to configuration - complete_config = copy.deepcopy(minimum_config) - complete_config["algorithm"]["loss"] = "pn2v" - complete_config["algorithm"]["noise_model"] = { - "model_type": "hist", - "parameters": { - "min_value": 350, - "max_value": 6500, - "bins": 256, - }, - } - complete_config["algorithm"]["transforms"] = { - "Flip": None, - "ManipulateN2V": { - "masked_pixel_percentage": 0.6, - "roi_size": 13, - }, - } - - complete_config["algorithm"]["model"] = { - "architecture": "UNet", - "parameters": { - "depth": 8, - "num_channels_init": 32, - }, - } - - complete_config["training"]["optimizer"]["parameters"] = { - "lr": 0.00999, - } - complete_config["training"]["lr_scheduler"]["parameters"] = { - "patience": 22, - } - complete_config["training"]["use_wandb"] = True - complete_config["training"]["num_workers"] = 6 - complete_config["training"]["amp"] = { - "use": True, - "init_scale": 512, - } - complete_config["data"]["in_memory"] = False - complete_config["data"]["mean"] = 666.666 - complete_config["data"]["std"] = 42.420 - - return complete_config - - @pytest.fixture def ordered_array() -> Callable: """A function that returns an array with ordered values.""" @@ -235,10 +168,11 @@ def array_3D() -> np.ndarray: @pytest.fixture -def temp_dir() -> Path: +def temp_dir() -> Generator[Path, None, None]: with tempfile.TemporaryDirectory() as temp_dir: yield Path(temp_dir) + @pytest.fixture def image_size() -> Tuple[int, int]: return (128, 128) @@ -253,6 +187,7 @@ def patch_size() -> Tuple[int, int]: def overlaps() -> Tuple[int, int]: return (32, 32) + @pytest.fixture def example_data_path( temp_dir: Path, image_size: Tuple[int, int], patch_size: Tuple[int, int] diff --git a/tests/dataset/dataset_utils/test_list_files.py b/tests/dataset/dataset_utils/test_list_files.py new file mode 100644 index 00000000..595fa461 --- /dev/null +++ b/tests/dataset/dataset_utils/test_list_files.py @@ -0,0 +1,232 @@ +from pathlib import Path + +import numpy as np +import pytest +import tifffile + +from careamics.config.support import SupportedData +from careamics.dataset.dataset_utils import ( + get_files_size, + list_files, + validate_source_target_files, +) + + +def test_get_files_size_tiff(tmp_path: Path): + """Test getting size of multiple 
TIFF files.""" + # create array + image = np.ones((10, 10)) + + # save array to tiff + path1 = tmp_path / "test1.tif" + tifffile.imwrite(path1, image) + + path2 = tmp_path / "test2.tiff" + tifffile.imwrite(path2, image) + + # save text file + path3 = tmp_path / "test3.txt" + path3.write_text("test") + + # save file in subdirectory + subdirectory = tmp_path / "subdir" + subdirectory.mkdir() + path4 = subdirectory / "test3.tif" + tifffile.imwrite(path4, image) + + # create file list + files = [path1, path2, path4] + + # get files size + size = get_files_size(files) + assert size > 0 + + +def test_list_single_file_tiff(tmp_path: Path): + """Test listing a single TIFF file.""" + # create array + image = np.ones((10, 10)) + + # save array to tiff + path = tmp_path / "test.tif" + tifffile.imwrite(path, image) + + # list file using parent directory + files = list_files(tmp_path, SupportedData.TIFF) + assert len(files) == 1 + assert files[0] == path + + # list file using file path + files = list_files(path, SupportedData.TIFF) + assert len(files) == 1 + assert files[0] == path + + +def test_list_multiple_files_tiff(tmp_path: Path): + """Test listing multiple TIFF files in subdirectories with additional files.""" + # create array + image = np.ones((10, 10)) + + # save array to /npy + path1 = tmp_path / "test1.tif" + tifffile.imwrite(path1, image) + + path2 = tmp_path / "test2.tif" + tifffile.imwrite(path2, image) + + # save text file + path3 = tmp_path / "test3.txt" + path3.write_text("test") + + # save file in subdirectory + subdirectory = tmp_path / "subdir" + subdirectory.mkdir() + path4 = subdirectory / "test3.tif" + tifffile.imwrite(path4, image) + + # create file list + ref_files = [path1, path2, path4] + + # list files using parent directory + files = list_files(tmp_path, SupportedData.TIFF) + assert len(files) == 3 + assert set(files) == set(ref_files) + + +def test_list_single_file_custom(tmp_path): + """Test listing a single custom file.""" + # create array + image = np.ones((10, 10)) + + # save as .npy + path = tmp_path / "custom.npy" + np.save(path, image) + + # list files using parent directory + files = list_files(tmp_path, SupportedData.CUSTOM) + assert len(files) == 1 + assert files[0] == path + + # list files using file path + files = list_files(path, SupportedData.CUSTOM) + assert len(files) == 1 + assert files[0] == path + + +def test_list_multiple_files_custom(tmp_path: Path): + """Test listing multiple custom files in subdirectories with additional files.""" + # create array + image = np.ones((10, 10)) + + # save array to /npy + path1 = tmp_path / "test1.npy" + np.save(path1, image) + + path2 = tmp_path / "test2.npy" + np.save(path2, image) + + # save text file + path3 = tmp_path / "test3.txt" + path3.write_text("test") + + # save file in subdirectory + subdirectory = tmp_path / "subdir" + subdirectory.mkdir() + path4 = subdirectory / "test3.npy" + np.save(path4, image) + + # create file list (even the text file is selected) + ref_files = [path1, path2, path3, path4] + + # list files using parent directory + files = list_files(tmp_path, SupportedData.CUSTOM) + assert len(files) == 4 + assert set(files) == set(ref_files) + + # list files using the file extension filter + files = list_files(tmp_path, SupportedData.CUSTOM, "*.npy") + assert len(files) == 3 + assert set(files) == {path1, path2, path4} + + +def test_validate_source_target_files(tmp_path: Path): + """Test that it passes for two folders with same number of files and same names.""" + # create two subfolders + src = 
tmp_path / "src" + src.mkdir() + + tar = tmp_path / "tar" + tar.mkdir() + + # populate with files + filename_1 = "test1.txt" + filename_2 = "test2.txt" + + (tmp_path / "src" / filename_1).write_text("test") + (tmp_path / "tar" / filename_1).write_text("test") + + (tmp_path / "src" / filename_2).write_text("test") + (tmp_path / "tar" / filename_2).write_text("test") + + # list files + src_files = list_files(src, SupportedData.CUSTOM) + tar_files = list_files(tar, SupportedData.CUSTOM) + + # validate files + validate_source_target_files(src_files, tar_files) + + +def test_validate_source_target_files_wrong_names(tmp_path: Path): + """Test that an error is raised if filenames are different.""" + # create two subfolders + src = tmp_path / "src" + src.mkdir() + + tar = tmp_path / "tar" + tar.mkdir() + + # populate with files + filename_1 = "test1.txt" + filename_2 = "test2.txt" + filename_3 = "test3.txt" + + (tmp_path / "src" / filename_1).write_text("test") + (tmp_path / "tar" / filename_1).write_text("test") + + (tmp_path / "src" / filename_2).write_text("test") + (tmp_path / "tar" / filename_3).write_text("test") + + # list files + src_files = list_files(src, SupportedData.CUSTOM) + tar_files = list_files(tar, SupportedData.CUSTOM) + + # validate files + with pytest.raises(ValueError): + validate_source_target_files(src_files, tar_files) + + +def test_validate_source_target_files_wrong_number(tmp_path: Path): + """Test that an error is raised if filenames are different.""" + # create two subfolders + src = tmp_path / "src" + src.mkdir() + + tar = tmp_path / "tar" + tar.mkdir() + + # populate with files + filename_1 = "test1.txt" + filename_2 = "test2.txt" + + (tmp_path / "src" / filename_1).write_text("test") + (tmp_path / "tar" / filename_1).write_text("test") + + (tmp_path / "src" / filename_2).write_text("test") + + # list files + src_files = list_files(src, SupportedData.CUSTOM) + tar_files = list_files(tar, SupportedData.CUSTOM) + + # validate files + with pytest.raises(ValueError): + validate_source_target_files(src_files, tar_files) diff --git a/tests/dataset/dataset_utils/test_read_tiff.py b/tests/dataset/dataset_utils/test_read_tiff.py new file mode 100644 index 00000000..5d534b8f --- /dev/null +++ b/tests/dataset/dataset_utils/test_read_tiff.py @@ -0,0 +1,32 @@ +import numpy as np +import pytest +import tifffile + +from careamics.dataset.dataset_utils.read_tiff import read_tiff + + +def test_read_tiff(tmp_path, ordered_array): + """Test reading a tiff file.""" + # create an array + array: np.ndarray = ordered_array((10, 10)) + + # save files + file = tmp_path / "test.tiff" + tifffile.imwrite(file, array) + + # read files + array_read = read_tiff(file) + np.testing.assert_array_equal(array_read, array) + + +def test_read_tiff_invalid(tmp_path): + # invalid file type + file = tmp_path / "test.txt" + file.write_text("test") + with pytest.raises(ValueError): + read_tiff(file) + + # non-existing file + file = tmp_path / "test.tiff" + with pytest.raises(FileNotFoundError): + read_tiff(file) diff --git a/tests/dataset/patching/test_patching_utils.py b/tests/dataset/patching/test_patching_utils.py new file mode 100644 index 00000000..5ac4568c --- /dev/null +++ b/tests/dataset/patching/test_patching_utils.py @@ -0,0 +1,44 @@ +import numpy as np +import pytest + +from careamics.dataset.patching.validate_patch_dimension import ( + validate_patch_dimensions, +) + + +@pytest.mark.parametrize( + "arr_shape, patch_size", + [ + ((1, 1, 8, 8), (2, 2)), + ((1, 1, 8, 8, 8), (2, 2, 2)), + ], +) +def 
test_patches_sanity_check(arr_shape, patch_size): + arr = np.zeros(arr_shape) + is_3d_patch = len(patch_size) == 3 + # check if the patch is 2D or 3D. Subtract 1 because the first dimension is sample + validate_patch_dimensions(arr, patch_size, is_3d_patch) + + +@pytest.mark.parametrize( + "arr_shape, patch_size", + [ + # Wrong number of dimensions 2D + # minimum 3 dimensions CYX + ((10, 10), (5, 5, 5)), + # Wrong number of dimensions 3D + ((1, 1, 10, 10, 10), (5, 5)), + # Wrong z patch size + ((1, 10, 10), (5, 5, 5)), + ((10, 10, 10), (10, 5, 5)), + # Wrong YX patch sizes + ((1, 10, 10), (12, 5)), + ((1, 10, 10), (5, 11)), + ], +) +def test_patches_sanity_check_invalid_cases(arr_shape, patch_size): + arr = np.zeros(arr_shape) + is_3d_patch = len(patch_size) == 3 + # check if the patch is 2D or 3D. Subtract 1 because the first dimension is sample + with pytest.raises(ValueError): + validate_patch_dimensions(arr, patch_size, is_3d_patch) diff --git a/tests/dataset/patching/test_random_patching.py b/tests/dataset/patching/test_random_patching.py new file mode 100644 index 00000000..6bc85f74 --- /dev/null +++ b/tests/dataset/patching/test_random_patching.py @@ -0,0 +1,105 @@ +import numpy as np +import pytest + +from careamics.dataset.patching.patching import extract_patches_random + + +@pytest.mark.parametrize( + "shape, patch_size", + [ + ((1, 1, 8, 8), (3, 3)), + ((1, 3, 8, 8), (3, 3)), + ((3, 1, 8, 8), (3, 3)), + ((2, 3, 8, 8), (3, 3)), + ((1, 1, 5, 8, 8), (3, 3, 3)), + ((1, 3, 5, 8, 8), (3, 3, 3)), + ((3, 1, 5, 8, 8), (3, 3, 3)), + ((2, 3, 5, 8, 8), (3, 3, 3)), + ], +) +def test_random_patching_unsupervised(ordered_array, shape, patch_size): + """Check that the patches are extracted correctly. + + Since extract patches is called on already shaped array, dimensions S and C are + present. + """ + np.random.seed(42) + + # create array + array = ordered_array(shape) + is_3D = len(patch_size) == 3 + top_left = [] + + for _ in range(3): + patch_generator = extract_patches_random(array, patch_size=patch_size) + + # get all patches and targets + patches = [patch for patch, _ in patch_generator] + + # check patch shape + for patch in patches: + # account for C dimension + assert patch.shape[1:] == patch_size + + # get top_left index in the original array + if is_3D: + ind = np.where(array == patch[0, 0, 0, 0]) + else: + ind = np.where(array == patch[0, 0, 0]) + + top_left.append(np.array(ind)) + + # check randomness + coords = np.array(top_left).squeeze() + assert coords.min() == 0 + assert coords.max() == max(array.shape) - max(patch_size) + assert len(np.unique(coords, axis=0)) >= 0.7 * np.prod(shape) / np.prod(patch_size) + + +# @pytest.mark.parametrize( +# "patch_size", +# [ +# (2, 2), +# (4, 2), +# (4, 8), +# (8, 8), +# ], +# ) +# def test_extract_patches_random_2d(array_2D, patch_size): +# """Test extracting patches randomly in 2D.""" +# check_extract_patches_random(array_2D, "SYX", patch_size) + + +# @pytest.mark.parametrize( +# "patch_size", +# [ +# (2, 2), +# (4, 2), +# (4, 8), +# (8, 8), +# ], +# ) +# def test_extract_patches_random_supervised_2d(array_2D, patch_size): +# """Test extracting patches randomly in 2D.""" +# check_extract_patches_random( +# array_2D, +# "SYX", +# patch_size, +# target=array_2D +# ) + + +# @pytest.mark.parametrize( +# "patch_size", +# [ +# (2, 2, 4), +# (4, 2, 2), +# (2, 8, 4), +# (4, 8, 8), +# ], +# ) +# def test_extract_patches_random_3d(array_3D, patch_size): +# """Test extracting patches randomly in 3D. 
+ +# The 3D array is a fixture of shape (1, 8, 16, 16).""" +# check_extract_patches_random(array_3D, "SZYX", patch_size) diff --git a/tests/dataset/test_dataset_utils.py b/tests/dataset/patching/test_sequential_patching.py similarity index 58% rename from tests/dataset/test_dataset_utils.py rename to tests/dataset/patching/test_sequential_patching.py index b4e35c0d..ec91cb84 100644 --- a/tests/dataset/test_dataset_utils.py +++ b/tests/dataset/patching/test_sequential_patching.py @@ -1,15 +1,62 @@ import numpy as np import pytest -from careamics.dataset.patching import ( - _compute_crop_and_stitch_coords_1d, +from careamics.dataset.patching.sequential_patching import ( _compute_number_of_patches, _compute_overlap, _compute_patch_steps, _compute_reshaped_view, + extract_patches_sequential, ) +def check_extract_patches_sequential(array: np.ndarray, axes: str, patch_size: tuple): + """Check that the patches are extracted correctly. + + The array should have been generated using np.arange and np.reshape.""" + patches, _ = extract_patches_sequential(array, axes=axes, patch_size=patch_size) + + # check patch shape + assert patches.shape[2:] == patch_size + + # check that all values are covered by the patches + n_max = np.prod(array.shape) # maximum value in the array + unique = np.unique(np.array(patches)) # unique values in the patches + assert len(unique) == n_max + + +@pytest.mark.parametrize( + "patch_size", + [ + (2, 2), + (4, 2), + (4, 8), + (8, 8), + ], +) +def test_extract_patches_sequential_2d(array_2D, patch_size): + """Test extracting patches sequentially in 2D.""" + check_extract_patches_sequential(array_2D, "SYX", patch_size) + + +@pytest.mark.parametrize( + "patch_size", + [ + (2, 2, 4), + (4, 2, 2), + (2, 8, 4), + (4, 8, 8), + ], +) +def test_extract_patches_sequential_3d(array_3D, patch_size): + """Test extracting patches sequentially in 3D. + + The 3D array is a fixture of shape (1, 8, 16, 16).""" + # TODO changed the fixture to (1, 8, 16, 16), uneven shape doesnt work. 
We need to + # discuss the function or the test cases + check_extract_patches_sequential(array_3D, "SZYX", patch_size) + + @pytest.mark.parametrize( "shape, patch_sizes, expected", [ @@ -71,58 +118,6 @@ def check_compute_reshaped_view(array, window_shape, steps): assert output.shape == (np.prod(n_patches), *window_shape) -@pytest.mark.parametrize("axis_size", [32, 35, 40]) -@pytest.mark.parametrize("patch_size, overlap", [(16, 4), (8, 6), (16, 8), (32, 24)]) -def test_compute_crop_and_stitch_coords_1d(axis_size, patch_size, overlap): - ( - crop_coords, - stitch_coords, - overlap_crop_coords, - ) = _compute_crop_and_stitch_coords_1d(axis_size, patch_size, overlap) - - # check that the number of patches is sufficient to cover the whole axis and that - # the number of coordinates is - # the same for all three coordinate groups - num_patches = np.ceil((axis_size - overlap) / (patch_size - overlap)).astype(int) - assert ( - len(crop_coords) - == len(stitch_coords) - == len(overlap_crop_coords) - == num_patches - ) - # check if 0 is the first coordinate, axis_size is last coordinate in all three - # coordinate groups - assert all( - all((group[0][0] == 0, group[-1][1] == axis_size)) - for group in [crop_coords, stitch_coords] - ) - # check if neighboring stitch coordinates are equal - assert all( - stitch_coords[i][1] == stitch_coords[i + 1][0] - for i in range(len(stitch_coords) - 1) - ) - - # check that the crop coordinates cover the whole axis - assert ( - np.sum(np.array(crop_coords)[:, 1] - np.array(crop_coords)[:, 0]) - == patch_size * num_patches - ) - - # check that the overlap crop coordinates cover the whole axis - assert ( - np.sum( - np.array(overlap_crop_coords)[:, 1] - np.array(overlap_crop_coords)[:, 0] - ) - == axis_size - ) - - # check that shape of all cropped tiles is equal - assert np.array_equal( - np.array(overlap_crop_coords)[:, 1] - np.array(overlap_crop_coords)[:, 0], - np.array(stitch_coords)[:, 1] - np.array(stitch_coords)[:, 0], - ) - - @pytest.mark.parametrize( "window_shape, steps", [ diff --git a/tests/dataset/patching/test_tiled_patching.py b/tests/dataset/patching/test_tiled_patching.py new file mode 100644 index 00000000..b8556d2b --- /dev/null +++ b/tests/dataset/patching/test_tiled_patching.py @@ -0,0 +1,112 @@ +import numpy as np +import pytest + +from careamics.dataset.patching.tiled_patching import ( + _compute_crop_and_stitch_coords_1d, + extract_tiles, +) + + +def check_extract_tiles(array: np.ndarray, axes, tile_size, overlaps): + """Test extracting patches randomly.""" + tile_data_generator = extract_tiles(array, axes, tile_size, overlaps) + + tiles = [] + all_overlap_crop_coords = [] + all_stitch_coords = [] + # Assemble all tiles and their respective coordinates + for tile_data in tile_data_generator: + tile, _, _, overlap_crop_coords, stitch_coords = tile_data + tiles.append(tile) + all_overlap_crop_coords.append(overlap_crop_coords) + all_stitch_coords.append(stitch_coords) + + # check tile shape, ignore sample dimension + assert tile.shape[2:] == tile_size + assert len(overlap_crop_coords) == len(stitch_coords) == len(tile_size) + + # check that each tile has a unique set of coordinates + assert len(tiles) == len(all_overlap_crop_coords) == len(all_stitch_coords) + + # check that all values are covered by the tiles + n_max = np.prod(array.shape) # maximum value in the array + unique = np.unique(np.array(tiles)) # unique values in the patches + assert len(unique) >= n_max + + +@pytest.mark.parametrize( + "tile_size, axes, overlaps", + [ + ((4, 4), 
"SYX", (2, 2)), + ((8, 8), "SYX", (4, 4)), + ], # TODO add more test cases with axes +) +def test_extract_tiles_2d(array_2D, axes, tile_size, overlaps): + """Test extracting tiles for prediction in 2D.""" + check_extract_tiles(array_2D, axes, tile_size, overlaps) + + +@pytest.mark.parametrize( + "tile_size, axes, overlaps", + [ + ((4, 4, 4), "SZYX", (2, 2, 2)), + ((8, 8, 8), "SZYX", (4, 4, 4)), + ], +) +def test_extract_tiles_3d(array_3D, axes, tile_size, overlaps): + """Test extracting tiles for prediction in 3D. + + The 3D array is a fixture of shape (1, 8, 16, 16).""" + check_extract_tiles(array_3D, axes, tile_size, overlaps) + + +@pytest.mark.parametrize("axis_size", [32, 35, 40]) +@pytest.mark.parametrize("patch_size, overlap", [(16, 4), (8, 6), (16, 8), (32, 24)]) +def test_compute_crop_and_stitch_coords_1d(axis_size, patch_size, overlap): + ( + crop_coords, + stitch_coords, + overlap_crop_coords, + ) = _compute_crop_and_stitch_coords_1d(axis_size, patch_size, overlap) + + # check that the number of patches is sufficient to cover the whole axis and that + # the number of coordinates is + # the same for all three coordinate groups + num_patches = np.ceil((axis_size - overlap) / (patch_size - overlap)).astype(int) + assert ( + len(crop_coords) + == len(stitch_coords) + == len(overlap_crop_coords) + == num_patches + ) + # check if 0 is the first coordinate, axis_size is last coordinate in all three + # coordinate groups + assert all( + all((group[0][0] == 0, group[-1][1] == axis_size)) + for group in [crop_coords, stitch_coords] + ) + # check if neighboring stitch coordinates are equal + assert all( + stitch_coords[i][1] == stitch_coords[i + 1][0] + for i in range(len(stitch_coords) - 1) + ) + + # check that the crop coordinates cover the whole axis + assert ( + np.sum(np.array(crop_coords)[:, 1] - np.array(crop_coords)[:, 0]) + == patch_size * num_patches + ) + + # check that the overlap crop coordinates cover the whole axis + assert ( + np.sum( + np.array(overlap_crop_coords)[:, 1] - np.array(overlap_crop_coords)[:, 0] + ) + == axis_size + ) + + # check that shape of all cropped tiles is equal + assert np.array_equal( + np.array(overlap_crop_coords)[:, 1] - np.array(overlap_crop_coords)[:, 0], + np.array(stitch_coords)[:, 1] - np.array(stitch_coords)[:, 0], + ) diff --git a/tests/dataset/test_in_memory_dataset.py b/tests/dataset/test_in_memory_dataset.py new file mode 100644 index 00000000..562aa231 --- /dev/null +++ b/tests/dataset/test_in_memory_dataset.py @@ -0,0 +1,106 @@ +import numpy as np +import pytest +import tifffile + +from careamics.config import DataModel +from careamics.config.support import SupportedData +from careamics.dataset import InMemoryDataset + + +def test_number_of_patches(ordered_array): + """Test the number of patches extracted from InMemoryDataset.""" + # create array + array = ordered_array((20, 20)) + + # create config + config_dict = { + "data_type": SupportedData.ARRAY.value, + "patch_size": [4, 4], + "axes": "YX", + } + config = DataModel(**config_dict) + + # create dataset + dataset = InMemoryDataset( + data_config=config, + data=array, + ) + + # check number of patches + assert dataset.get_number_of_patches() == dataset.patches.shape[0] + + +@pytest.mark.parametrize("percentage", [0.1, 0.6]) +def test_extracting_val_array(ordered_array, percentage): + """Test extracting a validation set patches from InMemoryDataset.""" + # create array + array = ordered_array((20, 20)) + + # create config + config_dict = { + "data_type": SupportedData.ARRAY.value, + 
"patch_size": [4, 4], + "axes": "YX", + } + config = DataModel(**config_dict) + + # create dataset + dataset = InMemoryDataset( + data_config=config, + data=array, + ) + + # compute number of patches + total_n_patches = dataset.get_number_of_patches() + minimum_patches = 5 + n_patches = max(round(percentage * total_n_patches), minimum_patches) + + # extract datset + valset = dataset.split_dataset(percentage, minimum_patches) + + # check number of patches + assert valset.get_number_of_patches() == n_patches + assert dataset.get_number_of_patches() == total_n_patches - n_patches + + # check that none of the validation patch values are in the original dataset + assert np.in1d(valset.patches, dataset.patches).sum() == 0 + + +@pytest.mark.parametrize("percentage", [0.1, 0.6]) +def test_extracting_val_files(tmp_path, ordered_array, percentage): + """Test extracting a validation set patches from InMemoryDataset.""" + # create array + array = ordered_array((20, 20)) + + # save array to file + file_path = tmp_path / "array.tif" + tifffile.imwrite(file_path, array) + + # create config + config_dict = { + "data_type": SupportedData.ARRAY.value, + "patch_size": [4, 4], + "axes": "YX", + } + config = DataModel(**config_dict) + + # create dataset + dataset = InMemoryDataset( + data_config=config, + data=[file_path], + ) + + # compute number of patches + total_n_patches = dataset.get_number_of_patches() + minimum_patches = 5 + n_patches = max(round(percentage * total_n_patches), minimum_patches) + + # extract datset + valset = dataset.split_dataset(percentage, minimum_patches) + + # check number of patches + assert valset.get_number_of_patches() == n_patches + assert dataset.get_number_of_patches() == total_n_patches - n_patches + + # check that none of the validation patch values are in the original dataset + assert np.in1d(valset.patches, dataset.patches).sum() == 0 diff --git a/tests/dataset/test_iterable_dataset.py b/tests/dataset/test_iterable_dataset.py new file mode 100644 index 00000000..85e77423 --- /dev/null +++ b/tests/dataset/test_iterable_dataset.py @@ -0,0 +1,141 @@ +import numpy as np +import pytest +import tifffile + +from careamics.config import DataModel +from careamics.config.support import SupportedData +from careamics.dataset import IterableDataset +from careamics.dataset.dataset_utils import read_tiff + + +@pytest.mark.parametrize( + "shape", + [ + # 2D + (20, 20), + # 3D + (20, 20, 20), + ], +) +def test_number_of_files(tmp_path, ordered_array, shape): + """Test number of files in IterableDataset.""" + # create array + array_size = 20 + patch_size = 4 + n_files = 3 + factor = len(shape) + axes = "YX" if factor == 2 else "ZYX" + patch_sizes = [patch_size] * factor + array = ordered_array(shape) + + # save three files + files = [] + for i in range(n_files): + file = tmp_path / f"array{i}.tif" + tifffile.imwrite(file, array) + files.append(file) + + # create config + config_dict = { + "data_type": SupportedData.TIFF.value, + "patch_size": patch_sizes, + "axes": axes, + } + config = DataModel(**config_dict) + + # create dataset + dataset = IterableDataset( + data_config=config, src_files=files, read_source_func=read_tiff + ) + + # check number of files + assert dataset.data_files == files + + # iterate over dataset + patches = list(dataset) + assert len(patches) == n_files * (array_size / patch_size) ** factor + + +def test_read_function(tmp_path, ordered_array): + """Test reading files in IterableDataset using a custom read function.""" + + # read function for .npy files + def 
read_npy(file_path, *args, **kwargs): + return np.load(file_path) + + array_size = 20 + patch_size = 4 + n_files = 3 + patch_sizes = [patch_size] * 2 + + # create array + array = ordered_array((n_files, array_size, array_size)) + + # save each plane in a single .npy file + files = [] + for i in range(array.shape[0]): + file_path = tmp_path / f"array{i}.npy" + np.save(file_path, array[i]) + files.append(file_path) + + # create config + config_dict = { + "data_type": SupportedData.CUSTOM.value, + "patch_size": patch_sizes, + "axes": "YX", + } + config = DataModel(**config_dict) + + # create dataset + dataset = IterableDataset( + data_config=config, + src_files=files, + read_source_func=read_npy, + ) + assert dataset.data_files == files + + # iterate over dataset + patches = list(dataset) + assert len(patches) == n_files * (array_size / patch_size) ** 2 + + +@pytest.mark.parametrize("percentage", [0.1, 0.6]) +def test_extracting_val_files(tmp_path, ordered_array, percentage): + """Test extracting a validation set patches from InMemoryDataset.""" + # create array + array = ordered_array((20, 20)) + + # save array to 25 files + files = [] + for i in range(25): + file_path = tmp_path / f"array{i}.tif" + tifffile.imwrite(file_path, array) + files.append(file_path) + + # create config + config_dict = { + "data_type": SupportedData.TIFF.value, + "patch_size": [4, 4], + "axes": "YX", + } + config = DataModel(**config_dict) + + # create dataset + dataset = IterableDataset( + data_config=config, src_files=files, read_source_func=read_tiff + ) + + # compute number of patches + total_n_files = dataset.get_number_of_files() + minimum_files = 5 + n_files = max(round(percentage * total_n_files), minimum_files) + + # extract datset + valset = dataset.split_dataset(percentage, minimum_files) + + # check number of patches + assert valset.get_number_of_files() == n_files + assert dataset.get_number_of_files() == total_n_files - n_files + + # check that none of the validation files are in the original dataset + assert set(valset.data_files).isdisjoint(set(dataset.data_files)) diff --git a/tests/dataset/test_patching.py b/tests/dataset/test_patching.py deleted file mode 100644 index 1fee6a81..00000000 --- a/tests/dataset/test_patching.py +++ /dev/null @@ -1,212 +0,0 @@ -import numpy as np -import pytest - -from careamics.dataset.patching import ( - _extract_patches_random, - _extract_patches_sequential, - _extract_tiles, - _patches_check_and_update, -) - - -def check_extract_patches_sequential(array, axes, patch_size): - """Check that the patches are extracted correctly. - - The array should have been generated using np.arange and np.reshape.""" - patches, _ = _extract_patches_sequential(array, axes=axes, patch_size=patch_size) - - # check patch shape - assert patches.shape[2:] == patch_size - - # check that all values are covered by the patches - n_max = np.prod(array.shape) # maximum value in the array - unique = np.unique(np.array(patches)) # unique values in the patches - assert len(unique) == n_max - - -def check_extract_patches_random(array, axes, patch_size, target=None): - """Check that the patches are extracted correctly. 
- - The array should have been generated using np.arange and np.reshape.""" - - patch_generator = _extract_patches_random( - array, axes=axes, patch_size=patch_size, target=target - ) - - # check patch shape - for patch, target in patch_generator: - assert patch.shape[2:] == patch_size - if target is not None: - assert target.shape[2:] == patch_size - - -def check_extract_tiles(array, axes, tile_size, overlaps): - """Test extracting patches randomly.""" - tile_data_generator = _extract_tiles(array, axes, tile_size, overlaps) - - tiles = [] - all_overlap_crop_coords = [] - all_stitch_coords = [] - # Assemble all tiles and their respective coordinates - for tile_data in tile_data_generator: - tile, _, _, overlap_crop_coords, stitch_coords = tile_data - tiles.append(tile) - all_overlap_crop_coords.append(overlap_crop_coords) - all_stitch_coords.append(stitch_coords) - - # check tile shape, ignore sample dimension - assert tile.shape[2:] == tile_size - assert len(overlap_crop_coords) == len(stitch_coords) == len(tile_size) - - # check that each tile has a unique set of coordinates - assert len(tiles) == len(all_overlap_crop_coords) == len(all_stitch_coords) - - # check that all values are covered by the tiles - n_max = np.prod(array.shape) # maximum value in the array - unique = np.unique(np.array(tiles)) # unique values in the patches - assert len(unique) >= n_max - - -@pytest.mark.parametrize( - "arr_shape, patch_size", - [ - ((1, 1, 8, 8), (2, 2)), - ((1, 1, 8, 8, 8), (2, 2, 2)), - ], -) -def test_patches_sanity_check(arr_shape, patch_size): - arr = np.zeros(arr_shape) - is_3d_patch = len(patch_size) == 3 - # check if the patch is 2D or 3D. Subtract 1 because the first dimension is sample - _patches_check_and_update(arr, patch_size, is_3d_patch) - - -@pytest.mark.parametrize( - "arr_shape, patch_size", - [ - # Wrong number of dimensions 2D - # minimum 3 dimensions CYX - ((10, 10), (5, 5, 5)), - # Wrong number of dimensions 3D - ((1, 1, 10, 10, 10), (5, 5)), - # Wrong z patch size - ((1, 10, 10), (5, 5, 5)), - ((10, 10, 10), (10, 5, 5)), - # Wrong YX patch sizes - ((1, 10, 10), (12, 5)), - ((1, 10, 10), (5, 11)), - ], -) -def test_patches_sanity_check_invalid_cases(arr_shape, patch_size): - arr = np.zeros(arr_shape) - is_3d_patch = len(patch_size) == 3 - # check if the patch is 2D or 3D. Subtract 1 because the first dimension is sample - with pytest.raises(ValueError): - _patches_check_and_update(arr, patch_size, is_3d_patch) - - -@pytest.mark.parametrize( - "patch_size", - [ - (2, 2), - (4, 2), - (4, 8), - (8, 8), - ], -) -def test_extract_patches_sequential_2d(array_2D, patch_size): - """Test extracting patches sequentially in 2D.""" - check_extract_patches_sequential(array_2D, "SYX", patch_size) - - -@pytest.mark.parametrize( - "patch_size", - [ - (2, 2, 4), - (4, 2, 2), - (2, 8, 4), - (4, 8, 8), - ], -) -def test_extract_patches_sequential_3d(array_3D, patch_size): - """Test extracting patches sequentially in 3D. - - The 3D array is a fixture of shape (1, 8, 16, 16).""" - # TODO changed the fixture to (1, 8, 16, 16), uneven shape doesnt work. 
We need to - # discuss the function or the test cases - check_extract_patches_sequential(array_3D, "SZYX", patch_size) - - -@pytest.mark.parametrize( - "patch_size", - [ - (2, 2), - (4, 2), - (4, 8), - (8, 8), - ], -) -def test_extract_patches_random_2d(array_2D, patch_size): - """Test extracting patches randomly in 2D.""" - check_extract_patches_random(array_2D, "SYX", patch_size) - - -@pytest.mark.parametrize( - "patch_size", - [ - (2, 2), - (4, 2), - (4, 8), - (8, 8), - ], -) -def test_extract_patches_random_supervised_2d(array_2D, patch_size): - """Test extracting patches randomly in 2D.""" - check_extract_patches_random( - array_2D, - "SYX", - patch_size, - target=array_2D - ) - - -@pytest.mark.parametrize( - "patch_size", - [ - (2, 2, 4), - (4, 2, 2), - (2, 8, 4), - (4, 8, 8), - ], -) -def test_extract_patches_random_3d(array_3D, patch_size): - """Test extracting patches randomly in 3D. - - The 3D array is a fixture of shape (1, 8, 16, 16).""" - check_extract_patches_random(array_3D, "SZYX", patch_size) - - -@pytest.mark.parametrize( - "tile_size, axes, overlaps", - [ - ((4, 4), "SYX", (2, 2)), - ((8, 8), "SYX", (4, 4)), - ], # TODO add more test cases with axes -) -def test_extract_tiles_2d(array_2D, axes, tile_size, overlaps): - """Test extracting tiles for prediction in 2D.""" - check_extract_tiles(array_2D, axes, tile_size, overlaps) - - -@pytest.mark.parametrize( - "tile_size, axes, overlaps", - [ - ((4, 4, 4), "SZYX", (2, 2, 2)), - ((8, 8, 8), "SZYX", (4, 4, 4)), - ], -) -def test_extract_tiles_3d(array_3D, axes, tile_size, overlaps): - """Test extracting tiles for prediction in 3D. - - The 3D array is a fixture of shape (1, 8, 16, 16).""" - check_extract_tiles(array_3D, axes, tile_size, overlaps) diff --git a/tests/dataset/test_prepare_dataset.py b/tests/dataset/test_prepare_dataset.py deleted file mode 100644 index 65f2bab7..00000000 --- a/tests/dataset/test_prepare_dataset.py +++ /dev/null @@ -1,90 +0,0 @@ -import numpy as np -import pytest - -from careamics.dataset.dataset_utils import list_files, read_tiff, reshape_data - -# TODO read source in one place, inside or outside dataset -# update axes for masks ? 
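The removed tests below exercise `reshape_data`, which normalizes arbitrary axes strings such as "TXY" or "ZYSXC" to the canonical "SCYX"/"SCZYX" layout, folding T into S and inserting singleton S and C dimensions where they are missing. The core pad-and-transpose logic can be sketched as follows; this is an illustration only (the helper name `normalize_axes` is made up, and T-folding plus input validation are omitted), not the careamics implementation:

import numpy as np


def normalize_axes(array: np.ndarray, axes: str):
    """Reorder `axes` to SC(Z)YX, adding singleton S and C axes if missing.

    Sketch under simplifying assumptions: no T axis, no input validation.
    """
    target = "SCZYX" if "Z" in axes else "SCYX"

    # prepend each missing axis as a singleton dimension
    for axis in target:
        if axis not in axes:
            array = array[np.newaxis, ...]
            axes = axis + axes

    # move the existing axes into the target order
    order = [axes.index(axis) for axis in target]
    return np.transpose(array, order), target

With this sketch, `normalize_axes(np.zeros((16, 3, 8)), "XZY")` returns an array of shape (1, 1, 3, 8, 16) and the axes string "SCZYX", matching the corresponding parametrized case in the table below.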
-
-
-def test_read_tiff(example_data_path):
-    read_tiff(example_data_path[0], axes="SYX")
-
-
-def test_read_tiff_raises(example_data_path):
-    with pytest.raises(ValueError):
-        read_tiff(example_data_path[1])
-
-
-@pytest.mark.parametrize(
-    "shape, axes, final_shape, final_axes",
-    [
-        ((16, 8), "YX", (1, 1, 16, 8), "SCYX"),
-        ((16, 8), "XY", (1, 1, 8, 16), "SCYX"),
-        ((16, 3, 8), "XZY", (1, 1, 3, 8, 16), "SCZYX"),
-        ((16, 3, 8), "ZXY", (1, 1, 16, 8, 3), "SCZYX"),
-        ((16, 3, 12), "SXY", (16, 1, 12, 3), "SCYX"),
-        ((5, 5, 2), "XYS", (2, 1, 5, 5), "SCYX"),
-        ((5, 1, 5, 2), "XZYS", (2, 1, 1, 5, 5), "SCZYX"),
-        ((5, 12, 5, 2), "ZXYS", (2, 1, 5, 5, 12), "SCZYX"),
-        ((16, 8, 5, 12), "SZYX", (16, 1, 8, 5, 12), "SCZYX"),
-        ((16, 8, 5), "YXT", (5, 1, 16, 8), "SCYX"),  # T, no C
-        ((4, 16, 8), "TXY", (4, 1, 8, 16), "SCYX"),
-        ((4, 16, 6, 8), "TXSY", (4 * 6, 1, 8, 16), "SCYX"),
-        ((4, 16, 6, 5, 8), "ZXTYS", (8 * 6, 1, 4, 5, 16), "SCZYX"),
-        ((5, 3, 5), "XCY", (1, 3, 5, 5), "SCYX"),  # C, no T
-        ((16, 3, 12, 8), "XCYS", (8, 3, 12, 16), "SCYX"),
-        ((16, 12, 3, 8), "ZXCY", (1, 3, 16, 8, 12), "SCZYX"),
-        ((16, 3, 12, 8), "XCYZ", (1, 3, 8, 12, 16), "SCZYX"),
-        ((16, 8, 12, 3), "ZYXC", (1, 3, 16, 8, 12), "SCZYX"),
-        ((16, 8, 21, 12, 3), "ZYSXC", (21, 3, 16, 8, 12), "SCZYX"),
-        ((16, 21, 8, 3, 12), "SZYCX", (16, 3, 21, 8, 12), "SCZYX"),
-        ((5, 3, 8, 6), "XTCY", (3, 8, 6, 5), "SCYX"),  # CT
-        ((16, 3, 12, 5, 8), "XCYTS", (8 * 5, 3, 12, 16), "SCYX"),
-        ((16, 10, 5, 6, 12, 8), "ZSXCYT", (10 * 8, 6, 16, 12, 5), "SCZYX"),
-    ],
-)
-def test_update_axes(shape, axes, final_shape, final_axes):
-    array = np.zeros(shape)
-
-    new_array, new_axes = reshape_data(array, axes)
-    assert new_array.shape == final_shape
-    assert new_axes == final_axes
-
-
-@pytest.mark.parametrize(
-    "shape, axes",
-    [
-        ((1, 16, 8), "YX"),
-        ((1, 16, 3, 8), "XZY"),
-    ],
-)
-def test_update_axes_raises(shape, axes):
-    array = np.zeros(shape)
-
-    with pytest.raises(ValueError):
-        reshape_data(array, axes)
-
-
-def test_list_files(example_data_path):
-    train_path, _, _ = example_data_path
-
-    files_from_path = list_files(train_path, "tif")
-    assert len(files_from_path) >= 1
-    assert all(file.suffix == ".tif" for file in files_from_path)
-
-    files_from_str = list_files(train_path._str, "tif")
-    assert len(files_from_str) >= 1
-    assert all(file.suffix == ".tif" for file in files_from_str)
-
-    files_from_list = list_files([train_path], "tif")
-    assert len(files_from_list) >= 1
-    assert all(file.suffix == ".tif" for file in files_from_list)
-
-
-def test_list_files_raises(example_data_path):
-    train_path, _, _ = example_data_path
-
-    with pytest.raises(ValueError):
-        list_files(train_path, "jpg")
-        list_files(train_path.name, "tif")
diff --git a/tests/models/test_model_factory.py b/tests/models/test_model_factory.py
index 7dfadf37..74952439 100644
--- a/tests/models/test_model_factory.py
+++ b/tests/models/test_model_factory.py
@@ -1,14 +1,62 @@
 import pytest
+from torch import nn, ones
 
-from careamics.config.architectures import UNetModel
-from careamics.models.model_factory import model_registry
+from careamics.config.architectures import (
+    CustomModel,
+    UNetModel,
+    VAEModel,
+    register_model,
+)
+from careamics.config.support import SupportedArchitecture
+from careamics.models import UNet, model_factory
 
 
-# TODO generalize to other architecture
 def test_model_registry_unet():
+    """Test that a UNet model can be instantiated from its configuration."""
     model_config = {
         "architecture": "UNet",
     }
 
     # instantiate model
-    model_registry(UNetModel(**model_config))
+    model = model_factory(UNetModel(**model_config))
+    assert isinstance(model, UNet)
+
+
+def test_model_registry_custom():
+    """Test that a custom model can be retrieved and instantiated."""
+
+    # create and register a custom model
+    @register_model(name="linear_model")
+    class LinearModel(nn.Module):
+        def __init__(self, in_features, out_features):
+            super().__init__()
+
+            self.in_features = in_features
+            self.out_features = out_features
+            self.weight = nn.Parameter(ones(in_features, out_features))
+            self.bias = nn.Parameter(ones(out_features))
+
+        def forward(self, input):
+            return (input @ self.weight) + self.bias
+
+    model_config = {
+        "architecture": SupportedArchitecture.CUSTOM.value,
+        "name": "linear_model",
+        "parameters": {"in_features": 10, "out_features": 5},
+    }
+
+    # instantiate model
+    model = model_factory(CustomModel(**model_config))
+    assert isinstance(model, LinearModel)
+    assert model.in_features == 10
+    assert model.out_features == 5
+
+
+def test_vae():
+    """Test that VAEs are currently not supported."""
+    model_config = {
+        "architecture": SupportedArchitecture.VAE.value,
+    }
+
+    with pytest.raises(NotImplementedError):
+        model_factory(VAEModel(**model_config))
diff --git a/tests/models/test_unet.py b/tests/models/test_unet.py
index 381608d0..a29f1103 100644
--- a/tests/models/test_unet.py
+++ b/tests/models/test_unet.py
@@ -48,6 +48,7 @@ def test_blurpool2d(input_shape):
         [1, 1] + [i // 2 for i in input_shape[2:]]
     )
 
+
 @pytest.mark.parametrize(
     "input_shape",
     [
diff --git a/tests/test_prediction_utils.py b/tests/prediction/test_prediction_utils.py
similarity index 82%
rename from tests/test_prediction_utils.py
rename to tests/prediction/test_prediction_utils.py
index d7f15462..53acdcfc 100644
--- a/tests/test_prediction_utils.py
+++ b/tests/prediction/test_prediction_utils.py
@@ -1,16 +1,17 @@
 import pytest
 
-from careamics.dataset.patching import _extract_tiles
+from careamics.dataset.patching.tiled_patching import extract_tiles
 from careamics.prediction.prediction_utils import stitch_prediction
 
 
 @pytest.mark.parametrize(
     "input_shape, axes, tile_size, overlaps",
-    [ ((8, 8), "YX", (4, 4), (2, 2)),
+    [
+        ((8, 8), "YX", (4, 4), (2, 2)),
         ((1, 8, 8), "SYX", (4, 4), (2, 2)),
         ((1, 7, 9), "SYX", (4, 4), (2, 2)),
         ((1, 9, 7, 8), "SZYX", (4, 4, 4), (2, 2, 2)),
-        ((321, 481), 'YX', (256, 256), (48, 48))
+        ((321, 481), "YX", (256, 256), (48, 48)),
     ],
 )
 def test_stitch_prediction(input_shape, axes, ordered_array, tile_size, overlaps):
@@ -23,7 +24,7 @@ def test_stitch_prediction(input_shape, axes, ordered_array, tile_size, overlaps):
     stitching_data = []
 
     # extract tiles
-    tiling_outputs = _extract_tiles(arr, axes, tile_size, overlaps)
+    tiling_outputs = extract_tiles(arr, axes, tile_size, overlaps)
 
     # Assemble all tiles as it's done during the prediction stage
     for tile_data in tiling_outputs:
diff --git a/tests/test_careamics_kiln.py b/tests/test_careamics_kiln.py
deleted file mode 100644
index a8d7ae2e..00000000
--- a/tests/test_careamics_kiln.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from pytorch_lightning import LightningModule
-
-from careamics import CAREamicsModule
-
-def test_careamics_module(minimum_algorithm):
-    """Test CAREamicsModule class as an intermediate layer."""
-    # extract model parameters
-    model_parameters = minimum_algorithm["model"].copy()
-    model_parameters.pop("architecture")
-
-    # extract optimizer and scheduler parameters
-    opt = minimum_algorithm["optimizer"]
-    optimizer_parameters = opt["parameters"] if "parameters" in opt else None
-
-    lr = minimum_algorithm["lr_scheduler"]
- lr_scheduler_parameters = lr["parameters"] if "parameters" in lr else None - - # instantiate CAREamicsModule - module = CAREamicsModule( - algorithm_type=minimum_algorithm["algorithm_type"], - loss=minimum_algorithm["loss"], - architecture=minimum_algorithm["model"]["architecture"], - model_parameters=model_parameters, - optimizer=opt["name"], - optimizer_parameters=optimizer_parameters, - lr_scheduler=lr["name"], - lr_scheduler_parameters=lr_scheduler_parameters, - ) - - assert isinstance(module, LightningModule) \ No newline at end of file diff --git a/tests/test_careamist.py b/tests/test_careamist.py index db8c7de4..b7c28d0b 100644 --- a/tests/test_careamist.py +++ b/tests/test_careamist.py @@ -1,14 +1,19 @@ +import numpy as np import pytest +import tifffile from careamics import CAREamist, Configuration, save_configuration +from careamics.config.support import SupportedAlgorithm, SupportedData def test_no_parameters(): + """Test that CAREamics cannot be instantiated without parameters.""" with pytest.raises(ValueError): CAREamist() -def test_minimum_configuration_object(minimum_configuration): +def test_minimum_configuration_via_object(minimum_configuration): + """Test that CAREamics can be instantiated with a minimum configuration object.""" # create configuration config = Configuration(**minimum_configuration) @@ -16,10 +21,141 @@ def test_minimum_configuration_object(minimum_configuration): CAREamist(configuration=config) -def test_minimum_configuration_path(tmp_path, minimum_configuration): +def test_minimum_configuration_via_path(tmp_path, minimum_configuration): + """Test that CAREamics can be instantiated with a path to a minimum + configuration. + """ # create configuration config = Configuration(**minimum_configuration) path_to_config = save_configuration(config, tmp_path) # instantiate CAREamist - CAREamist(path_to_config=path_to_config) \ No newline at end of file + CAREamist(path_to_config=path_to_config) + + +def test_train_error_target_unsupervised_algorithm(tmp_path, minimum_configuration): + """Test that an error is raised when a target is provided for an unsupervised + algorithm. 
+ """ + # create configuration + config = Configuration(**minimum_configuration) + config.algorithm.algorithm = SupportedAlgorithm.N2V.value + + # train error with Paths + config.data.data_type = SupportedData.TIFF.value + careamics = CAREamist(configuration=config) + with pytest.raises(ValueError): + careamics.train( + path_to_train_data=tmp_path, + path_to_train_target=tmp_path, + ) + + # train error with strings + with pytest.raises(ValueError): + careamics.train( + path_to_train_data=str(tmp_path), + path_to_train_target=str(tmp_path), + ) + + # train error with arrays + config.data.data_type = SupportedData.ARRAY.value + careamics = CAREamist(configuration=config) + with pytest.raises(ValueError): + careamics.train( + train_data=np.ones((32, 32)), + train_target=np.ones((32, 32)), + ) + + +def test_train_array(minimum_configuration): + """Test that CAREamics can be trained with arrays.""" + # training data + train_array = np.ones((32, 32)) + val_array = np.ones((32, 32)) + + # create configuration + config = Configuration(**minimum_configuration) + config.training.num_epochs = 1 + config.training.batch_size = 2 + config.data.axes = "YX" + config.data.data_type = SupportedData.ARRAY.value + config.data.patch_size = (8, 8) + + # instantiate CAREamist + careamist = CAREamist(configuration=config) + + # train CAREamist + careamist.train(train_array, val_array) + + # check that it recorded mean and std + assert careamist.cfg.data.mean is not None + assert careamist.cfg.data.std is not None + # TODO somethign to check that it trained, maybe through callback + + +def test_train_tiff_files_in_memory(tmp_path, minimum_configuration): + """Test that CAREamics can be trained with tiff files in memory.""" + # training data + train_array = np.ones((32, 32)) + val_array = np.ones((32, 32)) + + # save files + train_file = tmp_path / "train.tiff" + tifffile.imwrite(train_file, train_array) + + val_file = tmp_path / "val.tiff" + tifffile.imwrite(val_file, val_array) + + # create configuration + config = Configuration(**minimum_configuration) + config.training.num_epochs = 1 + config.training.batch_size = 2 + config.data.axes = "YX" + config.data.data_type = SupportedData.TIFF.value + config.data.patch_size = (8, 8) + + # instantiate CAREamist + careamist = CAREamist(configuration=config) + + # train CAREamist + careamist.train(train_file, val_file) + + # check that it recorded mean and std + assert careamist.cfg.data.mean is not None + assert careamist.cfg.data.std is not None + # TODO somethign to check that it trained, maybe through callback + + +def test_train_tiff_files(tmp_path, minimum_configuration): + """Test that CAREamics can be trained with tiff files by deactivating + the in memory dataset. 
+ """ + # training data + train_array = np.ones((32, 32)) + val_array = np.ones((32, 32)) + + # save files + train_file = tmp_path / "train.tiff" + tifffile.imwrite(train_file, train_array) + + val_file = tmp_path / "val.tiff" + tifffile.imwrite(val_file, val_array) + + # create configuration + config = Configuration(**minimum_configuration) + config.training.num_epochs = 1 + config.training.batch_size = 2 + config.data.axes = "YX" + config.data.data_type = SupportedData.TIFF.value + config.data.patch_size = (8, 8) + + # instantiate CAREamist + careamist = CAREamist(configuration=config) + + # train CAREamist + careamist.train(train_file, val_file, use_in_memory=False) + + # check that it recorded mean and std + assert careamist.cfg.data.mean is not None + assert careamist.cfg.data.std is not None + # TODO somethign to check that it trained, maybe through callback diff --git a/tests/test_conftest.py b/tests/test_conftest.py index c76a4100..2e3ff17f 100644 --- a/tests/test_conftest.py +++ b/tests/test_conftest.py @@ -1,7 +1,7 @@ from careamics import Configuration -from careamics.config.algorithm import AlgorithmModel -from careamics.config.data import DataModel -from careamics.config.training import Training +from careamics.config.algorithm_model import AlgorithmModel +from careamics.config.data_model import DataModel +from careamics.config.training_model import Training def test_minimum_algorithm(minimum_algorithm): diff --git a/tests/test_engine.py b/tests/test_engine.py deleted file mode 100644 index 4e82391d..00000000 --- a/tests/test_engine.py +++ /dev/null @@ -1,64 +0,0 @@ -# import pytest - -# from careamics.config import Configuration -# from careamics.engine import Engine -# from careamics.models import create_model - - -# def test_engine_init_errors(): -# with pytest.raises(ValueError): -# Engine(config=None, config_path=None, model_path=None) - -# with pytest.raises(TypeError): -# Engine(config="config", config_path=None, model_path=None) - -# with pytest.raises(FileNotFoundError): -# Engine(config=None, config_path="some/path", model_path=None) - -# with pytest.raises(FileNotFoundError): -# Engine(config=None, config_path=None, model_path="some/other/path") - - -# def test_engine_predict_errors(minimum_config: dict): -# config = Configuration(**minimum_config) -# engine = Engine(config=config) - -# with pytest.raises(ValueError): -# engine.predict(input=None) - -# config.data.mean = None -# config.data.std = None -# with pytest.raises(ValueError): -# engine.predict(input="some/path") - - -# @pytest.mark.parametrize( -# "epoch, losses", [(0, [1.0]), (1, [1.0, 0.5]), (2, [1.0, 0.5, 1.0])] -# ) -# def test_engine_save_checkpoint(epoch, losses, minimum_config: dict): -# init_config = Configuration(**minimum_config) -# engine = Engine(config=init_config) - -# # Mock engine attributes to test save_checkpoint -# engine.optimizer.param_groups[0]["lr"] = 1 -# engine.lr_scheduler.patience = 1 -# path = engine._save_checkpoint(epoch=epoch, losses=losses, save_method="state_dict") -# assert path.exists() - -# if epoch == 0: -# assert path.stem.split("_")[-1] == "best" - -# if losses[-1] == min(losses): -# assert path.stem.split("_")[-1] == "best" -# else: -# assert path.stem.split("_")[-1] == "latest" - -# model, optimizer, scheduler, scaler, config = create_model(model_path=path) -# assert all(model.children()) == all(engine.model.children()) -# assert optimizer.__class__ == engine.optimizer.__class__ -# assert scheduler.__class__ == engine.lr_scheduler.__class__ -# assert 
-#     assert optimizer.param_groups[0]["lr"] == engine.optimizer.param_groups[0]["lr"]
-#     assert optimizer.defaults["lr"] != engine.optimizer.param_groups[0]["lr"]
-#     assert scheduler.patience == engine.lr_scheduler.patience
-#     assert config == init_config
diff --git a/tests/test_lightning_module.py b/tests/test_lightning_module.py
new file mode 100644
index 00000000..5b77b269
--- /dev/null
+++ b/tests/test_lightning_module.py
@@ -0,0 +1,32 @@
+from careamics.config import AlgorithmModel
+from careamics.lightning_module import CAREamicsKiln, CAREamicsModule
+
+
+def test_careamics_module(minimum_algorithm):
+    """Test that the minimum algorithm allows instantiating the Lightning API
+    intermediate layer."""
+    algo_config = AlgorithmModel(**minimum_algorithm)
+
+    # extract model parameters
+    model_parameters = algo_config.model.model_dump(exclude_none=True)
+    model_parameters.pop("architecture")
+
+    # instantiate CAREamicsModule
+    CAREamicsModule(
+        algorithm=algo_config.algorithm,
+        loss=algo_config.loss,
+        architecture=algo_config.model.architecture,
+        model_parameters=model_parameters,
+        optimizer=algo_config.optimizer.name,
+        optimizer_parameters=algo_config.optimizer.parameters,
+        lr_scheduler=algo_config.lr_scheduler.name,
+        lr_scheduler_parameters=algo_config.lr_scheduler.parameters,
+    )
+
+
+def test_careamics_kiln(minimum_algorithm):
+    """Test that the minimum algorithm allows instantiating a CAREamicsKiln."""
+    algo_config = AlgorithmModel(**minimum_algorithm)
+
+    # instantiate CAREamicsKiln
+    CAREamicsKiln(algo_config)
diff --git a/tests/transforms/test_manipulate_n2v.py b/tests/transforms/test_manipulate_n2v.py
new file mode 100644
index 00000000..0d57976e
--- /dev/null
+++ b/tests/transforms/test_manipulate_n2v.py
@@ -0,0 +1,32 @@
+import numpy as np
+import pytest
+from albumentations import Compose
+
+from careamics.config.support import SupportedPixelManipulation
+from careamics.transforms import ManipulateN2V
+
+
+@pytest.mark.parametrize(
+    "strategy", [SupportedPixelManipulation.UNIFORM, SupportedPixelManipulation.MEDIAN]
+)
+def test_manipulate_n2v(strategy):
+    """Test the N2V augmentation."""
+    # create array
+    array = np.arange(16 * 16).reshape((16, 16))
+
+    # create augmentation
+    aug = Compose(
+        [ManipulateN2V(roi_size=5, masked_pixel_percentage=5, strategy=strategy)]
+    )
+
+    # apply augmentation
+    augmented = aug(image=array)
+    assert "image" in augmented
+    assert len(augmented["image"]) == 3  # transformed_patch, original_patch, mask
+
+    # assert that the difference between the original and transformed patch are the
+    # same pixels that are selected by the mask
+    tr_patch, orig_patch, mask = augmented["image"]
+    diff_coords = np.array(np.where(tr_patch != orig_patch))
+    mask_coords = np.array(np.where(mask == 1))
+    assert np.array_equal(diff_coords, mask_coords)
diff --git a/tests/transforms/test_nd_flip.py b/tests/transforms/test_nd_flip.py
new file mode 100644
index 00000000..626b6e5d
--- /dev/null
+++ b/tests/transforms/test_nd_flip.py
@@ -0,0 +1,129 @@
+import numpy as np
+import pytest
+
+from careamics.transforms import NDFlip
+
+
+def test_randomness(ordered_array):
+    """Test randomness of the flipping using the `p` parameter."""
+    # create array
+    array = ordered_array((2, 2))
+
+    # create augmentation that never applies
+    aug = NDFlip(p=0.0)
+
+    # apply augmentation
+    augmented = aug(image=array)["image"]
+    assert np.array_equal(augmented, array)
+
+    # create augmentation that always applies
+    aug = NDFlip(p=1.0)
+
+
# apply augmentation + augmented = aug(image=array)["image"] + assert not np.array_equal(augmented, array) + + +@pytest.mark.parametrize( + "shape", + [ + # 2D + (2, 2, 1), + (2, 2, 2), + # 3D + (2, 2, 2, 1), + (2, 2, 2, 2), + ], +) +def test_flip_nd(ordered_array, shape): + """Test flipping for 2D and 3D arrays.""" + np.random.seed(42) + + # create array + array: np.ndarray = ordered_array(shape) + + # create augmentation + is_3D = len(shape) == 4 + aug = NDFlip(p=1, is_3D=is_3D, flip_z=True) + + # potential flips + axes = [0, 1, 2] if is_3D else [0, 1] + flips = [np.flip(array, axis=axis) for axis in axes] + + # apply augmentation 10 times + augs = [] + for _ in range(10): + augmented = aug(image=array)["image"] + + # check that the augmented array is one of the potential flips + which_axes = [np.array_equal(augmented, flip) for flip in flips] + + assert any(which_axes) + augs.append(which_axes.index(True)) + + # check that all flips were applied + assert set(augs) == set(axes) + + +def test_flip_z(ordered_array): + """Test turning the Z flipping off.""" + np.random.seed(42) + + # create array + array: np.ndarray = ordered_array((2, 2, 2, 2)) + + # create augmentation + aug = NDFlip(p=1, is_3D=True, flip_z=False) + + # potential flips on Y and X axes + flips = [np.flip(array, axis=1), np.flip(array, axis=2)] + + # apply augmentation 10 times + augs = [] + for _ in range(10): + augmented = aug(image=array)["image"] + + # check that the augmented array is one of the potential flips + which_axes = [np.array_equal(augmented, flip) for flip in flips] + + assert any(which_axes) + augs.append(which_axes.index(True)) + + # check that all flips were applied (first and second flip) + assert set(augs) == {0, 1} + + +def test_flip_mask(ordered_array): + """Test flipping masks in 3D.""" + np.random.seed(42) + + # create array + array: np.ndarray = ordered_array((2, 2, 2, 4)) + mask = array[..., 2:] + array = array[..., :2] + + # create augmentation + aug = NDFlip(p=1, is_3D=True, flip_z=True) + + # potential flips on Y and X axes + array_flips = [np.flip(array, axis=axis) for axis in range(3)] + mask_flips = [np.flip(mask, axis=axis) for axis in range(3)] + + # apply augmentation 10 times + for _ in range(10): + transfo = aug(image=array, mask=mask) + aug_array = transfo["image"] + aug_mask = transfo["mask"] + + # check that the augmented array is one of the potential flips + which_axes = [np.array_equal(aug_array, flip) for flip in array_flips] + assert any(which_axes) + img_axis = which_axes.index(True) + + # same for the masks + which_axes = [np.array_equal(aug_mask, flip) for flip in mask_flips] + assert any(which_axes) + mask_axis = which_axes.index(True) + + # same flip for array and mask + assert img_axis == mask_axis diff --git a/tests/transforms/test_pixel_manipulate.py b/tests/transforms/test_pixel_manipulate.py deleted file mode 100644 index cc23e0db..00000000 --- a/tests/transforms/test_pixel_manipulate.py +++ /dev/null @@ -1,110 +0,0 @@ -import numpy as np -import pytest - -from careamics.transforms.pixel_manipulation import ( - default_manipulate, - _get_stratified_coords, - _apply_struct_mask, - median_manipulate -) - - -@pytest.mark.parametrize( - "mask_pixel_perc, shape, num_iterations", - [(0.4, (32, 32), 1000), (0.4, (10, 10, 10), 1000)], -) -def test_get_stratified_coords(mask_pixel_perc, shape, num_iterations): - """Test the get_stratified_coords function. 
- - Ensure that the array of coordinates is randomly distributed across the - image and doesn't demonstrate any strong pattern. - """ - # Define the dummy array - array = np.zeros(shape) - - # Iterate over the number of iterations and add the coordinates. This is an MC - # simulation to ensure that the coordinates are randomly distributed and not - # biased towards any particular region. - for _ in range(num_iterations): - # Get the coordinates of the pixels to be masked - coords = _get_stratified_coords(mask_pixel_perc, shape) - # Check every pair in the array of coordinates - for coord_pair in coords: - # Check that the coordinates are of the same shape as the patch - assert len(coord_pair) == len(shape) - # Check that the coordinates are positive values - assert all(coord_pair) >= 0 - # Check that the coordinates are within the shape of the array - assert [c <= s for c, s in zip(coord_pair, shape)] - - # Add the 1 to the every coordinate location. - array[tuple(np.array(coords).T.tolist())] += 1 - - # Ensure that there's no strong pattern in the array and sufficient number of - # pixels is masked. - assert np.sum(array == 0) < np.sum(shape) - - -def test_default_manipulate_2d(array_2D: np.ndarray): - """Test the default_manipulate function. - - Ensure that the function returns an array of the same shape as the input. - """ - # Get manipulated patch, original patch and mask - patch, original_patch, mask = default_manipulate(array_2D, 0.5) - - # Add sample dimension to the moch input array - array_2D = array_2D[np.newaxis, ...] - # Check that the shapes of the arrays are the same - assert patch.shape == array_2D.shape - assert original_patch.shape == array_2D.shape - assert mask.shape == array_2D.shape - - # Check that the manipulated patch is different from the original patch - assert not np.array_equal(patch, original_patch) - - -def test_default_manipulate_3d(array_3D: np.ndarray): - """Test the default_manipulate function. - - Ensure that the function returns an array of the same shape as the input. - """ - # Get manipulated patch, original patch and mask - patch, original_patch, mask = default_manipulate(array_3D, 0.5) - - # Add sample dimension to the mock input array - array_3D = array_3D[np.newaxis, ...] - # Check that the shapes of the arrays are the same - assert patch.shape == array_3D.shape - assert original_patch.shape == array_3D.shape - assert mask.shape == array_3D.shape - - # Check that the manipulated patch is different from the original patch - assert not np.array_equal(patch, original_patch) - - -# TODO what is this testing? 
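Both the removed `_get_stratified_coords` test here and its replacement later in this diff check, by Monte-Carlo accumulation, that the masked coordinates cover the image without a strong spatial pattern. A common way to obtain that behaviour is jittered-grid (stratified) sampling: split the image into roughly equal cells and draw one random pixel per cell. A minimal sketch, assuming the percentage is expressed in percent; the function name and details are illustrative, not careamics' implementation:

import numpy as np


def stratified_coords(mask_percentage, shape, rng=None):
    """Draw roughly `mask_percentage` % of pixel coordinates, one per grid cell.

    Illustrative jittered-grid sampler; names and details are assumptions.
    """
    rng = np.random.default_rng() if rng is None else rng
    n_pixels = np.prod(shape)
    n_masked = max(1, int(n_pixels * mask_percentage / 100))

    # cell side such that one sample per cell gives roughly n_masked samples
    cell = max(1, round((n_pixels / n_masked) ** (1 / len(shape))))

    coords = []
    for corner in np.ndindex(*(int(np.ceil(s / cell)) for s in shape)):
        # one jittered pixel per cell, clipped to the image bounds
        pixel = tuple(
            min(c * cell + rng.integers(0, cell), s - 1)
            for c, s in zip(corner, shape)
        )
        coords.append(pixel)
    return coords

For a 32x32 patch and 0.4 %, this yields a cell side of 16 and four coordinates, one per quadrant, which is exactly the kind of even spread the tests assert.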
-@pytest.mark.parametrize("mask", [[[0, 1, 1, 1, 1, 1, 0]]]) -def test_apply_struct_mask(mask): - patch = np.zeros((64, 64)) - coords = _get_stratified_coords(0.2, patch.shape) - patch = _apply_struct_mask(patch, coords, mask) - - -# TODO come up with better tests for that one -@pytest.mark.parametrize("shape", - [ - (8, 8), - (8, 8, 8) - ] -) -def test_median_manipulate_ordered(ordered_array, shape): - array = ordered_array(shape) - patch, original_patch, mask = median_manipulate(array, 5, 0.5) - - # masked array has at least one masked pixel - assert np.any(mask) - - # check that the arrays are different - assert not np.array_equal(patch, original_patch) - diff --git a/tests/transforms/test_pixel_manipulation.py b/tests/transforms/test_pixel_manipulation.py new file mode 100644 index 00000000..8e0c56e4 --- /dev/null +++ b/tests/transforms/test_pixel_manipulation.py @@ -0,0 +1,202 @@ +import numpy as np +import pytest + +from careamics.transforms.pixel_manipulation import ( + _apply_struct_mask, + _get_stratified_coords, + median_manipulate, + uniform_manipulate, +) + + +# TODO: what is the minimum coords given? should make sure that there is at lea +@pytest.mark.parametrize( + "mask_pixel_perc, shape, num_iterations", + [(0.4, (32, 32), 1000), (0.4, (10, 10, 10), 1000)], +) +def test_get_stratified_coords(mask_pixel_perc, shape, num_iterations): + """Test the get_stratified_coords function. + + Ensure that the array of coordinates is randomly distributed across the + image and that most pixels get selected. + """ + # Define the dummy array + array = np.zeros(shape) + + # Iterate over the number of iterations and add the coordinates. This is an MC + # simulation to ensure that the coordinates are randomly distributed and not + # biased towards any particular region. + for _ in range(num_iterations): + # Get the coordinates of the pixels to be masked + coords = _get_stratified_coords(mask_pixel_perc, shape) + + # Check that there is at least one coordinate choosen + assert len(coords) > 0 + + # Check every pair in the array of coordinates + for coord_pair in coords: + # Check that the coordinates are of the same shape as the patch dims + assert len(coord_pair) == len(shape) + + # Check that the coordinates are positive values + assert all(coord_pair) >= 0 + + # Check that the coordinates are within the shape of the array + assert [c <= s for c, s in zip(coord_pair, shape)] + + # Add the 1 to the every coordinate location. + array[tuple(np.array(coords).T.tolist())] += 1 + + # Ensure that there's no strong pattern in the array and sufficient number of + # pixels is masked. + assert np.sum(array == 0) < np.sum(shape) + + +@pytest.mark.parametrize("shape", [(8, 8), (3, 8, 8), (8, 8, 8), (3, 8, 8, 8)]) +def test_uniform_manipulate(ordered_array, shape): + """Test the uniform_manipulate function. + + Ensures that the mask corresponds to the manipulated pixels, and that the + manipulated pixels have a value taken from a ROI surrounding them. 
+ """ + # create the array + patch = ordered_array(shape) + + # manipulate the array + transform_patch, mask = uniform_manipulate( + patch, mask_pixel_percentage=10, subpatch_size=5 + ) + + # find pixels that have different values between patch and transformed patch + diff_coords = np.array(np.where(patch != transform_patch)) + + # find non-zero pixels in the mask + mask_coords = np.array(np.where(mask == 1)) + + # check that the transformed pixels correspond to the masked pixels + assert np.array_equal(diff_coords, mask_coords) + + # for each pixel masked, check that the manipulated pixel value is within the roi + for i in range(mask_coords.shape[-1]): + # get coordinates + coords = mask_coords[..., i] + + # get roi using slice in each dimension + slices = tuple( + [ + slice(max(0, coords[i] - 2), min(shape[i], coords[i] + 3)) + for i in range(-coords.shape[0] + 1, 0) # range -4, -3, -2, -1 + ] + ) + roi = patch[ + (...,) + slices + ] # TODO ellipsis needed bc singleton dim, might need to go away + + # check that the pixel value comes from the actual roi + assert transform_patch[tuple(coords)] in roi + + +@pytest.mark.parametrize("shape", [(8, 8), (3, 8, 8), (8, 8, 8), (3, 8, 8, 8)]) +def test_median_manipulate(ordered_array, shape): + """Test the uniform_manipulate function. + + Ensures that the mask corresponds to the manipulated pixels, and that the + manipulated pixels have a value taken from a ROI surrounding them. + """ + # create the array + patch = ordered_array(shape) + + # manipulate the array + transform_patch, mask = median_manipulate( + patch, subpatch_size=5, mask_pixel_percentage=10 + ) + + # find pixels that have different values between patch and transformed patch + diff_coords = np.array(np.where(patch != transform_patch)) + + # find non-zero pixels in the mask + mask_coords = np.array(np.where(mask == 1)) + + # check that the transformed pixels correspond to the masked pixels + assert np.array_equal(diff_coords, mask_coords) + + # for each pixel masked, check that the manipulated pixel value is within the roi + for i in range(mask_coords.shape[-1]): + # get coordinates + coords = mask_coords[..., i] + + # get roi using slice in each dimension + slices = tuple( + [ + slice(max(0, coords[i] - 2), min(shape[i], coords[i] + 3)) + for i in range(-coords.shape[0] + 1, 0) # range -4, -3, -2, -1 + ] + ) + roi = patch[ + (...,) + slices + ] # TODO ellipsis needed bc singleton dim, might need to go away + + # check that the pixel value comes from the actual roi + assert transform_patch[tuple(coords)] == np.median(roi) + + +@pytest.mark.parametrize( + "coords, struct_mask_params", + [((2, 2), [1, 5]), ((3, 4), [0, 5]), ((9, 0), [0, 5]), (((1, 2), (3, 4)), [1, 5])], +) +def test_apply_struct_mask(coords, struct_mask_params): + """Test the uniform_manipulate function. + + Ensures that the mask corresponds to the manipulated pixels, and that the + manipulated pixels have a value taken from a ROI surrounding them. + """ + # create the array of random integers. 
This is to ensurewe can compare exact values + patch = np.arange( + 100, + ).reshape((10, 10)) + # make a copy of the original patch for comparison + original_patch = patch.copy() + coords = np.array(coords) + # expand the coords if only one roi is given + if coords.ndim == 1: + coords = coords[None, :] + struct_axis, struct_span = struct_mask_params + # manipulate the array + transform_patch = _apply_struct_mask( + patch, + coords=coords, + mask_params=struct_mask_params, + ) + changed_values = patch[np.where(original_patch != transform_patch)] + # check that the transformed pixels correspond to the masked pixels + transformed = [] + if struct_axis == 0: + for i in range(coords.shape[0]): + column_coords_to_mask = [ + c + for c in range( + max(0, coords[i, 1] - struct_span // 2), + min(transform_patch.shape[1], coords[i, 1] + struct_span // 2) + 1, + ) + if c != coords[i, 1] + ] + transformed.append(transform_patch[coords[i, 0]][column_coords_to_mask]) + assert np.array_equal( + np.sort(changed_values), np.sort(np.concatenate(transformed, axis=0)) + ) + + else: + for i in range(coords.shape[0]): + row_coords_to_mask = [ + c + for c in range( + max(0, coords[i, 0] - struct_span // 2), + min(transform_patch.shape[1], coords[i, 0] + struct_span // 2) + 1, + ) + if c != coords[i, 0] + ] + transformed.append(transform_patch[:, coords[i, 1]][row_coords_to_mask]) + + assert np.array_equal( + np.sort(changed_values), np.sort(np.concatenate(transformed, axis=0)) + ) diff --git a/tests/transforms/test_xy_random_rotate90.py b/tests/transforms/test_xy_random_rotate90.py new file mode 100644 index 00000000..cea51b87 --- /dev/null +++ b/tests/transforms/test_xy_random_rotate90.py @@ -0,0 +1,117 @@ +import numpy as np +import pytest + +from careamics.transforms import XYRandomRotate90 + + +def test_randomness(ordered_array): + """Test randomness of the flipping using the `p` parameter.""" + # create array + array = ordered_array((1, 2, 2, 1)) + + # create augmentation that never applies + aug = XYRandomRotate90(p=0.0) + + # apply augmentation + augmented = aug(image=array)["image"] + assert np.array_equal(augmented, array) + + # create augmentation that always applies + aug = XYRandomRotate90(p=1.0) + + # apply augmentation + augmented = aug(image=array)["image"] + assert not np.array_equal(augmented, array) + + +@pytest.mark.parametrize( + "shape", + [ + # 2D + (2, 2, 1), + (2, 2, 2), + # 3D + (2, 2, 2, 1), + (2, 2, 2, 2), + ], +) +def test_xy_rotate(ordered_array, shape): + """Test rotation for 2D and 3D arrays.""" + np.random.seed(42) + + # create array + array: np.ndarray = ordered_array(shape) + + # create augmentation + is_3D = len(shape) == 4 + aug = XYRandomRotate90(p=1, is_3D=is_3D) + + # potential rotations + axes = (1, 2) if is_3D else (0, 1) + rots = [ + np.rot90(array, k=1, axes=axes), + np.rot90(array, k=2, axes=axes), + np.rot90(array, k=3, axes=axes), + ] + + # apply augmentation 10 times + augs = [] + for _ in range(10): + augmented = aug(image=array)["image"] + + # check that the augmented array is one of the potential rots + which_number = [np.array_equal(augmented, rot) for rot in rots] + + assert any(which_number) + augs.append(which_number.index(True)) + + # check that all rots were applied (indices of rots) + assert set(augs) == {0, 1, 2} + + +def test_mask_rotate(ordered_array): + """Test rotating masks in 3D.""" + np.random.seed(42) + + # create array + array: np.ndarray = ordered_array((2, 2, 2, 4)) + mask = array[..., 2:] + array = array[..., :2] + + # create augmentation + 
+    is_3D = len(array.shape) == 4
+    aug = XYRandomRotate90(p=1, is_3D=is_3D)
+
+    # potential rotations
+    axes = (1, 2)
+    array_rots = [
+        np.rot90(array, k=1, axes=axes),
+        np.rot90(array, k=2, axes=axes),
+        np.rot90(array, k=3, axes=axes),
+    ]
+    mask_rots = [
+        np.rot90(mask, k=1, axes=axes),
+        np.rot90(mask, k=2, axes=axes),
+        np.rot90(mask, k=3, axes=axes),
+    ]
+
+    # apply augmentation 10 times
+    for _ in range(10):
+        augmented = aug(image=array, mask=mask)
+        aug_array = augmented["image"]
+        aug_mask = augmented["mask"]
+
+        # check that the augmented array is one of the potential rots
+        which_number = [np.array_equal(aug_array, rot) for rot in array_rots]
+
+        assert any(which_number)
+        img_n_rots = which_number.index(True)
+
+        # same for the masks
+        which_number = [np.array_equal(aug_mask, rot) for rot in mask_rots]
+
+        assert any(which_number)
+        mask_n_rots = which_number.index(True)
+
+        # same rot for array and mask
+        assert img_n_rots == mask_n_rots
diff --git a/tests/utils/test_axes.py b/tests/utils/test_axes.py
index 33eedf45..88aa0332 100644
--- a/tests/utils/test_axes.py
+++ b/tests/utils/test_axes.py
@@ -10,8 +10,11 @@
     ("yx", True),
     ("Yx", True),
     ("Zyx", True),
+    ("STYX", True),
+    ("CYX", True),
     ("TzYX", True),
     ("SZYX", True),
+    ("STZYX", True),
     # Failing due to order
     ("XY", False),
     ("YXZ", False),
@@ -20,14 +23,9 @@
     # too few axes
     ("", False),
     ("X", False),
-    # too many axes
-    ("STZYX", False),
     # no yx axes
     ("ZT", False),
     ("ZY", False),
-    # unsupported axes or axes pair
-    ("STYX", False),
-    ("CYX", False),
     # repeating characters
     ("YYX", False),
     ("YXY", False),
diff --git a/tests/utils/test_base_enum.py b/tests/utils/test_base_enum.py
new file mode 100644
index 00000000..a181d5b9
--- /dev/null
+++ b/tests/utils/test_base_enum.py
@@ -0,0 +1,12 @@
+from careamics.utils.base_enum import BaseEnum
+
+
+class MyEnum(str, BaseEnum):
+    A = "a"
+    B = "b"
+    C = "c"
+
+
+def test_base_enum():
+    """Test that BaseEnum allows the `in` operator with values."""
+    assert "b" in MyEnum
diff --git a/tests/utils/test_method_dispatch.py b/tests/utils/test_method_dispatch.py
new file mode 100644
index 00000000..5c7d380d
--- /dev/null
+++ b/tests/utils/test_method_dispatch.py
@@ -0,0 +1,54 @@
+from dataclasses import dataclass
+from pathlib import Path
+
+import numpy as np
+
+from careamics.utils import method_dispatch
+
+
+@dataclass
+class MyObject:
+    """Simple data class."""
+
+    value: int
+
+
+class MyClass:
+    """Test class with dispatched methods."""
+
+    @method_dispatch
+    def main_function(self, arg: MyObject):
+        return arg.value
+
+    @main_function.register
+    def _main_function_with_path(self, arg1: Path, arg2: Path):
+        min_len = min(len(arg1.name), len(arg2.name))
+        return self.main_function(MyObject(min_len))
+
+    @main_function.register
+    def _main_function_with_array(self, arg: np.ndarray):
+        mean = np.mean(arg)
+
+        # pass as keyword argument, important to test that the dispatch works
+        # with both args and kwargs
+        return self.main_function(arg=MyObject(mean))
+
+
+def test_method_dispatch():
+    """Test that method dispatch works on an instance method, dispatching the call
+    based on the second argument of the method.
+ """ + test_class = MyClass() + + # test with TestObject + test_object = MyObject(5) + assert test_class.main_function(test_object) == 5 + + # test with Path + test_path1 = Path("test-longer") + test_path2 = Path("test-short") + assert test_class.main_function(test_path1, test_path2) == len(test_path2.name) + + # test with np.ndarray + test_array = np.array([1, 2, 3]) + assert test_class.main_function(test_array) == 2 diff --git a/tests/test_metrics.py b/tests/utils/test_metrics.py similarity index 100% rename from tests/test_metrics.py rename to tests/utils/test_metrics.py diff --git a/tests/utils/test_torch_utils.py b/tests/utils/test_torch_utils.py index 5178ebe1..3b9c28af 100644 --- a/tests/utils/test_torch_utils.py +++ b/tests/utils/test_torch_utils.py @@ -1,4 +1,5 @@ from torch import optim + from careamics.utils.torch_utils import get_optimizers, get_schedulers @@ -8,7 +9,7 @@ def test_get_schedulers_exist(): """ for scheduler in get_schedulers(): assert hasattr(optim.lr_scheduler, scheduler) - + def test_get_optimizers_exist(): """Test that the function `get_optimizers` return @@ -16,4 +17,3 @@ def test_get_optimizers_exist(): """ for optimizer in get_optimizers(): assert hasattr(optim, optimizer) - diff --git a/tests/utils/test_wandb.py b/tests/utils/test_wandb.py index ec4750c5..0e293f36 100644 --- a/tests/utils/test_wandb.py +++ b/tests/utils/test_wandb.py @@ -1,10 +1,3 @@ -from pathlib import Path -from unittest import mock - -from careamics.config import Configuration -from careamics.utils.wandb import WandBLogging - - # @mock.patch("careamics.utils.wandb.wandb") # def test_wandb_logger(wandb, tmp_path: Path, minimum_config: dict): # config = Configuration(**minimum_config)