diff --git a/.gitignore b/.gitignore
index 4124b2bf1..7fe9a046b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,12 @@ dist/
 # Pycharm editor settings
 .idea
 
+# vscode editor settings
+.vscode
+
+# MacOS
+.DS_Store
+
 # project dirs
 /datasets
 /models
diff --git a/INSTALL.md b/INSTALL.md
index 2bdfb7fe3..b1bfaa293 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -61,7 +61,58 @@ unset INSTALL_DIR
 # or if you are on macOS
 # MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build develop
 ```
+#### Windows 10
+```bash
+# open a cmd and change to the desired installation directory,
+# from now on it will be referred to as INSTALL_DIR
+conda create --name maskrcnn_benchmark
+conda activate maskrcnn_benchmark
+
+# this installs the right pip and dependencies for the fresh python
+conda install ipython
+
+# maskrcnn_benchmark and coco api dependencies
+pip install ninja yacs cython matplotlib tqdm opencv-python
+
+# follow PyTorch installation in https://pytorch.org/get-started/locally/
+# we give the instructions for CUDA 9.0
+## Important: check the CUDA version installed on your computer by running the following command in the cmd:
+nvcc --version
+conda install -c pytorch pytorch-nightly torchvision cudatoolkit=9.0
+
+git clone https://github.com/cocodataset/cocoapi.git
+
+ # To prevent an installation error, do the following after cloning cocoapi:
+ # using file explorer, navigate to cocoapi\PythonAPI\setup.py and change line 14 from:
+ #extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
+ #to
+ #extra_compile_args={'gcc': ['/Qstd=c99']},
+ # Based on https://github.com/cocodataset/cocoapi/issues/51
+
+cd cocoapi/PythonAPI
+python setup.py build_ext install
+
+# navigate back to INSTALL_DIR
+cd ..
+cd ..
+# install apex
+
+git clone https://github.com/NVIDIA/apex.git
+cd apex
+python setup.py install --cuda_ext --cpp_ext
+# navigate back to INSTALL_DIR
+cd ..
+# install PyTorch Detection
+git clone https://github.com/Idolized22/maskrcnn-benchmark.git
+cd maskrcnn-benchmark
+
+# the following will install the lib with
+# symbolic links, so that you can modify
+# the files if you want and won't need to
+# re-build it
+python setup.py build develop
+```
 
 ### Option 2: Docker Image (Requires CUDA, Linux only)
 
 Build image with defaults (`CUDA=9.0`, `CUDNN=7`, `FORCE_CUDA=1`):
diff --git a/README.md b/README.md
index baa7c6bc4..18382906f 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@ creating detection and segmentation models using PyTorch 1.0.
 - **Very fast**: up to **2x** faster than [Detectron](https://github.com/facebookresearch/Detectron) and **30%** faster than [mmdetection](https://github.com/open-mmlab/mmdetection) during training. See [MODEL_ZOO.md](MODEL_ZOO.md) for more details.
 - **Memory efficient:** uses roughly 500MB less GPU memory than mmdetection during training
 - **Multi-GPU training and inference**
+- **Mixed precision training:** trains faster with less GPU memory on [NVIDIA tensor cores](https://developer.nvidia.com/tensor-cores).
 - **Batched inference:** can perform inference using multiple images per batch per GPU
 - **CPU support for inference:** runs on CPU in inference time. See our [webcam demo](demo) for an example
 - Provides pre-trained models for almost all reference Mask R-CNN and Faster R-CNN configurations with 1x schedule.
@@ -152,6 +153,15 @@ python -m torch.distributed.launch --nproc_per_node=$NGPUS /path_to_maskrcnn_ben
 ```
 Note we should set `MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN` follow the rule in Single-GPU training.
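For concreteness, the Single-GPU rule referenced in the note above sets `MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN` to `images_per_gpu x 1000` (the same placeholder the mixed-precision command below uses). Here is a minimal sketch of a multi-GPU launch with that value filled in, assuming 8 GPUs and a global `SOLVER.IMS_PER_BATCH` of 16 (2 images per GPU, hence 2000); the config path is a placeholder, not a file this PR adds.

```bash
# hypothetical example: 8 GPUs, global batch of 16 images => 2 images per GPU
export NGPUS=8
python -m torch.distributed.launch --nproc_per_node=$NGPUS \
    /path_to_maskrcnn_benchmark/tools/train_net.py \
    --config-file "configs/e2e_mask_rcnn_R_50_FPN_1x.yaml" \
    SOLVER.IMS_PER_BATCH 16 \
    MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN 2000  # images_per_gpu (2) x 1000
```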
+### Mixed precision training +We currently use [APEX](https://github.com/NVIDIA/apex) to add [Automatic Mixed Precision](https://developer.nvidia.com/automatic-mixed-precision) support. To enable, just do Single-GPU or Multi-GPU training and set `DTYPE "float16"`. + +```bash +export NGPUS=8 +python -m torch.distributed.launch --nproc_per_node=$NGPUS /path_to_maskrcnn_benchmark/tools/train_net.py --config-file "path/to/config/file.yaml" MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN images_per_gpu x 1000 DTYPE "float16" +``` +If you want more verbose logging, set `AMP_VERBOSE True`. See [Mixed Precision Training guide](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) for more details. + ## Evaluation You can test your model directly on single or multiple gpus. Here is an example for Mask R-CNN R-50 FPN with the 1x schedule on 8 GPUS: ```bash diff --git a/configs/test_time_aug/e2e_mask_rcnn_R_50_FPN_1x.yaml b/configs/test_time_aug/e2e_mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 000000000..d1e4a75b6 --- /dev/null +++ b/configs/test_time_aug/e2e_mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,48 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" + BACKBONE: + CONV_BODY: "R-50-FPN" + RESNETS: + BACKBONE_OUT_CHANNELS: 256 + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + ROI_BOX_HEAD: + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" + ROI_MASK_HEAD: + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" + PREDICTOR: "MaskRCNNC4Predictor" + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + RESOLUTION: 28 + SHARE_BOX_FEATURE_EXTRACTOR: False + MASK_ON: True +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + BASE_LR: 0.02 + WEIGHT_DECAY: 0.0001 + STEPS: (60000, 80000) + MAX_ITER: 90000 +TEST: + BBOX_AUG: + ENABLED: True + H_FLIP: True + SCALES: (400, 500, 600, 700, 900, 1000, 1100, 1200) + MAX_SIZE: 2000 + SCALE_H_FLIP: True diff --git a/demo/panoptic_segmentation_shapes_dataset_demo.ipynb b/demo/panoptic_segmentation_shapes_dataset_demo.ipynb new file mode 100644 index 000000000..6703fe38a --- /dev/null +++ b/demo/panoptic_segmentation_shapes_dataset_demo.ipynb @@ -0,0 +1,2800 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "objdet.ipynb", + "version": "0.3.2", + "provenance": [], + "collapsed_sections": [ + "-N9mxq4OX6Yc", + "aiLvxXRpDbiq", + "xnr8tbDz7WjS", + "2hpTvuSp830x", + "BI2ncK7kATEh", + "F9njOSX0AU5-", + "P8rXzGehNU_g", + "hbzY16ocEdrg", + "If8z4OZfDHmC", + "mOo-0LGFEAmc", + "bbCBInqHFUg7", + "NVjPYFN1Pz6D", + "BTKsrHa-TkGr", + "r-SfVh-qCmhe", + "ONldqRzHUAm0", + "ccHt8YMdKq6K", + "8S_78fk1xLfJ" + ] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "268x1mG64rCy", + "colab_type": "text" + }, + "source": [ + "# Installation" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "VNvKG2TF3Y0B", + "colab_type": "code", + "outputId": "37eae83c-0d54-40b0-85fc-4bdd42c64fd6", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + 
}, + "source": [ + "%%writefile setup.sh\n", + "\n", + "# maskrcnn_benchmark and coco api dependencies\n", + "pip install ninja yacs cython matplotlib tqdm opencv-python\n", + "\n", + "# follow PyTorch installation in https://pytorch.org/get-started/locally/\n", + "# we give the instructions for CUDA 9.0\n", + "pip install -c pytorch pytorch-nightly torchvision cudatoolkit=9.0\n", + "\n", + "\n", + "git clone https://github.com/cocodataset/cocoapi.git\n", + "cd cocoapi/PythonAPI\n", + "python setup.py build_ext install\n", + "cd ../../\n", + "\n", + "# install apex\n", + "rm -rf apex\n", + "git clone https://github.com/NVIDIA/apex.git\n", + "cd apex\n", + "git pull\n", + "python setup.py install --cuda_ext --cpp_ext\n", + "cd ../\n", + "\n", + "# install PyTorch Detection\n", + "git clone https://github.com/facebookresearch/maskrcnn-benchmark.git\n", + "cd maskrcnn-benchmark\n", + "\n", + "# the following will install the lib with\n", + "# symbolic links, so that you can modify\n", + "# the files if you want and won't need to\n", + "# re-build it\n", + "python setup.py build develop\n" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing setup.sh\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NYzsp3Ng3mOy", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!sh setup.sh" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LUQbRTRocPNN", + "colab_type": "text" + }, + "source": [ + "### Modify YACS Config" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "cwCmPMeccUzz", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "outputId": "b2eb5f82-ee23-447a-eab4-fa234fb4ae79" + }, + "source": [ + "%%writefile maskrcnn-benchmark/maskrcnn_benchmark/config/defaults.py\n", + "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n", + "import os\n", + "\n", + "from yacs.config import CfgNode as CN\n", + "\n", + "\n", + "# -----------------------------------------------------------------------------\n", + "# Convention about Training / Test specific parameters\n", + "# -----------------------------------------------------------------------------\n", + "# Whenever an argument can be either used for training or for testing, the\n", + "# corresponding name will be post-fixed by a _TRAIN for a training parameter,\n", + "# or _TEST for a test-specific parameter.\n", + "# For example, the maximum image side during training will be\n", + "# INPUT.MAX_SIZE_TRAIN, while for testing it will be\n", + "# INPUT.MAX_SIZE_TEST\n", + "\n", + "# -----------------------------------------------------------------------------\n", + "# Config definition\n", + "# -----------------------------------------------------------------------------\n", + "\n", + "_C = CN()\n", + "\n", + "_C.MODEL = CN()\n", + "_C.MODEL.RPN_ONLY = False\n", + "_C.MODEL.MASK_ON = False\n", + "_C.MODEL.RETINANET_ON = False\n", + "_C.MODEL.KEYPOINT_ON = False\n", + "_C.MODEL.DEVICE = \"cuda\"\n", + "_C.MODEL.META_ARCHITECTURE = \"GeneralizedRCNN\"\n", + "_C.MODEL.CLS_AGNOSTIC_BBOX_REG = False\n", + "\n", + "# If the WEIGHT starts with a catalog://, like :R-50, the code will look for\n", + "# the path in paths_catalog. 
Else, it will use it as the specified absolute\n", + "# path\n", + "_C.MODEL.WEIGHT = \"\"\n", + "\n", + "\n", + "# -----------------------------------------------------------------------------\n", + "# INPUT\n", + "# -----------------------------------------------------------------------------\n", + "_C.INPUT = CN()\n", + "# Size of the smallest side of the image during training\n", + "_C.INPUT.MIN_SIZE_TRAIN = (800,) # (800,)\n", + "# Maximum size of the side of the image during training\n", + "_C.INPUT.MAX_SIZE_TRAIN = 1333\n", + "# Size of the smallest side of the image during testing\n", + "_C.INPUT.MIN_SIZE_TEST = 800\n", + "# Maximum size of the side of the image during testing\n", + "_C.INPUT.MAX_SIZE_TEST = 1333\n", + "# Values to be used for image normalization\n", + "_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]\n", + "# Values to be used for image normalization\n", + "_C.INPUT.PIXEL_STD = [1., 1., 1.]\n", + "# Convert image to BGR format (for Caffe2 models), in range 0-255\n", + "_C.INPUT.TO_BGR255 = True\n", + "\n", + "# Image ColorJitter\n", + "_C.INPUT.BRIGHTNESS = 0.0\n", + "_C.INPUT.CONTRAST = 0.0\n", + "_C.INPUT.SATURATION = 0.0\n", + "_C.INPUT.HUE = 0.0\n", + "\n", + "_C.INPUT.VERTICAL_FLIP_PROB_TRAIN = 0.0\n", + "\n", + "# -----------------------------------------------------------------------------\n", + "# Dataset\n", + "# -----------------------------------------------------------------------------\n", + "_C.DATASETS = CN()\n", + "# List of the dataset names for training, as present in paths_catalog.py\n", + "_C.DATASETS.TRAIN = ()\n", + "# List of the dataset names for testing, as present in paths_catalog.py\n", + "_C.DATASETS.TEST = ()\n", + "\n", + "# -----------------------------------------------------------------------------\n", + "# DataLoader\n", + "# -----------------------------------------------------------------------------\n", + "_C.DATALOADER = CN()\n", + "# Number of data loading threads\n", + "_C.DATALOADER.NUM_WORKERS = 4\n", + "# If > 0, this enforces that each collated batch should have a size divisible\n", + "# by SIZE_DIVISIBILITY\n", + "_C.DATALOADER.SIZE_DIVISIBILITY = 0\n", + "# If True, each batch should contain only images for which the aspect ratio\n", + "# is compatible. 
This groups portrait images together, and landscape images\n", + "# are not batched with portrait images.\n", + "_C.DATALOADER.ASPECT_RATIO_GROUPING = True\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Backbone options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.BACKBONE = CN()\n", + "\n", + "# The backbone conv body to use\n", + "# The string must match a function that is imported in modeling.model_builder\n", + "# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN\n", + "# backbone)\n", + "_C.MODEL.BACKBONE.CONV_BODY = \"R-50-C4\"\n", + "\n", + "# Add StopGrad at a specified stage so the bottom layers are frozen\n", + "_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# FPN options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.FPN = CN()\n", + "_C.MODEL.FPN.USE_GN = False\n", + "_C.MODEL.FPN.USE_RELU = False\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Group Norm options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.GROUP_NORM = CN()\n", + "# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS)\n", + "_C.MODEL.GROUP_NORM.DIM_PER_GP = -1\n", + "# Number of groups in GroupNorm (-1 if using DIM_PER_GP)\n", + "_C.MODEL.GROUP_NORM.NUM_GROUPS = 32\n", + "# GroupNorm's small constant in the denominator\n", + "_C.MODEL.GROUP_NORM.EPSILON = 1e-5\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# RPN options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.RPN = CN()\n", + "_C.MODEL.RPN.USE_FPN = False\n", + "# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input\n", + "_C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)\n", + "# Stride of the feature map that RPN is attached.\n", + "# For FPN, number of strides should match number of scales\n", + "_C.MODEL.RPN.ANCHOR_STRIDE = (16,)\n", + "# RPN anchor aspect ratios\n", + "_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)\n", + "# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels\n", + "# Set to -1 or a large value, e.g. 
100000, to disable pruning anchors\n", + "_C.MODEL.RPN.STRADDLE_THRESH = 0\n", + "# Minimum overlap required between an anchor and ground-truth box for the\n", + "# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD\n", + "# ==> positive RPN example)\n", + "_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7\n", + "# Maximum overlap allowed between an anchor and ground-truth box for the\n", + "# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD\n", + "# ==> negative RPN example)\n", + "_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3\n", + "# Total number of RPN examples per image\n", + "_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256\n", + "# Target fraction of foreground (positive) examples per RPN minibatch\n", + "_C.MODEL.RPN.POSITIVE_FRACTION = 0.5\n", + "# Number of top scoring RPN proposals to keep before applying NMS\n", + "# When FPN is used, this is *per FPN level* (not total)\n", + "_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000\n", + "_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000\n", + "# Number of top scoring RPN proposals to keep after applying NMS\n", + "_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000\n", + "_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000\n", + "# NMS threshold used on RPN proposals\n", + "_C.MODEL.RPN.NMS_THRESH = 0.7\n", + "# Proposal height and width both need to be greater than RPN_MIN_SIZE\n", + "# (a the scale used during training or inference)\n", + "_C.MODEL.RPN.MIN_SIZE = 0\n", + "# Number of top scoring RPN proposals to keep after combining proposals from\n", + "# all FPN levels\n", + "_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000\n", + "_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000\n", + "# Apply the post NMS per batch (default) or per image during training\n", + "# (default is True to be consistent with Detectron, see Issue #672)\n", + "_C.MODEL.RPN.FPN_POST_NMS_PER_BATCH = True\n", + "# Custom rpn head, empty to use default conv or separable conv\n", + "_C.MODEL.RPN.RPN_HEAD = \"SingleConvRPNHead\"\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# ROI HEADS options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.ROI_HEADS = CN()\n", + "_C.MODEL.ROI_HEADS.USE_FPN = False\n", + "# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)\n", + "_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5\n", + "# Overlap threshold for an RoI to be considered background\n", + "# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))\n", + "_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5\n", + "# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets\n", + "# These are empirically chosen to approximately lead to unit variance targets\n", + "_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)\n", + "# RoI minibatch size *per image* (number of regions of interest [ROIs])\n", + "# Total number of RoIs per training minibatch =\n", + "# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH\n", + "# E.g., a common configuration is: 512 * 2 * 8 = 8192\n", + "_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512\n", + "# Target fraction of RoI minibatch that is labeled foreground (i.e. 
class > 0)\n", + "_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25\n", + "\n", + "# Only used on test mode\n", + "\n", + "# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to\n", + "# balance obtaining high recall with not having too many low precision\n", + "# detections that will slow down inference post processing steps (like NMS)\n", + "_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05\n", + "# Overlap threshold used for non-maximum suppression (suppress boxes with\n", + "# IoU >= this threshold)\n", + "_C.MODEL.ROI_HEADS.NMS = 0.5\n", + "# Maximum number of detections to return per image (100 is based on the limit\n", + "# established for the COCO dataset)\n", + "_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100\n", + "\n", + "\n", + "_C.MODEL.ROI_BOX_HEAD = CN()\n", + "_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = \"ResNet50Conv5ROIFeatureExtractor\"\n", + "_C.MODEL.ROI_BOX_HEAD.PREDICTOR = \"FastRCNNPredictor\"\n", + "_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14\n", + "_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0\n", + "_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)\n", + "_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81\n", + "# Hidden layer dimension when using an MLP for the RoI box head\n", + "_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024\n", + "# GN\n", + "_C.MODEL.ROI_BOX_HEAD.USE_GN = False\n", + "# Dilation\n", + "_C.MODEL.ROI_BOX_HEAD.DILATION = 1\n", + "_C.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM = 256\n", + "_C.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS = 4\n", + "\n", + "\n", + "_C.MODEL.ROI_MASK_HEAD = CN()\n", + "_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = \"ResNet50Conv5ROIFeatureExtractor\"\n", + "_C.MODEL.ROI_MASK_HEAD.PREDICTOR = \"MaskRCNNC4Predictor\"\n", + "_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14\n", + "_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0\n", + "_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)\n", + "_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024\n", + "_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)\n", + "_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14\n", + "_C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True\n", + "# Whether or not resize and translate masks to the input image.\n", + "_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False\n", + "_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5\n", + "# Dilation\n", + "_C.MODEL.ROI_MASK_HEAD.DILATION = 1\n", + "# GN\n", + "_C.MODEL.ROI_MASK_HEAD.USE_GN = False\n", + "\n", + "_C.MODEL.ROI_KEYPOINT_HEAD = CN()\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.FEATURE_EXTRACTOR = \"KeypointRCNNFeatureExtractor\"\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.PREDICTOR = \"KeypointRCNNPredictor\"\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SCALES = (1.0 / 16,)\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.MLP_HEAD_DIM = 1024\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.CONV_LAYERS = tuple(512 for _ in range(8))\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.RESOLUTION = 14\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.NUM_CLASSES = 17\n", + "_C.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# ResNe[X]t options (ResNets = {ResNet, ResNeXt}\n", + "# Note that parts of a resnet may be used for both the backbone and the head\n", + "# These options apply to both\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.RESNETS = CN()\n", + "\n", + "# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt\n", + 
"_C.MODEL.RESNETS.NUM_GROUPS = 1\n", + "\n", + "# Baseline width of each group\n", + "_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64\n", + "\n", + "# Place the stride 2 conv on the 1x1 filter\n", + "# Use True only for the original MSRA ResNet; use False for C2 and Torch models\n", + "_C.MODEL.RESNETS.STRIDE_IN_1X1 = True\n", + "\n", + "# Residual transformation function\n", + "_C.MODEL.RESNETS.TRANS_FUNC = \"BottleneckWithFixedBatchNorm\"\n", + "# ResNet's stem function (conv1 and pool1)\n", + "_C.MODEL.RESNETS.STEM_FUNC = \"StemWithFixedBatchNorm\"\n", + "\n", + "# Apply dilation in stage \"res5\"\n", + "_C.MODEL.RESNETS.RES5_DILATION = 1\n", + "\n", + "_C.MODEL.RESNETS.BACKBONE_OUT_CHANNELS = 256 * 4\n", + "_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256\n", + "_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64\n", + "\n", + "_C.MODEL.RESNETS.STAGE_WITH_DCN = (False, False, False, False)\n", + "_C.MODEL.RESNETS.WITH_MODULATED_DCN = False\n", + "_C.MODEL.RESNETS.DEFORMABLE_GROUPS = 1\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# RetinaNet Options (Follow the Detectron version)\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.RETINANET = CN()\n", + "\n", + "# This is the number of foreground classes and background.\n", + "_C.MODEL.RETINANET.NUM_CLASSES = 81\n", + "\n", + "# Anchor aspect ratios to use\n", + "_C.MODEL.RETINANET.ANCHOR_SIZES = (32, 64, 128, 256, 512)\n", + "_C.MODEL.RETINANET.ASPECT_RATIOS = (0.5, 1.0, 2.0)\n", + "_C.MODEL.RETINANET.ANCHOR_STRIDES = (8, 16, 32, 64, 128)\n", + "_C.MODEL.RETINANET.STRADDLE_THRESH = 0\n", + "\n", + "# Anchor scales per octave\n", + "_C.MODEL.RETINANET.OCTAVE = 2.0\n", + "_C.MODEL.RETINANET.SCALES_PER_OCTAVE = 3\n", + "\n", + "# Use C5 or P5 to generate P6\n", + "_C.MODEL.RETINANET.USE_C5 = True\n", + "\n", + "# Convolutions to use in the cls and bbox tower\n", + "# NOTE: this doesn't include the last conv for logits\n", + "_C.MODEL.RETINANET.NUM_CONVS = 4\n", + "\n", + "# Weight for bbox_regression loss\n", + "_C.MODEL.RETINANET.BBOX_REG_WEIGHT = 4.0\n", + "\n", + "# Smooth L1 loss beta for bbox regression\n", + "_C.MODEL.RETINANET.BBOX_REG_BETA = 0.11\n", + "\n", + "# During inference, #locs to select based on cls score before NMS is performed\n", + "# per FPN level\n", + "_C.MODEL.RETINANET.PRE_NMS_TOP_N = 1000\n", + "\n", + "# IoU overlap ratio for labeling an anchor as positive\n", + "# Anchors with >= iou overlap are labeled positive\n", + "_C.MODEL.RETINANET.FG_IOU_THRESHOLD = 0.5\n", + "\n", + "# IoU overlap ratio for labeling an anchor as negative\n", + "# Anchors with < iou overlap are labeled negative\n", + "_C.MODEL.RETINANET.BG_IOU_THRESHOLD = 0.4\n", + "\n", + "# Focal loss parameter: alpha\n", + "_C.MODEL.RETINANET.LOSS_ALPHA = 0.25\n", + "\n", + "# Focal loss parameter: gamma\n", + "_C.MODEL.RETINANET.LOSS_GAMMA = 2.0\n", + "\n", + "# Prior prob for the positives at the beginning of training. 
This is used to set\n", + "# the bias init for the logits layer\n", + "_C.MODEL.RETINANET.PRIOR_PROB = 0.01\n", + "\n", + "# Inference cls score threshold, anchors with score > INFERENCE_TH are\n", + "# considered for inference\n", + "_C.MODEL.RETINANET.INFERENCE_TH = 0.05\n", + "\n", + "# NMS threshold used in RetinaNet\n", + "_C.MODEL.RETINANET.NMS_TH = 0.4\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# FBNet options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.FBNET = CN()\n", + "_C.MODEL.FBNET.ARCH = \"default\"\n", + "# custom arch\n", + "_C.MODEL.FBNET.ARCH_DEF = \"\"\n", + "_C.MODEL.FBNET.BN_TYPE = \"bn\"\n", + "_C.MODEL.FBNET.SCALE_FACTOR = 1.0\n", + "# the output channels will be divisible by WIDTH_DIVISOR\n", + "_C.MODEL.FBNET.WIDTH_DIVISOR = 1\n", + "_C.MODEL.FBNET.DW_CONV_SKIP_BN = True\n", + "_C.MODEL.FBNET.DW_CONV_SKIP_RELU = True\n", + "\n", + "# > 0 scale, == 0 skip, < 0 same dimension\n", + "_C.MODEL.FBNET.DET_HEAD_LAST_SCALE = 1.0\n", + "_C.MODEL.FBNET.DET_HEAD_BLOCKS = []\n", + "# overwrite the stride for the head, 0 to use original value\n", + "_C.MODEL.FBNET.DET_HEAD_STRIDE = 0\n", + "\n", + "# > 0 scale, == 0 skip, < 0 same dimension\n", + "_C.MODEL.FBNET.KPTS_HEAD_LAST_SCALE = 0.0\n", + "_C.MODEL.FBNET.KPTS_HEAD_BLOCKS = []\n", + "# overwrite the stride for the head, 0 to use original value\n", + "_C.MODEL.FBNET.KPTS_HEAD_STRIDE = 0\n", + "\n", + "# > 0 scale, == 0 skip, < 0 same dimension\n", + "_C.MODEL.FBNET.MASK_HEAD_LAST_SCALE = 0.0\n", + "_C.MODEL.FBNET.MASK_HEAD_BLOCKS = []\n", + "# overwrite the stride for the head, 0 to use original value\n", + "_C.MODEL.FBNET.MASK_HEAD_STRIDE = 0\n", + "\n", + "# 0 to use all blocks defined in arch_def\n", + "_C.MODEL.FBNET.RPN_HEAD_BLOCKS = 0\n", + "_C.MODEL.FBNET.RPN_BN_TYPE = \"\"\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Solver\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.SOLVER = CN()\n", + "_C.SOLVER.MAX_ITER = 40000\n", + "\n", + "_C.SOLVER.BASE_LR = 0.001\n", + "_C.SOLVER.BIAS_LR_FACTOR = 2\n", + "\n", + "_C.SOLVER.MOMENTUM = 0.9\n", + "\n", + "_C.SOLVER.WEIGHT_DECAY = 0.0005\n", + "_C.SOLVER.WEIGHT_DECAY_BIAS = 0\n", + "\n", + "_C.SOLVER.GAMMA = 0.1\n", + "_C.SOLVER.STEPS = (30000,)\n", + "\n", + "_C.SOLVER.WARMUP_FACTOR = 1.0 / 3\n", + "_C.SOLVER.WARMUP_ITERS = 500\n", + "_C.SOLVER.WARMUP_METHOD = \"linear\"\n", + "\n", + "_C.SOLVER.CHECKPOINT_PERIOD = 2500\n", + "\n", + "# Number of images per batch\n", + "# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will\n", + "# see 2 images per batch\n", + "_C.SOLVER.IMS_PER_BATCH = 16\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Specific test options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.TEST = CN()\n", + "_C.TEST.EXPECTED_RESULTS = []\n", + "_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4\n", + "# Number of images per batch\n", + "# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will\n", + "# see 2 images per batch\n", + "_C.TEST.IMS_PER_BATCH = 8\n", + "# Number of detections per image\n", + "_C.TEST.DETECTIONS_PER_IMG = 100\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Test-time augmentations for bounding box 
detection\n", + "# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_1x.yaml for an example\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.TEST.BBOX_AUG = CN()\n", + "\n", + "# Enable test-time augmentation for bounding box detection if True\n", + "_C.TEST.BBOX_AUG.ENABLED = False\n", + "\n", + "# Horizontal flip at the original scale (id transform)\n", + "_C.TEST.BBOX_AUG.H_FLIP = False\n", + "\n", + "# Each scale is the pixel size of an image's shortest side\n", + "_C.TEST.BBOX_AUG.SCALES = ()\n", + "\n", + "# Max pixel size of the longer side\n", + "_C.TEST.BBOX_AUG.MAX_SIZE = 4000\n", + "\n", + "# Horizontal flip at each scale\n", + "_C.TEST.BBOX_AUG.SCALE_H_FLIP = False\n", + "\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Misc options\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.OUTPUT_DIR = \".\"\n", + "\n", + "_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), \"paths_catalog.py\")\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Precision options\n", + "# ---------------------------------------------------------------------------- #\n", + "\n", + "# Precision of input, allowable: (float32, float16)\n", + "_C.DTYPE = \"float32\"\n", + "\n", + "# Enable verbosity in apex.amp\n", + "_C.AMP_VERBOSE = False\n", + "\n", + "# ---------------------------------------------------------------------------- #\n", + "# Panoptic FPN\n", + "# ---------------------------------------------------------------------------- #\n", + "_C.MODEL.PANOPTIC = CN()\n", + "_C.MODEL.PANOPTIC.CHANNEL_SIZE = 128\n", + "_C.MODEL.PANOPTIC.NUM_CLASSES = 1\n" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Overwriting maskrcnn-benchmark/maskrcnn_benchmark/config/defaults.py\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1uoPMGDl49Wk", + "colab_type": "text" + }, + "source": [ + "### Checking our Installation\n", + "\n", + "If a module not found error appears, restart the runtime. 
The libraries should be loaded after restarting" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3q-n76S95KA3", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 299 + }, + "outputId": "39d390c1-1b71-4c9c-88e6-1d78e4267ce6" + }, + "source": [ + "import maskrcnn_benchmark" + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "error", + "ename": "ModuleNotFoundError", + "evalue": "ignored", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mmaskrcnn_benchmark\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'maskrcnn_benchmark'", + "", + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aiLvxXRpDbiq", + "colab_type": "text" + }, + "source": [ + "# Imports" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kLzesfGNX9O2", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import torch\n", + "from torch import nn\n", + "import torch.nn.functional as Fx\n", + "import datetime\n", + "\n", + "# Set up custom environment before nearly anything else is imported\n", + "# NOTE: this should be the first import (no not reorder)\n", + "from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n", + "\n", + "from maskrcnn_benchmark.data.build import *\n", + "from maskrcnn_benchmark.structures.bounding_box import BoxList\n", + "from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask\n", + "from maskrcnn_benchmark.modeling.detector import build_detection_model\n", + "from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\n", + "from maskrcnn_benchmark.structures.image_list import to_image_list\n", + "from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\n", + "from maskrcnn_benchmark import layers as L\n", + "from maskrcnn_benchmark.utils import cv2_util\n", + "from maskrcnn_benchmark.utils.miscellaneous import mkdir\n", + "from maskrcnn_benchmark.utils.logger import setup_logger\n", + "from maskrcnn_benchmark.utils.comm import synchronize, get_rank\n", + "from maskrcnn_benchmark.config import cfg\n", + "from maskrcnn_benchmark.config import cfg\n", + "from maskrcnn_benchmark.data import make_data_loader\n", + "from maskrcnn_benchmark.solver import make_lr_scheduler\n", + "from maskrcnn_benchmark.solver import make_optimizer\n", + "from maskrcnn_benchmark.engine.inference import inference\n", + "from maskrcnn_benchmark.engine.trainer import do_train\n", + "from maskrcnn_benchmark.modeling.detector import build_detection_model\n", + "from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\n", + "from maskrcnn_benchmark.utils.collect_env import collect_env_info\n", + "from maskrcnn_benchmark.utils.comm import synchronize, 
get_rank\n", + "from maskrcnn_benchmark.utils.imports import import_file\n", + "from maskrcnn_benchmark.data.datasets.evaluation import evaluate\n", + "from maskrcnn_benchmark.utils.comm import is_main_process, get_world_size\n", + "from maskrcnn_benchmark.utils.comm import all_gather\n", + "from maskrcnn_benchmark.utils.timer import Timer, get_time_str\n", + "from maskrcnn_benchmark.engine.inference import compute_on_dataset, _accumulate_predictions_from_multiple_gpus\n", + "from maskrcnn_benchmark.data.datasets.evaluation.coco import coco_evaluation\n", + "from maskrcnn_benchmark.modeling.utils import cat\n", + "from maskrcnn_benchmark.structures.image_list import to_image_list\n", + "\n", + "from maskrcnn_benchmark.modeling.backbone import build_backbone\n", + "from maskrcnn_benchmark.modeling.rpn.rpn import build_rpn\n", + "from maskrcnn_benchmark.modeling.roi_heads.roi_heads import build_roi_heads\n", + "from maskrcnn_benchmark.modeling.make_layers import make_conv3x3\n", + "from maskrcnn_benchmark.structures.image_list import to_image_list\n", + "from maskrcnn_benchmark.modeling.backbone import build_backbone\n", + "from maskrcnn_benchmark.modeling.rpn.rpn import build_rpn\n", + "from maskrcnn_benchmark.modeling.roi_heads.roi_heads import build_roi_heads\n", + "\n", + "import torch.distributed as dist\n", + "\n", + "from maskrcnn_benchmark.utils.comm import get_world_size\n", + "from maskrcnn_benchmark.utils.metric_logger import MetricLogger\n", + "\n", + "\n", + "from PIL import Image\n", + "import json\n", + "import logging\n", + "import torch\n", + "import numpy as np\n", + "import skimage.draw as draw\n", + "import tempfile\n", + "from pycocotools.coco import COCO\n", + "import os\n", + "import sys\n", + "import random\n", + "import math\n", + "import re\n", + "import time\n", + "import cv2\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "from tqdm import tqdm\n", + "\n", + "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n", + "from torchvision import transforms as T\n", + "from torchvision.transforms import functional as F\n", + "from google.colab.patches import cv2_imshow\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DvU-NYKJ3uzb", + "colab_type": "text" + }, + "source": [ + "# Loading Our Dataset\n", + "\n", + "To train a network using the MaskRCNN repo, we first need to define our dataset. The dataset needs to a class of type object and should extend 3 things. \n", + "\n", + "1. **__getitem__(self, idx)**: This function should return a PIL Image, a BoxList and the idx. The Boxlist is an abstraction for our bounding boxes, segmentation masks, class lables and also people keypoints. Please check ABSTRACTIONS.ms for more details on this. \n", + "\n", + "2. **__len__()**: returns the length of the dataset. \n", + "\n", + "3. **get_img_info(self, idx)**: Return a dict of img info with the fields \"height\" and \"width\" filled in with the idx's image's height and width.\n", + "\n", + "4. **self.coco**: Should be a variable that holds the COCO object for your annotations so that you can perform evaluations of your dataset. \n", + "\n", + "5. **self.id_to_img_map**: Is a dictionary that maps the ids to coco image ids. Almost in all cases just map the idxs to idxs. This is simply a requirement for the coco evaluation. \n", + "\n", + "6. **self.contiguous_category_id_to_json_id**: Another requirement for coco evaluation. It maps the categpry to json category id. 
Again, for almost all purposes category id and json id should be same. \n", + "\n", + "Given below is a sample fo a dataset. It is the Shape Dataset taken from the Matterport Mask RCNN Repo. One important detail is that the constructor if the dataset should have the variable transforms that is set inside the constructor. It should thgen be used inside **__get__item(idx)** as shown below." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xnr8tbDz7WjS", + "colab_type": "text" + }, + "source": [ + "## Helper Functions" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "tb_5MERf7c_1", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Helper Functions for the Shapes Dataset\n", + "\n", + "def non_max_suppression(boxes, scores, threshold):\n", + " \"\"\"Performs non-maximum suppression and returns indices of kept boxes.\n", + " boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.\n", + " scores: 1-D array of box scores.\n", + " threshold: Float. IoU threshold to use for filtering.\n", + " \"\"\"\n", + " assert boxes.shape[0] > 0\n", + " if boxes.dtype.kind != \"f\":\n", + " boxes = boxes.astype(np.float32)\n", + "\n", + " # Compute box areas\n", + " y1 = boxes[:, 0]\n", + " x1 = boxes[:, 1]\n", + " y2 = boxes[:, 2]\n", + " x2 = boxes[:, 3]\n", + " area = (y2 - y1) * (x2 - x1)\n", + "\n", + " # Get indicies of boxes sorted by scores (highest first)\n", + " ixs = scores.argsort()[::-1]\n", + "\n", + " pick = []\n", + " while len(ixs) > 0:\n", + " # Pick top box and add its index to the list\n", + " i = ixs[0]\n", + " pick.append(i)\n", + " # Compute IoU of the picked box with the rest\n", + " iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n", + " # Identify boxes with IoU over the threshold. This\n", + " # returns indices into ixs[1:], so add 1 to get\n", + " # indices into ixs.\n", + " remove_ixs = np.where(iou > threshold)[0] + 1\n", + " # Remove indices of the picked and overlapped boxes.\n", + " ixs = np.delete(ixs, remove_ixs)\n", + " ixs = np.delete(ixs, 0)\n", + " return np.array(pick, dtype=np.int32)\n", + "\n", + "def compute_iou(box, boxes, box_area, boxes_area):\n", + " \"\"\"Calculates IoU of the given box with the array of the given boxes.\n", + " box: 1D vector [y1, x1, y2, x2]\n", + " boxes: [boxes_count, (y1, x1, y2, x2)]\n", + " box_area: float. the area of 'box'\n", + " boxes_area: array of length boxes_count.\n", + " Note: the areas are passed in rather than calculated here for\n", + " efficiency. 
Calculate once in the caller to avoid duplicate work.\n", + " \"\"\"\n", + " # Calculate intersection areas\n", + " y1 = np.maximum(box[0], boxes[:, 0])\n", + " y2 = np.minimum(box[2], boxes[:, 2])\n", + " x1 = np.maximum(box[1], boxes[:, 1])\n", + " x2 = np.minimum(box[3], boxes[:, 3])\n", + " intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n", + " union = box_area + boxes_area[:] - intersection[:]\n", + " iou = intersection / union\n", + " return iou" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5DC0K7tW7d-M", + "colab_type": "text" + }, + "source": [ + "## Dataset" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WhG_Tu9ELAsj", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class ShapeDataset(object):\n", + " \n", + " def __init__(self, num_examples, transforms=None):\n", + " \n", + " self.height = 128\n", + " self.width = 128\n", + " \n", + " self.num_examples = num_examples\n", + " self.transforms = transforms # IMPORTANT, DON'T MISS\n", + " self.image_info = []\n", + " self.logger = logging.getLogger(__name__)\n", + " \n", + " # Class Names: Note that the ids start fromm 1 not 0. This repo uses the 0 index for background\n", + " self.class_names = {\"square\": 1, \"circle\": 2, \"triangle\": 3}\n", + " \n", + " # Add images\n", + " # Generate random specifications of images (i.e. color and\n", + " # list of shapes sizes and locations). This is more compact than\n", + " # actual images. Images are generated on the fly in load_image().\n", + " for i in range(num_examples):\n", + " bg_color, shapes = self.random_image(self.height, self.width)\n", + " self.image_info.append({ \"path\":None,\n", + " \"width\": self.width, \"height\": self.height,\n", + " \"bg_color\": bg_color, \"shapes\": shapes\n", + " })\n", + " \n", + " # Fills in the self.coco varibale for evaluation.\n", + " self.get_gt()\n", + " \n", + " # Variables needed for coco mAP evaluation\n", + " self.id_to_img_map = {}\n", + " for i, _ in enumerate(self.image_info):\n", + " self.id_to_img_map[i] = i\n", + "\n", + " self.contiguous_category_id_to_json_id = { 0:0 ,1:1, 2:2, 3:3 }\n", + " \n", + "\n", + " def random_shape(self, height, width):\n", + " \"\"\"Generates specifications of a random shape that lies within\n", + " the given height and width boundaries.\n", + " Returns a tuple of three valus:\n", + " * The shape name (square, circle, ...)\n", + " * Shape color: a tuple of 3 values, RGB.\n", + " * Shape dimensions: A tuple of values that define the shape size\n", + " and location. 
Differs per shape type.\n", + " \"\"\"\n", + " # Shape\n", + " shape = random.choice([\"square\", \"circle\", \"triangle\"])\n", + " # Color\n", + " color = tuple([random.randint(0, 255) for _ in range(3)])\n", + " # Center x, y\n", + " buffer = 20\n", + " y = random.randint(buffer, height - buffer - 1)\n", + " x = random.randint(buffer, width - buffer - 1)\n", + " # Size\n", + " s = random.randint(buffer, height//4)\n", + " return shape, color, (x, y, s)\n", + "\n", + " def random_image(self, height, width):\n", + " \"\"\"Creates random specifications of an image with multiple shapes.\n", + " Returns the background color of the image and a list of shape\n", + " specifications that can be used to draw the image.\n", + " \"\"\"\n", + " # Pick random background color\n", + " bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n", + " # Generate a few random shapes and record their\n", + " # bounding boxes\n", + " shapes = []\n", + " boxes = []\n", + " N = random.randint(1, 4)\n", + " labels = {}\n", + " for _ in range(N):\n", + " shape, color, dims = self.random_shape(height, width)\n", + " shapes.append((shape, color, dims))\n", + " x, y, s = dims\n", + " boxes.append([y-s, x-s, y+s, x+s])\n", + "\n", + " # Apply non-max suppression wit 0.3 threshold to avoid\n", + " # shapes covering each other\n", + " keep_ixs = non_max_suppression(np.array(boxes), np.arange(N), 0.3)\n", + " shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n", + " \n", + " return bg_color, shapes\n", + " \n", + " \n", + " def draw_shape(self, image, shape, dims, color):\n", + " \"\"\"Draws a shape from the given specs.\"\"\"\n", + " # Get the center x, y and the size s\n", + " x, y, s = dims\n", + " if shape == 'square':\n", + " cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)\n", + " elif shape == \"circle\":\n", + " cv2.circle(image, (x, y), s, color, -1)\n", + " elif shape == \"triangle\":\n", + " points = np.array([[(x, y-s),\n", + " (x-s/math.sin(math.radians(60)), y+s),\n", + " (x+s/math.sin(math.radians(60)), y+s),\n", + " ]], dtype=np.int32)\n", + " cv2.fillPoly(image, points, color)\n", + " return image, [ x-s, y-s, x+s, y+s]\n", + "\n", + "\n", + " def load_mask(self, image_id):\n", + " \"\"\"\n", + " Generates instance masks for shapes of the given image ID.\n", + " \"\"\"\n", + " info = self.image_info[image_id]\n", + " shapes = info['shapes']\n", + " count = len(shapes)\n", + " mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n", + " boxes = []\n", + " \n", + " for i, (shape, _, dims) in enumerate(info['shapes']):\n", + " mask[:, :, i:i+1], box = self.draw_shape( mask[:, :, i:i+1].copy(),\n", + " shape, dims, 1)\n", + " boxes.append(box)\n", + " \n", + " \n", + " # Handle occlusions\n", + " occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n", + " for i in range(count-2, -1, -1):\n", + " mask[:, :, i] = mask[:, :, i] * occlusion\n", + " occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n", + " \n", + " segmentation_mask = mask.copy()\n", + " segmentation_mask = np.expand_dims(np.sum(segmentation_mask, axis=2), axis=2)\n", + " \n", + " # Map class names to class IDs.\n", + " class_ids = np.array([self.class_names[s[0]] for s in shapes])\n", + " return segmentation_mask.astype(np.uint8), mask.astype(np.uint8), class_ids.astype(np.int32), boxes\n", + " \n", + " def load_image(self, image_id):\n", + " \"\"\"Generate an image from the specs of the given image ID.\n", + " Typically this function loads the image from a file, but\n", + 
" in this case it generates the image on the fly from the\n", + " specs in image_info.\n", + " \"\"\"\n", + " info = self.image_info[image_id]\n", + " bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n", + " image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n", + " image = image * bg_color.astype(np.uint8)\n", + " for shape, color, dims in info['shapes']:\n", + " image, _ = self.draw_shape(image, shape, dims, color)\n", + " return image\n", + " \n", + " def __getitem__(self, idx):\n", + " \n", + " \"\"\"Generate an image from the specs of the given image ID.\n", + " Typically this function loads the image from a file, but\n", + " in this case it generates the image on the fly from the\n", + " specs in image_info.\n", + " \"\"\"\n", + " image = Image.fromarray(self.load_image(idx))\n", + " segmentation_mask, masks, labels, boxes = self.load_mask(idx)\n", + " \n", + " # create a BoxList from the boxes\n", + " boxlist = BoxList(boxes, image.size, mode=\"xyxy\")\n", + "\n", + " # add the labels to the boxlist\n", + " boxlist.add_field(\"labels\", torch.tensor(labels))\n", + "\n", + " # Add masks to the boxlist\n", + " masks = np.transpose(masks, (2,0,1))\n", + " masks = SegmentationMask(torch.tensor(masks), image.size, \"mask\")\n", + " boxlist.add_field(\"masks\", masks)\n", + " \n", + " # Add semantic segmentation masks to the boxlist for panoptic segmentation\n", + " segmentation_mask = np.transpose(segmentation_mask, (2,0,1))\n", + " seg_masks = SegmentationMask(torch.tensor(segmentation_mask), image.size, \"mask\")\n", + " boxlist.add_field(\"seg_masks\", seg_masks)\n", + " \n", + " # Important line! dont forget to add this\n", + " if self.transforms:\n", + " image, boxlist = self.transforms(image, boxlist)\n", + "\n", + " # return the image, the boxlist and the idx in your dataset\n", + " return image, boxlist, idx\n", + " \n", + " \n", + " def __len__(self):\n", + " return self.num_examples\n", + " \n", + "\n", + " def get_img_info(self, idx):\n", + " # get img_height and img_width. 
This is used if\n", + " # we want to split the batches according to the aspect ratio\n", + " # of the image, as it can be more efficient than loading the\n", + " # image from disk\n", + "\n", + " return {\"height\": self.height, \"width\": self.width}\n", + " \n", + " def get_gt(self):\n", + " # Prepares dataset for coco eval\n", + " \n", + " \n", + " images = []\n", + " annotations = []\n", + " results = []\n", + " \n", + " # Define categories\n", + " categories = [ {\"id\": 1, \"name\": \"square\"}, {\"id\": 2, \"name\": \"circle\"}, {\"id\": 3, \"name\": \"triangle\"}]\n", + "\n", + "\n", + " i = 1\n", + " ann_id = 0\n", + "\n", + " for img_id, d in enumerate(self.image_info):\n", + "\n", + " images.append( {\"id\": img_id, 'height': self.height, 'width': self.width } )\n", + "\n", + " for (shape, color, dims) in d['shapes']:\n", + " \n", + " if shape == \"square\":\n", + " category_id = 1\n", + " elif shape == \"circle\":\n", + " category_id = 2\n", + " elif shape == \"triangle\":\n", + " category_id = 3\n", + " \n", + " x, y, s = dims\n", + " bbox = [ x - s, y - s, x+s, y +s ] \n", + " area = (bbox[0] - bbox[2]) * (bbox[1] - bbox[3])\n", + " \n", + " # Format for COCOC\n", + " annotations.append( {\n", + " \"id\": int(ann_id),\n", + " \"category_id\": category_id,\n", + " \"image_id\": int(img_id),\n", + " \"area\" : float(area),\n", + " \"bbox\": [ float(bbox[0]), float(bbox[1]), float(bbox[2]) - float(bbox[0]) + 1, float(bbox[3]) - float(bbox[1]) + 1 ], # note that the bboxes are in x, y , width, height format\n", + " \"iscrowd\" : 0\n", + " } )\n", + "\n", + " ann_id += 1\n", + "\n", + " # Save ground truth file\n", + " \n", + " with open(\"tmp_gt.json\", \"w\") as f:\n", + " json.dump({\"images\": images, \"annotations\": annotations, \"categories\": categories }, f)\n", + "\n", + " # Load gt for coco eval\n", + " self.coco = COCO(\"tmp_gt.json\") \n", + " " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2hpTvuSp830x", + "colab_type": "text" + }, + "source": [ + "## Visualise Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BI2ncK7kATEh", + "colab_type": "text" + }, + "source": [ + "### Load" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "6nsO_MRUbBpk", + "colab_type": "code", + "outputId": "6a7f5e13-8ba2-4587-e6bc-abe078c0e308", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 105 + } + }, + "source": [ + "train_dt = ShapeDataset(100)\n", + "im, boxlist, idx = train_dt[0]" + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F9njOSX0AU5-", + "colab_type": "text" + }, + "source": [ + "### Display some sample Images" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nMXB9sAW994F", + "colab_type": "code", + "outputId": "d4702873-36ec-430e-c2c9-8bb4b58aa84b", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + } + }, + "source": [ + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " im, boxlist, idx = train_dt[i]\n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(im)\n", + "plt.show()\n", + " " + ], + "execution_count": 5, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X2wXXV97/H3twkEkiDhqblpgia2\nVIc6tjBnkI7UcUQrUkro1DowThuVO7FTrFrb0Vg7g3emzkhttXZuq6aCxA4FEfWSuYNVpFivM5fU\ngMjzQ0QekhsIPiAP0Uj0e//Y69RNOIeTvddae/323u/XzJmz92+vffb3rJxvPuf3W2uvE5mJJEkq\nxy90XYAkSXomw1mSpMIYzpIkFcZwliSpMIazJEmFMZwlSSpMa+EcEWdExN0RsSMiNrX1OpLaZS9L\noxdtvM85IhYB9wCvAXYC3wDOy8w7Gn8xSa2xl6VutDVzPgXYkZn3ZeZPgCuA9S29lqT22MtSBxa3\n9HVXAw/13d8JvGy+jZcsOSSXLjuspVKk8fTYD578bmYe13EZA/UywNIVkUf+t1ZrksbKDx+GvY9l\nDPKctsJ5QRGxEdgIcPjSJbzyt0/qqhSpSP/rM//nga5rOFj9/fy8lfCWTy7quCKpHJf8958O/Jy2\nlrV3Acf33V9Tjf2XzNycmTOZObNkySEtlSGppgV7GZ7Zz0tXjKw2aWK1Fc7fAE6IiHURcShwLrC1\npdeS1B57WepAK8vambk/It4GfAlYBFySmbe38VqS2mMvS91o7ZhzZl4DXNPW15c0GvayNHpeIUyS\npMIYzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mSCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTCG\nsyRJhTGcJUkqjOEsSVJhDGdJkgpjOEuSVBjDeULd9dm13PXZtV2XIUkaguE8gfpD2YCWpPFjOEuS\nVBjDWZKkwizuugA1Z74l7NnxF//B/SOrRZI0PGfOkiQVZuhwjojjI+L6iLgjIm6PiHdU40dHxLUR\ncW/1+ajmypXUBvtZKkudmfN+4M8z80TgVOCCiDgR2ARcl5knANdV99Wig33blGdu6znYz1JBhg7n\nzNydmTdVt58A7gRWA+uBLdVmW4Bz6hYpqV32s1SWRo45R8Ra4CRgG7AyM3dXDz0MrGziNTQ3Z8Nq\nmv0sda92OEfEcuBzwDsz8/H+xzIzgZzneRsjYntEbN+37+m6ZeggeeUwPZcm+nnvYyMoVJpwtcI5\nIg6h18iXZebnq+FHImJV9fgqYM9cz83MzZk5k5kzS5YcUqcMSQ1oqp+XrhhNvdIkq3O2dgAXA3dm\n5of7HtoKbKhubwCuHr48zafuDNjZs/rZz1JZ6lyE5OXAHwK3RsTN1dhfAh8EroyI84EHgDfUK1HS\nCNjPUkGGDufM/DoQ8zx8+rBfV9Lo2c9SWbxC2Bhqaknak8MkqUyGsyRJhfEPX4wRZ7mSNB2cOcvQ\nl6TCGM6SJBXGcB4Tzm4laXoYzgI8c1uSSmI4S5JUGM/WLtyoZ7Ozr/fiP7h/pK8rSfo5Z86SJBXG\ncJYkqTCGc8G6PEHLk8MkqTsecy6QwShJ082ZsyRJhTGcNS/f+yxJ3TCcJUkqjOFcGGeqkiTDWQvy\nFwZJGi3DWZKkwvhWqkI4O5UkzTKcOzYuoew1tyVpdFzWliSpMLXDOSIWRcQ3I+J/V/fXRcS2iNgR\nEZ+JiEPrl6lSjMtMX8Oxn6UyNDFzfgdwZ9/9i4CPZOavAD8Azm/gNSaSQacC2c9SAWqFc0SsAX4H\n+GR1P4BXAVdVm2wBzqnzGpJGw36WylF35vz3wLuBn1X3jwEey8z91f2dwOq5nhgRGyNie0Rs37fv\n6ZplaJS8rOfEaqSf9z7WfqHSpBs6nCPiLGBPZt44zPMzc3NmzmTmzJIlhwxbxlgy3FSaJvt56YqG\ni5OmUJ23Ur0cODsizgQOA54HfBRYERGLq9+21wC76pcpqWX2s1SQoWfOmfnezFyTmWuBc4F/z8w3\nAtcDr6822wBcXbvKCTJJM+ZJ+l6mnf0slaWN9zm/B3hXROygd8zq4hZeQ9Jo2M9SBxq5QlhmfhX4\nanX7PuCUJr6upNGzn6XuefnOEZnUJWAv6ylJzfPynZIkFcZwViMmdWVAkrpgOI+AwSVJGoThLElS\nYTwhrEXTNmP25DBJaoYzZ0mSCmM4S5JUGMO5JdO2pN3PP+whSfUYzpIkFcZwliSpMIZzw1zS/Tn3\ngyQNx3BWq/xlRZIGZzhLklQYw7khzhAlDeNll9zUdQkqkOEsSVJhvHxnA5wxL+yuz671sp5Sn/4Z\n8+ztbW85uatyVBhnzpIkFcZwliSpMIZzDZ4ENhj3l9Qz30lgnhymWYazJEmFMZwlSSpMrXCOiBUR\ncVVE3BURd0bEb0bE0RFxbUTcW30+qqliS+Ly7PDcd2Wa5n4elZddctOCS9cHs40mX923Un0U+LfM\nfH1EHAosBf4SuC4zPxgRm4BNwHtqvk5xfFuQJtDU9vMoGLgaxNAz54g4EngFcDFAZv4kMx8D1gNb\nqs22AOfULVJSu+xnqSx1lrXXAY8Cn4qIb0bEJyNiGbAyM3dX2zwMrJzryRGxMSK2R8T2ffuerlGG\npAY01s97HxtRxRPO5e3pViecFwMnAx/LzJOAp+gtef2XzEwg53pyZm7OzJnMnFmy5JAaZUhqQGP9\nvHRF67WOlboha0BPpzrhvBPYmZnbqvtX0WvuRyJiFUD1eU+9EiWNgP0sFWTocM7Mh4GHIuJF1dDp\nwB3AVmBDNbYBuLpWhZJaZz9LZal7tvafApdVZ3beB7yZXuBfGRHnAw8Ab6j5GpJGw35uWFNL0v5h\njOlTK5wz82ZgZo6HTq/zdSWNnv0slcM/GSlJDfMkrmd6zYNnAnDt86/puJLx4eU7JUkqjDNnSRoT\nL7vkpuKOO8/Oipva1tl1j+EsSQ2ZluXsQQK5ztee5qB2WVuSpMI4c5akMdLV26ranC0fzGtO2yza\ncJakBox6Sbvt489dhPFzmbagdllbkqTCOHOWpBom8SSw0mbNB5qG9007c5akMdX0n5V8zYNnFh/M\n/cap1kEZzpIkFcZwlqQhTdKS9rjOQsdttn+wDGdJGnN1fkmYlHCbhO+hn+EsSVJhPFtbkgY0ScvZ\nKpMzZ0kaQKnBPMyZ25O2FDwpS/RgOEuSVBzDWZImSKkzew3GcJYkqTCeECZJB2GSZqSTclx2Pq95\n8Myxv7SnM2dJmjBNX9ZTo2c4S5JUmFrhHBF/FhG3R8RtEXF5RBwWEesiYltE7IiIz0TEoU0VK6k9\n9vP8JmkWOulL2rPG/W1VQ4dzRKwG3g7MZOZLgEXAucBFwEcy81eAHwDnN1GopPbYz5PJ5e3xVXdZ\nezFweEQsBpYCu4FXAVdVj28Bzqn5GpJGw36WCjH02dqZ
uSsi/hZ4EPgR8GXgRuCxzNxfbbYTWD3X\n8yNiI7AR4PClS4YtQ1IDmuzn561sv95RcdaprtRZ1j4KWA+sA34JWAaccbDPz8zNmTmTmTNLlhwy\nbBmSGtBkPy9d0VKRIzRpy8GT9L1MizrL2q8GvpOZj2bm08DngZcDK6plMYA1wK6aNUpqn/0sFaRO\nOD8InBoRSyMigNOBO4DrgddX22wArq5XoqQRsJ8n3KStBky6ocM5M7fRO1HkJuDW6mttBt4DvCsi\ndgDHABc3UKekFtnPP2eAqQS1Lt+ZmRcCFx4wfB9wSp2vK2n07GepHF4hTJKmyONf+euuSxipcb0Y\niX/4QpJwOXtSjesfwHDmLElSYQxnSVNv2mbNj3/lr6dueXvcGM6SJBXGcJYkqTCGs6SpNe0X5nBp\nu1yGsyRJhTGcJWmKeXJYmQxnSVNpmpezVT7DWZKkwniFMElTxRnz3B7/yl/zvFf/VddlNGZcrww2\ny5mzJEmFMZwlSSqM4Sxparik/dwm5cztcV/SBsNZkqTiGM6SJBXGs7UlTTyXswczrmduT8Jy9izD\nWdLE2/aWk7suoVivefDMrkvQHFzWliSpMIazJE2xa59/zUQsB0/C99DPcJYkqTALhnNEXBIReyLi\ntr6xoyPi2oi4t/p8VDUeEfEPEbEjIm6JCA/0SAWxnzWfcZ15TsrM/0AHM3O+FDjjgLFNwHWZeQJw\nXXUf4HXACdXHRuBjzZQpqSGXYj9rHuMWdONU66AWDOfM/Brw/QOG1wNbqttbgHP6xj+dPTcAKyJi\nVVPFSqrHfpbGw7DHnFdm5u7q9sPAyur2auChvu12VmPPEhEbI2J7RGzft+/pIcuQ1IBG+3nvY+0V\nqtEofUY6bjP8YdR+n3NmZkTkEM/bDGwGOOroIwZ+vqTmNdHPq148+PNVngPDr+v3Q096GB9o2Jnz\nI7PLW9XnPdX4LuD4vu3WVGOSymU/S4UZdua8FdgAfLD6fHXf+Nsi4grgZcAP+5bLJJXJftaC+meu\no5pFT9tsud+C4RwRlwOvBI6NiJ3AhfSa+MqIOB94AHhDtfk1wJnADmAv8OYWapY0JPtZTWgzqKc5\nkPstGM6Zed48D50+x7YJXFC3KEntsJ+l8eAfvpAkDe1gZrqzs2tnxQfPcJYktcpQHpzX1pYkqTCG\nsyRJhTGcJUkqjOEsSVJhDGdJkgpjOEuSVBjDWZKkwhjOkiQVxnCWJKkwhrMkSYUxnCVJKozhLElS\nYQxnSZIKYzhLklQYw1mSpMIYzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEWDOeIuCQi9kTEbX1j\nH4qIuyLiloj4QkSs6HvsvRGxIyLujojXtlW4pMHZz9J4OJiZ86XAGQeMXQu8JDNfCtwDvBcgIk4E\nzgV+rXrOP0XEosaqlVTXpdjPUvEWDOfM/Brw/QPGvpyZ+6u7NwBrqtvrgSsyc19mfgfYAZzSYL2S\narCfpfHQxDHntwBfrG6vBh7qe2xnNfYsEbExIrZHxPZ9+55uoAxJDajdz3sfa7lCaQrUCueIeB+w\nH7hs0Odm5ubMnMnMmSVLDqlThqQGNNXPS1csvL2k57Z42CdGxJuAs4DTMzOr4V3A8X2branGJBXM\nfpbKMtTMOSLOAN4NnJ2Ze/se2gqcGxFLImIdcALwn/XLlNQW+1kqz4Iz54i4HHglcGxE7AQupHc2\n5xLg2ogAuCEz/zgzb4+IK4E76C2PXZCZP22reEmDsZ+l8bBgOGfmeXMMX/wc238A+ECdoiS1w36W\nxoNXCJMkqTCGsyRJhTGcJUkqjOEsSVJhDGdJkgpjOEuSVJj4+cWAOiwi4lHgKeC7Xdcyj2Mps7ZS\n64Jyayu1Lnh2bS/IzOO6KmZYEfEEcHfXdcxjnP79S1FqXTA+tQ3cy0WEM0BEbM/Mma7rmEuptZVa\nF5RbW6l1Qdm1DaLk78PaBldqXTDZtbmsLUlSYQxnSZIKU1I4b+66gOdQam2l1gXl1lZqXVB2bYMo\n+fuwtsGVWhdMcG3FHHOWJEk9Jc2cJUkSBYRzRJwREXdHxI6I2NRxLcdHxPURcUdE3B4R76jG3x8R\nuyLi5urjzI7quz8ibq1q2F6NHR0R10bEvdXno0Zc04v69svNEfF4RLyzq30WEZdExJ6IuK1vbM59\nFD3/UP3s3RIRJ3dQ24ci4q7q9b8QESuq8bUR8aO+/ffxNmtrSin9bC8PXZf9PHxdzfZyZnb2ASwC\nvg28EDgU+BZwYof1rAJOrm4fAdwDnAi8H/iLLvdVVdP9wLEHjP0NsKm6vQm4qON/z4eBF3S1z4BX\nACcDty20j4AzgS8CAZwKbOugtt8GFle3L+qrbW3/duPwUVI/28uN/XvazwdfV6O93PXM+RRgR2be\nl5k/Aa4A1ndVTGbuzsybqttPAHcCq7uq5yCtB7ZUt7cA53RYy+nAtzPzga4KyMyvAd8/YHi+fbQe\n+HT23ACsiIhVo6wtM7+cmfuruzcAa9p6/REopp/t5UbYzwPU1XQvdx3Oq4GH+u7vpJAGioi1wEnA\ntmrobdVyxSVdLDdVEvhyRNwYERursZWZubu6/TCwspvSADgXuLzvfgn7DObfR6X9/L2F3m/+s9ZF\nxDcj4j8i4re6KmoApe1PwF6uwX4eXu1e7jqcixQRy4HPAe/MzMeBjwG/DPwGsBv4u45KOy0zTwZe\nB1wQEa/ofzB7ayidnH4fEYcCZwOfrYZK2WfP0OU+ei4R8T5gP3BZNbQbeH5mngS8C/jXiHheV/WN\nK3t5OPbz8Jrq5a7DeRdwfN/9NdVYZyLiEHrNfFlmfh4gMx/JzJ9m5s+Af6a3fDdymbmr+rwH+EJV\nxyOzSzfV5z1d1EbvP5mbMvORqsYi9lllvn1UxM9fRLwJOAt4Y/WfDZm5LzO/V92+kd6x3F8ddW0D\nKmJ/zrKXa7Gfh9BkL3cdzt8AToiIddVvaucCW7sqJiICuBi4MzM/3Dfef9zi94DbDnzuCGpbFhFH\nzN6md/LBbfT214Zqsw3A1aOurXIefUtgJeyzPvPto63AH1VneZ4K/LBvuWwkIuIM4N3A2Zm5t2/8\nuIhYVN1+IXACcN8oaxtCMf1sL9dmPw+o8V5u62y2g/2gd4bdPfR+m3hfx7WcRm+J5Bbg5urjTOBf\ngFur8a3Aqg5qeyG9s1+/Bdw+u6+AY4DrgHuBrwBHd1DbMuB7wJF9Y53sM3r/oewGnqZ3zOn8+fYR\nvbM6/7H62bsVmOmgth30jpPN/rx9vNr296t/55uBm4DfHfW/65DfYxH9bC/Xqs9+Hq6uRnvZK4RJ\nklSYrpe1JUnSAQxnSZIKYzhLklQYw1mSpMIYzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mS\nCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTCGsyRJhTGcJUkqjOEsSVJhDGdJkgpjOEuSVBjDWZKkwhjO\nkiQVprVwjog
zIuLuiNgREZvaeh1J7bKXpdGLzGz+i0YsAu4BXgPsBL4BnJeZdzT+YpJaYy9L3Vjc\n0tc9BdiRmfcBRMQVwHpgzoZedvixueKItS2VIo2n//fojd/NzOM6LmOgXgZYftgRecyyY0dUnlS+\n7z31XZ788RMxyHPaCufVwEN993cCL+vfICI2AhsBjlz+fP7kDd9oqRRpPP3VP/7CA13XwEH0Mjyz\nn49eegybzrhwNNVJY+CD//Y/Bn5OZyeEZebmzJzJzJllh3c9OZBUR38/Lz/siK7LkcZeW+G8Czi+\n7/6aakzAvmWnsW/ZaV2XIR0Me1nqQFvh/A3ghIhYFxGHAucCW1t6LUntsZelDrQSzpm5H3gb8CXg\nTuDKzLy9jdcaN/0zZmfPKp29LHWjrRPCyMxrgGva+vqSRsNelkbPK4RJklSY1mbOeqb5lrBnx5c8\n9fVRliNJKpgzZ0mSCmM4S5JUGMN5BA7mrGzf+yxJmuUx5xYZtpKkYThzliSpMIZzYZxtS5IM55bU\nCVmPP0vSdDOcJUkqjOEsSVJhDGdJkgpjODesyePFHneWpOnk+5wbYpBKkprizFmSpMIYzoXzbVWS\nNH0M5wYYnpKkJhnOkiQVxnAeE87OJWl6eLZ2DaMOzNnXW/LU10f6upKk0XLmLElSYQznIbnMLElq\ny9DhHBHHR8T1EXFHRNweEe+oxo+OiGsj4t7q81HNlSvwFwM1z36WylJn5rwf+PPMPBE4FbggIk4E\nNgHXZeYJwHXVfUlls5+lggwdzpm5OzNvqm4/AdwJrAbWA1uqzbYA59QtsiReFESTaFr7WSpVI8ec\nI2ItcBKwDViZmburhx4GVs7znI0RsT0itj/1o0ebKGOq+EuC2lK3n5/88RMjqVOaZLXDOSKWA58D\n3pmZj/c/lpkJ5FzPy8zNmTmTmTPLDj+ubhmSGtBEPy8/7IgRVCpNtlrhHBGH0GvkyzLz89XwIxGx\nqnp8FbCnXonlcKaqSTZt/SyVrM7Z2gFcDNyZmR/ue2grsKG6vQG4evjyJI2C/SyVpc4Vwl4O/CFw\na0TcXI39JfBB4MqIOB94AHhDvRK7V/KMed+y07ximJowNf0sjYOhwzkzvw7EPA+fPuzX1eC8rKfq\nsp+lsniFMEmSCmM4L6DkJW1J0mQynCeI732WpMlgOEuSVBj/nvM8nIFKkrrizHkO4x7M416/JE07\nw1mSpMIYzhPKk8MkaXx5zLmPYSZJKoEzZ0mSCmM4TzhXAyRp/LisjQEmSSqLM2dJkgoz9eE8DbNm\nz9yWpPEy9eEsSVJpDGdJkgozteE8jUu90/b9StK4mtpwliSpVIbzlJnGFQNJGjdTGc6TEE5vP+1x\n3n7a412XIUlqwVSGsyRJJat9hbCIWARsB3Zl5lkRsQ64AjgGuBH4w8z8Sd3XacI4zJgHnQ0Psv0/\nfP15/3V737LTWPLU1wd6LU2+cepnaZI1cfnOdwB3ArP/818EfCQzr4iIjwPnAx9r4HUmzqiXpQ98\nvU98aaQvr/FgP0sFqLWsHRFrgN8BPlndD+BVwFXVJluAc+q8hqTRsJ+lctSdOf898G7giOr+McBj\nmbm/ur8TWF3zNRrR9ZJ2iSdvvfW1L33W2Ce+dEsHlagQY9PP0qQbOpwj4ixgT2beGBGvHOL5G4GN\nAEcuf/6wZRStxEBeSH9gG9TTo8l+PnrpMQ1XJ02fOjPnlwNnR8SZwGH0jlF9FFgREYur37bXALvm\nenJmbgY2A6z+xZmsUYek+hrr5xccs85+lmoa+phzZr43M9dk5lrgXODfM/ONwPXA66vNNgBX166y\nhq4uujGOs+YDvfW1L51z6VuTZ1z6WZoWbbzP+T3AuyJiB71jVhe38BqSRsN+ljrQxFupyMyvAl+t\nbt8HnNLE123CqN7LO8kzTI9DT5eS+1maFl4hrAGTHMwHmqbvVZK6YjhLklSYRpa1p9W0ziJnv2+X\nuCWpHc6chzStwdzPfSBJ7TCcJUkqjMvaA3K2+EwucUtS85w5D8Bgnp/7RpKaYzhLklQYl7UPgrPC\ng+MStyQ1w5nzAgzmwXlNbkmqx3CWJKkwhrOkqbL3yTez98k3d12G9Jw85jwPl2Xre+trX+rxZ43E\nMGE7yHOWLv/UwF9fqsOZsyRJhXHmPAdnzc3xDG41qavl6Ple1xm12uLMWZKkwjhzllS0kk/e6q/N\nWbSaZDj3cTm7PZ4cpkGUHMjzMajVJJe1JUkqjDNnSUUYx9nyfJxFqy7DWVKnJimU5zL7/RnSGoTL\n2pIkFaZWOEfEioi4KiLuiog7I+I3I+LoiLg2Iu6tPh/VVLFt8Q81jIb7uWxd9POkz5r7TdP3qvrq\nzpw/CvxbZr4Y+HXgTmATcF1mngBcV92XVL6R9PPsta2nMaym+XvXYIYO54g4EngFcDFAZv4kMx8D\n1gNbqs22AOfULVJSu+xnqSx1TghbBzwKfCoifh24EXgHsDIzd1fbPAysrFeipBFovZ+dLT6TJ4rp\nudRZ1l4MnAx8LDNPAp7igCWvzEwg53pyRGyMiO0Rsf2pHz1ao4x6PAY6eu7zIjXWz0/++IlnPW4w\nz899o7nUCeedwM7M3Fbdv4pecz8SEasAqs975npyZm7OzJnMnFl2+HE1ypDUgMb6eflhR4ykYGmS\nDR3Omfkw8FBEvKgaOh24A9gKbKjGNgBX16pQUuva6mdPfjo47icdqO5FSP4UuCwiDgXuA95ML/Cv\njIjzgQeAN9R8DUmjYT9LhagVzpl5MzAzx0On1/m6kkbPfpbK4RXCJLXCZdrBuc80y3CWJKkwhrOk\nRv3sp/c7A6zBk8MEhrMkFcmQnm6GsyRJhTGcJUkqTN33OY8tLyHZrdn9/4kv3dJxJVLZ9j75Zq+/\nPYWcOUuSVJipnTnPzticQXfDGbMkzc+ZsyQVzjO3p4/hLElSYQxnSZIKYzhL0phwaXt6GM6SJBXG\ncJYkqTCGsyRJhTGcJUkqjOEsSVJhDGdJkgoz9eHsZSRHz30uDc+rhU2HqQ9nSZJKYzhLklSYWuEc\nEX8WEbdHxG0RcXlEHBYR6yJiW0TsiIjPRMShTRUrqT32s1SOocM5IlYDbwdmMvMlwCLgXOAi4COZ\n+SvAD4Dzmyi0TZ/40i0eBx0B93O5JqmfpUlQd1l7MXB4RCwGlgK7gVcBV1WPbwHOqfkakkbDfpYK\nMXQ4Z+Yu4G+BB+k18Q+BG4HHMnN/tdlOYPVcz4+IjRGxPSK2P/WjR4ctQ1IDGu3nn4yiYmmy1VnW\nPgpYD6wDfglYBpxxsM/PzM2ZOZOZM8sOP27YMiQ1oNF+9qi0VFudZe1XA9/JzEcz82ng88DLgRXV\nshjAGmBXzRoltc9+lgpSJ5wfBE6NiKUREcDpwB3A9cDrq202AFfXK3F0PGGpPe7X4k1cP0vjrM4x\n5230ThS5Cbi1+lqbgfcA74qIHcAxwMUN1CmpRfbz+PEqYZNt8cKbzC8z
LwQuPGD4PuCUOl9X0ujZ\nz1I5vELYHFyCbY6HCqR2LF3+qa5LUIsM53kYKvW5/yRpOIazJEmFMZwlSSqM4SxJUmEM5wV43HRw\nHq+XpHpqvZVqWswGzVtf+9KOKymbgSxJzXDmLElSYQznATgznJ/7RpKa47L2gFzifiZDWZKa58xZ\nkqTCGM5DcsboPpC6sHT5p7x05xRwWbuGaV3iNpQlqV3OnCVJKozh3IBpmklO0/cqSV1xWbshB4bW\nJC11G8iSNFrOnCVJKozh3JJJmG16jWypLJ6lPT1c1m5Rf7CNyzK3YSxJ3XPmLElSYZw5j8hcM9Ku\nZ9POkqXx4HL29HHmLElSYRacOUfEJcBZwJ7MfEk1djTwGWAtcD/whsz8QUQE8FHgTGAv8KbMvKmd\n0sffqGfTzpRlP0vj4WCWtS8F/ifw6b6xTcB1mfnBiNhU3X8P8DrghOrjZcDHqs86SAcboLMhbuBq\nQJdiP48Vl7Sn04LL2pn5NeD7BwyvB7ZUt7cA5/SNfzp7bgBWRMSqpoqVVI/9LI2HYU8IW5mZu6vb\nDwMrq9urgYf6tttZje3mABGxEdgIcOTy5w9ZxvRyxqwGNdrPRx3eXqHStKh9tnZmZkTkEM/bDGwG\nWP2LMwM/X1Lzmujn41cM/nw9m8vZ023Ys7UfmV3eqj7vqcZ3Acf3bbemGpNULvtZKsyw4bwV2FDd\n3gBc3Tf+R9FzKvDDvuUySWV1GWLFAAAHPElEQVRqtJ9/YdFaZ301LF3+KfefFg7niLgc+L/AiyJi\nZ0ScD3wQeE1E3Au8uroPcA1wH7AD+GfgT1qpWtJQRtnPBszg3GeateAx58w8b56HTp9j2wQuqFuU\npHbYz9J48AphkiQVxmtrS2rN7DLt3iff3HElZXM5Wwdy5ixJUmEMZ0mtc2Y4P/eN5mI4S5JUGI85\nSxoJjz8/kzNmPRfDWdJI9YfStAW1gayD5bK2JEmFMZwldWaaZpLT9L2qPpe1JXVq0o9FG8oahjNn\nSZIK48xZUhEm6UQxZ8uqy3CWVJxxDGoDWU1yWVuSpMI4c5ZUtJJn0c6W1RbDWdLYmC8M2w5tQ1ij\n5rK2JEmFceYsaewNMrOdnWU7G1bJDGdJU8VQ1jhwWVuSpMIYzpIkFcZwliSpMAuGc0RcEhF7IuK2\nvrEPRcRdEXFLRHwhIlb0PfbeiNgREXdHxGvbKlzS4OxnaTwczMz5UuCMA8auBV6SmS8F7gHeCxAR\nJwLnAr9WPeefImJRY9VKqutS7GepeAuGc2Z+Dfj+AWNfzsz91d0bgDXV7fXAFZm5LzO/A+wATmmw\nXkk12M/SeGjimPNbgC9Wt1cDD/U9trMae5aI2BgR2yNi+1M/erSBMiQ1oHY/P/njJ1ouUZp8tcI5\nIt4H7AcuG/S5mbk5M2cyc2bZ4cfVKUNSA5rq5+WHHdF8cdKUGfoiJBHxJuAs4PTMzGp4F3B832Zr\nqjFJBbOfpbIMNXOOiDOAdwNnZ+bevoe2AudGxJKIWAecAPxn/TIltcV+lsqz4Mw5Ii4HXgkcGxE7\ngQvpnc25BLg2IgBuyMw/zszbI+JK4A56y2MXZOZP2ype0mDsZ2k8LBjOmXneHMMXP8f2HwA+UKco\nSe2wn6Xx4BXCJEkqjOEsSVJhDGdJkgpjOEuSVBjDWZKkwhjOkiQVJn5+MaAOi4h4FHgK+G7Xtczj\nWMqsrdS6oNzaSq0Lnl3bCzJz7K5tGxFPAHd3Xcc8xunfvxSl1gXjU9vAvVxEOANExPbMnOm6jrmU\nWlupdUG5tZVaF5Rd2yBK/j6sbXCl1gWTXZvL2pIkFcZwliSpMCWF8+auC3gOpdZWal1Qbm2l1gVl\n1zaIkr8PaxtcqXXBBNdWzDFnSZLUU9LMWZIkYThLklSczsM5Is6IiLsjYkdEbOq4luMj4vqIuCMi\nbo+Id1Tj74+IXRFxc/VxZkf13R8Rt1Y1bK/Gjo6IayPi3urzUSOu6UV9++XmiHg8It7Z1T6LiEsi\nYk9E3NY3Nuc+ip5/qH72bomIkzuo7UMRcVf1+l+IiBXV+NqI+FHf/vt4m7U1pZR+tpeHrst+Hr6u\nZns5Mzv7ABYB3wZeCBwKfAs4scN6VgEnV7ePAO4BTgTeD/xFl/uqqul+4NgDxv4G2FTd3gRc1PG/\n58PAC7raZ8ArgJOB2xbaR8CZwBeBAE4FtnVQ228Di6vbF/XVtrZ/u3H4KKmf7eXG/j3t54Ovq9Fe\n7nrmfAqwIzPvy8yfAFcA67sqJjN3Z+ZN1e0ngDuB1V3Vc5DWA1uq21uAczqs5XTg25n5QFcFZObX\ngO8fMDzfPloPfDp7bgBWRMSqUdaWmV/OzP3V3RuANW29/ggU08/2ciPs5wHqarqXuw7n1cBDffd3\nUkgDRcRa4CRgWzX0tmq54pIulpsqCXw5Im6MiI3V2MrM3F3dfhhY2U1pAJwLXN53v4R9BvPvo9J+\n/t5C7zf/Wesi4psR8R8R8VtdFTWA0vYnYC/XYD8Pr3Yvdx3ORYqI5cDngHdm5uPAx4BfBn4D2A38\nXUelnZaZJwOvAy6IiFf0P5i9NZRO3hsXEYcCZwOfrYZK2WfP0OU+ei4R8T5gP3BZNbQbeH5mngS8\nC/jXiHheV/WNK3t5OPbz8Jrq5a7DeRdwfN/9NdVYZyLiEHrNfFlmfh4gMx/JzJ9m5s+Af6a3fDdy\nmbmr+rwH+EJVxyOzSzfV5z1d1EbvP5mbMvORqsYi9lllvn1UxM9fRLwJOAt4Y/WfDZm5LzO/V92+\nkd6x3F8ddW0DKmJ/zrKXa7Gfh9BkL3cdzt8AToiIddVvaucCW7sqJiICuBi4MzM/3Dfef9zi94Db\nDnzuCGpbFhFHzN6md/LBbfT214Zqsw3A1aOurXIefUtgJeyzPvPto63AH1VneZ4K/LBvuWwkIuIM\n4N3A2Zm5t2/8uIhYVN1+IXACcN8oaxtCMf1sL9dmPw+o8V5u62y2g/2gd4bdPfR+m3hfx7WcRm+J\n5Bbg5urjTOBfgFur8a3Aqg5qeyG9s1+/Bdw+u6+AY4DrgHuBrwBHd1DbMuB7wJF9Y53sM3r/oewG\nnqZ3zOn8+fYRvbM6/7H62bsVmOmgth30jpPN/rx9vNr296t/55uBm4DfHfW/65DfYxH9bC/Xqs9+\nHq6uRnvZy3dKklSYrpe1JUnSAQxnSZIKYzhLklQYw1mSpMIYzpIkFcZwliSpMIazJEmF+f9DLBmh\n69wGYQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P8rXzGehNU_g", + "colab_type": "text" + }, + "source": [ + "### Visualise Segmentation Masks" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GNpkhNrINZHR", + "colab_type": "code", + "outputId": "1471b268-82c2-48fc-9613-72bd0a924726", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 521 + } + }, + "source": [ + "def visMask(im, seg_mask):\n", + " m = seg_mask.instances.masks\n", + " m = m.numpy().reshape([128,128])\n", + " im = np.transpose(im, (2,0,1)) # 3, 128, 128\n", + " res = im*m\n", + " res = np.transpose(res, (1,2,0)) # 128, 128, 3 \n", + " plt.imshow(res)\n", + "\n", + "im, boxlist, idx = train_dt[0]\n", + "plt.imshow(im)\n", + "plt.show()\n", + "seg_mask = boxlist.extra_fields['seg_masks']\n", + "visMask(im, seg_mask)" + ], + "execution_count": 6, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQUAAAD8CAYAAAB+fLH0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAD6xJREFUeJzt3X+s3XV9x/HnSyo4MJUipqktGZ02\nGtQ5yI2WuCwGNAIzwhJjIGZ2jqRZwiY6EwfzDyCZRjejYuLYGlG7hfBjyEbDmMoKxuwPOosYhFak\ngyElhWIsdpGFWX3vj/O983zK7W57z+/L85Hc3PP9nu+533c/ved1Pp/P+d7zSVUhSfNeMukCJE0X\nQ0FSw1CQ1DAUJDUMBUkNQ0FSw1CQ1BhZKCQ5L8nDSfYkuWJU55E0XBnFxUtJjgN+CLwT2At8B7ik\nqnYN/WSShmrFiH7uW4A9VfUoQJKbgAuBBUNh5aqX1Kte7UhGGqVHd/3ix1X1qsWOG1UorAWe6Nve\nC7y1/4Akm4HNAKeueQmfvHnliEqRBHDxmw48fjTHTezluaq2VNVcVc2tXJVJlSHpMKMKhSeB0/q2\n13X7JE25UYXCd4ANSdYnOR64GNg2onNJGqKRzClU1aEkfwx8AzgO+HJVPTSKc0karlFNNFJVdwJ3\njurnSxoN3weU1DAUJDUMBUmNkc0pDOqce3dMugQdpbs3vnXxgzQz7ClIahgKkhqGgqSGoSCpYShI\nahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCp\nYShIaiw5FJKcluSeJLuSPJTk8m7/KUnuSvJI933V8MqVNGqD9BQOAR+tqjOAjcBlSc4ArgC2V9UG\nYHu3LWlGLDkUqmpfVX23u/1fwG5gLXAhsLU7bCtw0aBFShqfocwpJDkdOBPYAayuqn3dXU8Bq4/w\nmM1JdibZefBADaMMSUMwcCgkeTnwNeDDVXWw/76qKmDBZ3xVbamquaqaW7kqg5YhaUgGCoUkL6UX\nCDdU1W3d7qeTrOnuXwPsH6xESeM0yLsPAa4HdlfVZ/vu2gZs6m5vAm5fenmSxm2QBWbfBvw+8P0k\n3+v2/TnwKeCWJJcCjwPvG6xESeO05FCoqn8DjjQZcO5Sf66kyfKKRkkNQ0FSw1CQ1DAUJDUMBUkN\nQ0FSw1CQ1DAUJDUMBUkNQ0FSw1CQ1DAUJDUMBUkNQ0FSw1CQ1DAUJDUMBUkNQ0FSw1CQ1DAUJDUM\nBUkNQ0FSw1CQ1DAUJDWGscDscUnuT3JHt70+yY4ke5LcnOT4wcuUNC7D6ClcDuzu2/408Lmqei1w\nALh0COeQNCaDrjq9Dvhd4EvddoBzgFu7Q7YCFw1yDknjNWhP4fPAx4BfdtuvBJ6tqkPd9l5g7YDn\nkDRGgyxF/25gf1Xdt8THb06yM8nOgwdqqWVIGrJBl6J/T5ILgJcBK4FrgZOTrOh6C+uAJxd6cFVt\nAbYAvOYNK0wFaUosuadQVVdW1bqqOh24GLi7qt4P3AO8tztsE3D7wFVKGptRXKfwZ8CfJtlDb47h\n+hGcQ9KIDDJ8+D9V9S3gW93tR4G3DOPnSho/r2iU1DAUJDUMBUkNQ0FSw1CQ1DAUJDUMBQ3sqqvv\n56qr7590GRoSQ0FSw1DQ0NhjWB4MBQ2d4TDbDAVJDUNBI2NvYTYZCpIahoJGalrnF054bs2kS5ha\nhoKkxlA+T0GaBsf66n8sxz9/4r5jLWdmGQoai/khxDVXnznUnzuuYUD/eZZ7QDh8kNSwp6CZM+lJ\nwuXea7CnIKlhKGislvL25AnPrWm+psk017ZUhoKm2qw90ZZDOBgKkhqGgsbuaK5ynPVX3Fmu3VCQ\n1BgoFJKcnOTWJD9IsjvJ2UlOSXJXkke676uGVayWv1nvIfSb1X/LoD2Fa4GvV9XrgTcDu4ErgO1V\ntQHY3m1LL9A/jJjVJ9DRmLV/25JDIckrgN+hW0C2qv6nqp4FLgS2dodtBS4atEhJ4zNIT2E98Azw\nlST3J/lSkpOA1VU1f5nXU8DqQYvU8jZLr6KDmJV/5yChsAI4C7iuqs4EfsZhQ4WqKqAWenCSzUl2\nJtl58MCCh0iagEFCYS+wt6p2dNu30guJp5OsAei+71/owVW1parmqmpu5aoMUIakYVpyKFTVU8AT\nSV7X7ToX2AVsAzZ1+zYBtw9UoaSxGvSvJP8EuCHJ8cCjwAfpBc0tSS4FHgfeN+A5tEx96mMXTLqE\nsZufV5jmv64cKBSq6nvA3AJ3nTvIz5U0OV7RKKnhh6xo7F6Mw4bDTfMwwp6CpIahIKlhKGisHDq0\npvEqR0NBUsNQkNQwFCQ1DAVJDUNBUmNqL166e+NbJ13CEU3j0urSsNhTkNQwFCQ1DAVJDUNBUmNq\nJxq1vHh585FN219MGgoaiyv+8k7AcFjItITBPIcPkhqGwhJcc/WZXHP1mZMuQxoJQ0FS
w1CQ1DAU\nBuAwQsuRoSCpYShIahgKkhoDhUKSjyR5KMmDSW5M8rIk65PsSLInyc3dknLLmvMKWk6WHApJ1gIf\nAuaq6o3AccDFwKeBz1XVa4EDwKXDKFTSeAw6fFgB/FqSFcCJwD7gHHrL0gNsBS4a8BxaRqbtkt5J\nm8b2WPLfPlTVk0k+A/wI+G/gm8B9wLNVdag7bC+wduAqZ8D8EMJPZVqYQ6zZMcjwYRVwIbAeeDVw\nEnDeMTx+c5KdSXYePFBLLUPSkA0yfHgH8FhVPVNVPwduA94GnNwNJwDWAU8u9OCq2lJVc1U1t3JV\nBihDs+b5E/dNZbd5nKa5DQYJhR8BG5OcmCTAucAu4B7gvd0xm4DbBytR0jgNMqewI8mtwHeBQ8D9\nwBbgn4GbkvxFt+/6YRQ6K5xbaDmXMHtSNfnx/GvesKI+efPKSZcxdC/2YDjaQJjGRVZHZZJDhovf\ndOC+qppb7DivaJTU8OPYRujFOpRwyDDb7ClIathTGIMXS49hqT2E50/c96KYV5jWtyAPZyiM0XIN\nh2EMF+afMMsxHGYlDOY5fJDUsKcwAf2vrLPaaxjVZOJy6jHMWg9hnj0FSQ17ChM2S72Gcb7VOOs9\nhlntJYA9BUmHsacwRQ5/JZ50z2EaLkKatbcrZ7mHMM9QmGKTGFpMQxAc7vAn2jSFxHIIgcM5fJDU\nsKcwI471FXy+ZzGNr/yD6n91nkSvYTn2DvrZU5DUsKewTC3HHsJCxtVrWO69g36GgpaNY3ninvDc\nmhfVE/1YOHyQ1DAU9KJkL+HIDAVJDUNBUsNQkNQwFCQ1DAVJDUNBUmPRUEjy5ST7kzzYt++UJHcl\neaT7vqrbnyRfSLInyQNJzhpl8ZKG72h6Cl/lhUvMXwFsr6oNwPZuG+B8YEP3tRm4bjhlShqXRUOh\nqr4N/OSw3RcCW7vbW4GL+vb/XfXcS29Z+un543dJi1rqnMLqqpq/JOwpYHV3ey3wRN9xe7t9kmbE\nwBON1Vu2+piXrk6yOcnOJDsPHpj8yteSepYaCk/PDwu67/u7/U8Cp/Udt67b9wJVtaWq5qpqbuWq\nLLEMScO21FDYBmzqbm8Cbu/b/4HuXYiNwE/7hhmSZsCin6eQ5Ebg7cCpSfYCVwGfAm5JcinwOPC+\n7vA7gQuAPcBzwAdHULOkEVo0FKrqkiPcde4CxxZw2aBFSZocr2iU1DAUJDUMBUkNQ0FSw1CQ1DAU\nJDUMBUkNQ0FSw1CQ1DAUJDUMBUkNQ0FSw1CQ1DAUJDUMBUkNQ0FSw1CQ1DAUJDUMBUkNQ0FSw1CQ\n1DAUJDUMBUkNQ0FSw1CQ1Fg0FJJ8Ocn+JA/27furJD9I8kCSf0xyct99VybZk+ThJO8aVeGSRuNo\negpfBc47bN9dwBur6jeBHwJXAiQ5A7gYeEP3mL9OctzQqpU0couGQlV9G/jJYfu+WVWHus176S05\nD3AhcFNVPV9Vj9FbaPYtQ6xX0ogNY07hD4F/6W6vBZ7ou29vt0/SjBgoFJJ8HDgE3LCEx25OsjPJ\nzoMHapAyJA3RkkMhyR8A7wbe3y1BD/AkcFrfYeu6fS9QVVuqaq6q5lauylLLkDRkSwqFJOcBHwPe\nU1XP9d21Dbg4yQlJ1gMbgH8fvExJ47JisQOS3Ai8HTg1yV7gKnrvNpwA3JUE4N6q+qOqeijJLcAu\nesOKy6rqF6MqXtLwLRoKVXXJAruv/3+O/wTwiUGKkjQ5XtEoqWEoSGoYCpIahoKkhqEgqWEoSGoY\nCpIahoKkRn71ZwsTLCJ5BvgZ8ONJ1wKcinX0s47WLNfx61X1qsUOmopQAEiys6rmrMM6rGOydTh8\nkNQwFCQ1pikUtky6gI51tKyjtezrmJo5BUnTYZp6CpKmwFSEQpLzunUi9iS5YkznPC3JPUl2JXko\nyeXd/lOS3JXkke77qjHVc1yS+5Pc0W2vT7Kja5Obkxw/hhpOTnJrt6bH7iRnT6I9knyk+z95MMmN\nSV42rvY4wjonC7ZBer7Q1fRAkrNGXMdY1luZeCh060J8ETgfOAO4pFs/YtQOAR+tqjOAjcBl3Xmv\nALZX1QZge7c9DpcDu/u2Pw18rqpeCxwALh1DDdcCX6+q1wNv7uoZa3skWQt8CJirqjcCx9FbS2Rc\n7fFVXrjOyZHa4Hx6Hzm4AdgMXDfiOsaz3kpVTfQLOBv4Rt/2lcCVE6jjduCdwMPAmm7fGuDhMZx7\nHb1ftnOAO4DQuzBlxUJtNKIaXgE8RjfP1Ld/rO3Br5YJOIXeJ4PdAbxrnO0BnA48uFgbAH8LXLLQ\ncaOo47D7fg+4obvdPGeAbwBnL/W8E+8pMAVrRSQ5HTgT2AGsrqp93V1PAavHUMLn6X0Q7i+77VcC\nz9avFtwZR5usB54BvtINY76U5CTG3B5V9STwGeBHwD7gp8B9jL89+h2pDSb5uzuy9VamIRQmKsnL\nga8BH66qg/33VS92R/r2TJJ3A/ur6r5RnucorADOAq6rqjPpXXbeDBXG1B6r6K00th54NXASL+xG\nT8w42mAxg6y3cjSmIRSOeq2IYUvyUnqBcENV3dbtfjrJmu7+NcD+EZfxNuA9Sf4TuIneEOJa4OQk\n8x+sO4422Qvsraod3fat9EJi3O3xDuCxqnqmqn4O3EavjcbdHv2O1AZj/90ddL2VozENofAdYEM3\nu3w8vQmTbaM+aXqfTX89sLuqPtt31zZgU3d7E725hpGpqiural1VnU7v3353Vb0fuAd47xjreAp4\nIsnrul3n0vuo/rG2B71hw8YkJ3b/R/N1jLU9DnOkNtgGfKB7F2Ij8NO+YcbQjW29lVFOGh3DhMoF\n9GZT/wP4+JjO+dv0uoEPAN/rvi6gN57fDjwC/Ctwyhjb4e3AHd3t3+j+Y/cA/wCcMIbz/xaws2uT\nfwJWTaI9gGuAHwAPAn9Pb42RsbQHcCO9uYyf0+s9XXqkNqA3IfzF7vf2+/TeMRllHXvozR3M/77+\nTd/xH+/qeBg4f5Bze0WjpMY0DB8kTRFDQVLDUJDUMBQkNQwFSQ1DQVLDUJDUMBQkNf4XHdFBfzyZ\nI98AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQUAAAD8CAYAAAB+fLH0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAD2VJREFUeJzt3WusXFd5xvH/U5tAE1TiQLGMHdWu\nMKncqG0iiwbRVoiASFJEgkAoCBW3WFiVaAkUCRz4UFXqB1ARN4mmNQngVlESmktjpSoQTBDtB1xs\ngkLi4MSQQGw5cRAJVLRqcXn7YbbJLF84l5nZM+f4/5NGZ/aePWe/Xj7nOWut2TMrVYUkHfdL0y5A\n0mwxFCQ1DAVJDUNBUsNQkNQwFCQ1DAVJjYmFQpLLkhxIcjDJ9kmdR9J4ZRIXLyVZATwEvBo4BHwd\neHNV7R/7ySSN1coJfd+XAger6rsASW4GrgROGQpJvKxSmrwfVNWvznXQpIYPa4HHhrYPdft+Lsm2\nJHuT7J1QDZJa35vPQZPqKcypqnYAO8CegjRLJtVTOAycP7S9rtsnacZNKhS+DmxMsiHJWcDVwK4J\nnUvSGE1k+FBVx5L8GfAFYAXw6ap6YBLnkjReE3lJcsFFOKcg9WFfVW2e6yCvaJTUMBQkNQwFSY2p\nXacwl6OfemjaJWieXvj2l0y7BI2RPQVJDUNBUsNQkNQwFCQ1DAVJDUNBUsNQkNQwFCQ1DAVJDUNB\nUsNQkNQwFCQ1DAVJDUNBUsNQkNQwFCQ1DAVJDUNBUsNQkNQwFCQ1DAVJjUWHQpLzk9yTZH+SB5Jc\n0+0/L8ndSR7uvq4aX7mSJm2UnsIx4D1VtQm4BHhHkk3AdmB3VW0EdnfbkpaIRYdCVR2pqm909/8T\neBBYC1wJ7OwO2wlcNWqRkvozlsVgkqwHLgL2AKur6kj30OPA6tM8ZxuwbRznlzQ+I080JnkucBvw\nrqr68fBjNVjS+pQrSlfVjqraPJ9VcCX1Z6RQSPIsBoFwY1Xd3u1+Isma7vE1wNHRSpTUp1FefQhw\nA/BgVX1k6KFdwJbu/hbgzsWXJ6lvo8wpvBz4I+BbSb7Z7Xs/8EHgc0m2At8D3jRaiZL6tOhQqKp/\nB3Kahy9d7PeVNF1e0SipYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqSG\noSCpYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqTGOBaYXZHk3iR3ddsb\nkuxJcjDJLUnOGr1MSX0ZR0/hGuDBoe0PAR+tqhcDTwFbx3AOST0ZddXpdcAfAtd32wFeCdzaHbIT\nuGqUc0jq16g9hY8B7wV+1m0/H3i6qo5124eAtSOeQ1KPRlmK/rXA0arat8jnb0uyN8nexdYgafxG\nXYr+dUmuAJ4D/ArwceDcJCu73sI64PCpnlxVO4AdAElqhDokjdGiewpVdW1Vrauq9cDVwJer6i3A\nPcAbu8O2AHeOXKWk3kziOoX3AX+R5CCDOYYbJnAOSRMyyvDh56rqK8BXuvvfBV46ju8rqX9e0Sip\nYShIahgKkhqGgqSGoSCpYShIahgKGtkFb3iUC97w6LTL0JgYCpIahoLGxh7D8mAoaOwMh6XNUJDU\nMBQ0MfYWliZDQVLDUNBEzer8wh17Nk27hJllKEhqjOXzFKRZsNC//gs5/vW/u3+h5SxZhoJ6cXwI\nceC29WP9vn0NA4bPs9wDwuGDpIY9BS05054kXO69BnsKkhr2FNSrC97w6ILnFabdM/hFTqxtOfQc\n7Clops1yIJzKHXs2LbmaT2QoSGoYCurdfK5yXOp/cZdy7YaCpMZIE41JzgWuBy4ECngbcAC4BVgP\nPAq8qaqeGqlKnTGW8l/YEx3/tyy1ycdULX7B5yQ7gX+rquuTnAWcDbwf+GFVfTDJdmBVVb1vju9z\nUhFHP/XQoutSv37/82eN9PwDt61fVmFwOjMQDvuqavNcBy16+JDkecAf0C0gW1X/W1VPA1cCO7vD\ndgJXLfYckvo3ypzCBuBJ4DNJ7k1yfZJzgNVVdaQ75nFg9ahFank7E3oJsHT+naOEwkrgYuC6qroI\n+AmwffiAGoxNTjk+SbItyd4ke0eoQdKYjRIKh4BDVbWn276VQUg8kWQNQPf16KmeXFU7qmrzfMY4\nkvqz6FCoqseBx5Jc0O26FNgP7AK2dPu2AHeOVKGkXo363oc/B27sXnn4LvAnDILmc0m2At8D3jTi\nObRMffC9V0y7hN4thZcpRwqFqvomcKru/6WjfF9J0+MVjZIavnVavTsThw0nmuVhhD0FSQ1DQVLD\nUFCvHDq0ZvEqR0NBUsNQkNQwFCQ1DAVJDUNBUmNmL1564dtfMu0STmsWl1aXxsWegqSGoSCpYShI\nahgKkhozO9Go5cXLm09v1t4xOdK6D2Mr4hTrPswyX31YPMPhZD2GwWTXfZC0PBkKi3DgtvUcuG39\ntMuQJsJQkNQwFCQ1DIUROIzQcmQoSGoYCpIahoKkxkihkOTdSR5Icn+Sm5I8J8mGJHuSHExyS7ek\n3LLmvIKWk0WHQpK1wDuBzVV1IbACuBr4EPDRqnox8BSwdRyFSurHqMOHlcAvJ1kJnA0cAV7JYFl6\ngJ3AVSOeQ8vIrFzfPytmsT0W/Yaoqjqc5MPA94H/Br4I7AOerqpj3WGHgLUjV7kEHB9C+L6IU3OI\ntXSMMnxYBVwJbABeBJwDXLaA529LsjfJ3sXWIGn8Rnnr9KuAR6rqSYAktwMvB85NsrLrLawDDp/q\nyVW1A9jRPXdJvUtSozneZZ7F1ZH6MovDhuNGmVP4PnBJkrOTBLgU2A/cA7yxO2YLcOdoJUrq0yhz\nCnuS3Ap8AzgG3MvgL/+/ADcn+etu3w3jKHSpcG6h5VzC0uOHrEzQmR4M8w2EM2kYMeVhgx+yImnh\n/IzGCTpThxIOGZY2ewqSGs4p9Gi59xhG6SGcCfMKM/Ay5LzmFAyFKVhu4TDO4cJyDIcZCIPjnGiU\ntHD2FKZsqfYaJj2ZuBx6DDPUQzjOnoKkhbOnMENmvdcwjZcal2qPYQZ7CWBPQdJi2FOYYdPuOczK\nRUhLqbcwoz2E43xJcjnpKyBmJQh+kVkKiRkPgRM5fJC0cPYUlqnjPYul8Jd/FNPoNSyx3sEwewqS\nFs6egpaNSfYalnDvYJgTjdLp3LFn03L5RV8Ihw+SFs6egnTmsKcgaeEMBUkNQ0FSw1CQ1DAUJDUM\nBUmNOUMhyaeTHE1y/9C+85LcneTh7uuqbn+SfCLJwST3Jbl4ksVLGr/59BQ+y8lLzG8HdlfVRmB3\ntw1wObCxu
20DrhtPmZL6MmcoVNVXgR+esPtKYGd3fydw1dD+f6iBrzFYln7NuIqVNHmLnVNYXVVH\nuvuPA6u7+2uBx4aOO9Ttk7REjLyWZFXVYi5TTrKNwRBD0gxZbE/hiePDgu7r0W7/YeD8oePWdftO\nUlU7qmrzfK7FltSfxYbCLmBLd38LcOfQ/rd2r0JcAvxoaJghaSmoql94A24CjgA/ZTBHsBV4PoNX\nHR4GvgSc1x0b4JPAd4BvAZvn+v7d88qbN28Tv+2dz++jb52Wzhy+dVrSwhkKkhqGgqSGoSCpYShI\nahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCpYShIahgKkhqGgqSGoSCp\nYShIahgKkhqGgqSGoSCpYShIaswZCkk+neRokvuH9v1Nkm8nuS/JHUnOHXrs2iQHkxxI8ppJFS5p\nMubTU/gscNkJ++4GLqyq3wIeAq4FSLIJuBr4ze45f5tkxdiqlTRxc4ZCVX0V+OEJ+75YVce6za8x\nWHIe4Erg5qr6n6p6BDgIvHSM9UqasHHMKbwN+Nfu/lrgsaHHDnX7JC0RK0d5cpIPAMeAGxfx3G3A\ntlHOL2n8Fh0KSf4YeC1waT2znv1h4Pyhw9Z1+05SVTuAHd33cil6aUYsaviQ5DLgvcDrquq/hh7a\nBVyd5NlJNgAbgf8YvUxJfZmzp5DkJuAVwAuSHAL+ksGrDc8G7k4C8LWq+tOqeiDJ54D9DIYV76iq\n/5tU8ZLGL8/0/KdYhMMHqQ/7qmrzXAd5RaOkhqEgqWEoSGoYCpIahoKkhqEgqWEoSGoYCpIaI70h\naox+APyk+zptL8A6hllHaynX8WvzOWgmrmgESLJ3PldbWYd1WMdk63D4IKlhKEhqzFIo7Jh2AR3r\naFlHa9nXMTNzCpJmwyz1FCTNgJkIhSSXdetEHEyyvadznp/kniT7kzyQ5Jpu/3lJ7k7ycPd1VU/1\nrEhyb5K7uu0NSfZ0bXJLkrN6qOHcJLd2a3o8mORl02iPJO/u/k/uT3JTkuf01R6nWefklG2QgU90\nNd2X5OIJ19HLeitTD4VuXYhPApcDm4A3d+tHTNox4D1VtQm4BHhHd97twO6q2gjs7rb7cA3w4ND2\nh4CPVtWLgaeArT3U8HHg81X1G8Bvd/X02h5J1gLvBDZX1YXACgZrifTVHp/l5HVOTtcGlzP4yMGN\nDD6E+LoJ19HPeitVNdUb8DLgC0Pb1wLXTqGOO4FXAweANd2+NcCBHs69jsEP2yuBu4AwuDBl5ana\naEI1PA94hG6eaWh/r+3BM8sEnMfg4rq7gNf02R7AeuD+udoA+Hvgzac6bhJ1nPDY64Ebu/vN7wzw\nBeBliz3v1HsKzMBaEUnWAxcBe4DVVXWke+hxYHUPJXyMwQfh/qzbfj7wdD2z4E4fbbIBeBL4TDeM\nuT7JOfTcHlV1GPgw8H3gCPAjYB/9t8ew07XBNH92J7beyiyEwlQleS5wG/Cuqvrx8GM1iN2JvjyT\n5LXA0araN8nzzMNK4GLguqq6iMFl581Qoaf2WMVgpbENwIuAczi5Gz01fbTBXEZZb2U+ZiEU5r1W\nxLgleRaDQLixqm7vdj+RZE33+Brg6ITLeDnwuiSPAjczGEJ8HDg3yfH3pvTRJoeAQ1W1p9u+lUFI\n9N0erwIeqaonq+qnwO0M2qjv9hh2ujbo/Wd3aL2Vt3QBNfY6ZiEUvg5s7GaXz2IwYbJr0ifN4LPp\nbwAerKqPDD20C9jS3d/CYK5hYqrq2qpaV1XrGfzbv1xVbwHuAd7YYx2PA48luaDbdSmDj+rvtT0Y\nDBsuSXJ29390vI5e2+MEp2uDXcBbu1chLgF+NDTMGLve1luZ5KTRAiZUrmAwm/od4AM9nfP3GHQD\n7wO+2d2uYDCe3w08DHwJOK/HdngFcFd3/9e7/9iDwD8Bz+7h/L8D7O3a5J+BVdNoD+CvgG8D9wP/\nyGCNkV7aA7iJwVzGTxn0nraerg0YTAh/svu5/RaDV0wmWcdBBnMHx39e/27o+A90dRwALh/l3F7R\nKKkxC8MHSTPEUJDUMBQkNQwFSQ1DQVLDUJDUMBQkNQwFSY3/B5CihLXFt5GeAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ORQXaa6k30yD", + "colab_type": "text" + }, + "source": [ + "# Training a Model\n", + "\n", + "Now we move on to training our very own model. Here we will be finetuning the base of a Mask RCNN, modifying it to support Semantic Segmentation and change the number of classes to support this dataset. To do this we need\n", + "\n", + "1. A base model that has the same amount of output classes as our dataset. In this case, we have need for only 3 classes instead of COCO's 80. Hence , we first need to do some model trimming. \n", + "\n", + "2. Second, we need to build a Panoptic FPN model. That means attaching the semantic segmentation branch to the FPN.\n", + "\n", + "3. FInally, we write a loss function to train the semantic segmentation head.\n", + "\n", + "4. Lastly, set to train !" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SVaNqbpiAzwx", + "colab_type": "text" + }, + "source": [ + "## Model Trimming" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hbzY16ocEdrg", + "colab_type": "text" + }, + "source": [ + "### Helper Functions for Visualising Detections" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "yk5a6RpsEdIt", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class Resize(object):\n", + " def __init__(self, min_size, max_size):\n", + " self.min_size = min_size\n", + " self.max_size = max_size\n", + "\n", + " # modified from torchvision to add support for max size\n", + " def get_size(self, image_size):\n", + " w, h = image_size\n", + " size = self.min_size\n", + " max_size = self.max_size\n", + " if max_size is not None:\n", + " min_original_size = float(min((w, h)))\n", + " max_original_size = float(max((w, h)))\n", + " if max_original_size / min_original_size * size > max_size:\n", + " size = int(round(max_size * min_original_size / max_original_size))\n", + "\n", + " if (w <= h and w == size) or (h <= w and h == size):\n", + " return (h, w)\n", + "\n", + " if w < h:\n", + " ow = size\n", + " oh = int(size * h / w)\n", + " else:\n", + " oh = size\n", + " ow = int(size * w / h)\n", + "\n", + " return (oh, ow)\n", + "\n", + " def __call__(self, image):\n", + " size = self.get_size(image.size)\n", + " image = F.resize(image, size)\n", + " return image\n", + " \n", + " \n", + "class COCODemo(object):\n", + " \n", + " def __init__(\n", + " self,\n", + " cfg,\n", + " confidence_threshold=0.7,\n", + " show_mask_heatmaps=False,\n", + " masks_per_dim=2,\n", + " min_image_size=224,\n", + " convert_model=False\n", + " ):\n", + " self.cfg = cfg.clone()\n", + " if convert_model:\n", + " self.model = build_detection_model(cfg)\n", + " else:\n", + " self.model = build_panoptic_network(cfg)\n", + " self.training = False\n", + "\n", + " self.model.eval()\n", + " self.device = torch.device(cfg.MODEL.DEVICE)\n", + " self.model.to(self.device)\n", + " self.min_image_size = min_image_size\n", + "\n", + " save_dir = cfg.OUTPUT_DIR\n", + " checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)\n", + " _ = checkpointer.load(cfg.MODEL.WEIGHT)\n", + "\n", + " self.transforms = self.build_transform()\n", + "\n", + " mask_threshold = -1 if show_mask_heatmaps else 0.5\n", + " self.masker = Masker(threshold=mask_threshold, padding=1)\n", + "\n", + " # used to make colors for each class\n", + " self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n", + "\n", + " self.cpu_device = torch.device(\"cpu\")\n", + " 
self.confidence_threshold = confidence_threshold\n", + " self.show_mask_heatmaps = show_mask_heatmaps\n", + " self.masks_per_dim = masks_per_dim\n", + "\n", + " def build_transform(self):\n", + " \"\"\"\n", + " Creates a basic transformation that was used to train the models\n", + " \"\"\"\n", + " cfg = self.cfg\n", + "\n", + " # we are loading images with OpenCV, so we don't need to convert them\n", + " # to BGR, they are already! So all we need to do is to normalize\n", + " # by 255 if we want to convert to BGR255 format, or flip the channels\n", + " # if we want it to be in RGB in [0-1] range.\n", + " if cfg.INPUT.TO_BGR255:\n", + " to_bgr_transform = T.Lambda(lambda x: x * 255)\n", + " else:\n", + " to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n", + "\n", + " normalize_transform = T.Normalize(\n", + " mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n", + " )\n", + " min_size = cfg.INPUT.MIN_SIZE_TEST\n", + " max_size = cfg.INPUT.MAX_SIZE_TEST\n", + " transform = T.Compose(\n", + " [\n", + " T.ToPILImage(),\n", + " Resize(min_size, max_size),\n", + " T.ToTensor(),\n", + " to_bgr_transform,\n", + " normalize_transform,\n", + " ]\n", + " )\n", + " return transform\n", + "\n", + " def run_on_opencv_image(self, image, panoptic=False, objDet=False, semantic=False):\n", + " \"\"\"\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " Returns:\n", + " prediction (BoxList): the detected objects. Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " mask, predictions = self.compute_prediction(image)\n", + " top_predictions = self.select_top_predictions(predictions)\n", + " \n", + " \n", + " result = image.copy()\n", + " \n", + " if semantic or panoptic:\n", + " height, width = image.shape[:-1]\n", + "\n", + " # overlay segmentation mask first\n", + " mask = np.squeeze(mask)\n", + " mask = (mask > 0.5).astype(np.uint8)\n", + " result = np.transpose(result, (2,0,1))\n", + "\n", + " mask = cv2.resize(mask, dsize=(width, height), interpolation=cv2.INTER_CUBIC)\n", + " result = result*mask\n", + " result = np.transpose(result, (1,2,0))\n", + " \n", + " if objDet or panoptic:\n", + "\n", + " if self.show_mask_heatmaps:\n", + " return self.create_mask_montage(result, top_predictions)\n", + " result = self.overlay_boxes(result, top_predictions)\n", + " if self.cfg.MODEL.MASK_ON:\n", + " result = self.overlay_mask(result, top_predictions)\n", + " if self.cfg.MODEL.KEYPOINT_ON:\n", + " result = self.overlay_keypoints(result, top_predictions)\n", + " result = self.overlay_class_names(result, top_predictions)\n", + "\n", + " return result\n", + "\n", + " def compute_prediction(self, original_image):\n", + " \"\"\"\n", + " Arguments:\n", + " original_image (np.ndarray): an image as returned by OpenCV\n", + " Returns:\n", + " prediction (BoxList): the detected objects. 
Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " # apply pre-processing to image\n", + " image = self.transforms(original_image)\n", + " # convert to an ImageList, padded so that it is divisible by\n", + " # cfg.DATALOADER.SIZE_DIVISIBILITY\n", + " image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)\n", + " image_list = image_list.to(self.device)\n", + " # compute predictions\n", + " with torch.no_grad():\n", + " semantic_mask, predictions = self.model(image_list)\n", + " predictions = [o.to(self.cpu_device) for o in predictions]\n", + "\n", + " # always single image is passed at a time\n", + " prediction = predictions[0]\n", + "\n", + " # reshape prediction (a BoxList) into the original image size\n", + " height, width = original_image.shape[:-1]\n", + " prediction = prediction.resize((width, height))\n", + "\n", + " if prediction.has_field(\"mask\"):\n", + " # if we have masks, paste the masks in the right position\n", + " # in the image, as defined by the bounding boxes\n", + " masks = prediction.get_field(\"mask\")\n", + " # always single image is passed at a time\n", + " masks = self.masker([masks], [prediction])[0]\n", + " prediction.add_field(\"mask\", masks)\n", + " return semantic_mask.cpu().detach().numpy(), prediction\n", + "\n", + " def select_top_predictions(self, predictions):\n", + " \"\"\"\n", + " Select only predictions which have a `score` > self.confidence_threshold,\n", + " and returns the predictions in descending order of score\n", + " Arguments:\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `scores`.\n", + " Returns:\n", + " prediction (BoxList): the detected objects. 
Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " scores = predictions.get_field(\"scores\")\n", + " keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n", + " predictions = predictions[keep]\n", + " scores = predictions.get_field(\"scores\")\n", + " _, idx = scores.sort(0, descending=True)\n", + " return predictions[idx]\n", + "\n", + " def compute_colors_for_labels(self, labels):\n", + " \"\"\"\n", + " Simple function that adds fixed colors depending on the class\n", + " \"\"\"\n", + " colors = labels[:, None] * self.palette\n", + " colors = (colors % 255).numpy().astype(\"uint8\")\n", + " return colors\n", + "\n", + " def overlay_boxes(self, image, predictions):\n", + " \"\"\"\n", + " Adds the predicted boxes on top of the image\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `labels`.\n", + " \"\"\"\n", + " labels = predictions.get_field(\"labels\")\n", + " boxes = predictions.bbox\n", + "\n", + " colors = self.compute_colors_for_labels(labels).tolist()\n", + "\n", + " for box, color in zip(boxes, colors):\n", + " box = box.to(torch.int64)\n", + " top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n", + " image = cv2.rectangle(\n", + " image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n", + " )\n", + "\n", + " return image\n", + "\n", + " def overlay_mask(self, image, predictions):\n", + " \"\"\"\n", + " Adds the instances contours for each predicted object.\n", + " Each label has a different color.\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `mask` and `labels`.\n", + " \"\"\"\n", + " masks = predictions.get_field(\"mask\").numpy()\n", + " labels = predictions.get_field(\"labels\")\n", + "\n", + " colors = self.compute_colors_for_labels(labels).tolist()\n", + "\n", + " for mask, color in zip(masks, colors):\n", + " thresh = mask[0, :, :, None]\n", + " contours, hierarchy = cv2_util.findContours(\n", + " thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n", + " )\n", + " image = cv2.drawContours(image, contours, -1, color, 3)\n", + "\n", + " composite = image\n", + "\n", + " return composite\n", + "\n", + " def overlay_keypoints(self, image, predictions):\n", + " keypoints = predictions.get_field(\"keypoints\")\n", + " kps = keypoints.keypoints\n", + " scores = keypoints.get_field(\"logits\")\n", + " kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()\n", + " for region in kps:\n", + " image = vis_keypoints(image, region.transpose((1, 0)))\n", + " return image\n", + "\n", + " def create_mask_montage(self, image, predictions):\n", + " \"\"\"\n", + " Create a montage showing the probability heatmaps for each one one of the\n", + " detected objects\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `mask`.\n", + " \"\"\"\n", + " masks = predictions.get_field(\"mask\")\n", + " masks_per_dim = self.masks_per_dim\n", + " masks = L.interpolate(\n", + " masks.float(), scale_factor=1 / masks_per_dim\n", + " ).byte()\n", + " height, width = masks.shape[-2:]\n", + " max_masks = masks_per_dim ** 2\n", + " masks = 
masks[:max_masks]\n", + " # handle case where we have less detections than max_masks\n", + " if len(masks) < max_masks:\n", + " masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)\n", + " masks_padded[: len(masks)] = masks\n", + " masks = masks_padded\n", + " masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)\n", + " result = torch.zeros(\n", + " (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8\n", + " )\n", + " for y in range(masks_per_dim):\n", + " start_y = y * height\n", + " end_y = (y + 1) * height\n", + " for x in range(masks_per_dim):\n", + " start_x = x * width\n", + " end_x = (x + 1) * width\n", + " result[start_y:end_y, start_x:end_x] = masks[y, x]\n", + " return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)\n", + "\n", + " def overlay_class_names(self, image, predictions):\n", + " \"\"\"\n", + " Adds detected class names and scores in the positions defined by the\n", + " top-left corner of the predicted bounding box\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `scores` and `labels`.\n", + " \"\"\"\n", + " scores = predictions.get_field(\"scores\").tolist()\n", + " labels = predictions.get_field(\"labels\").tolist()\n", + " labels = [self.CATEGORIES[i] for i in labels]\n", + " boxes = predictions.bbox\n", + "\n", + " template = \"{}: {:.2f}\"\n", + " for box, score, label in zip(boxes, scores, labels):\n", + " x, y = box[:2]\n", + " s = template.format(label, score)\n", + " cv2.putText(\n", + " image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n", + " )\n", + "\n", + " return image\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from maskrcnn_benchmark.structures.keypoint import PersonKeypoints\n", + "\n", + "def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n", + " \"\"\"Visualizes keypoints (adapted from vis_one_image).\n", + " kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n", + " \"\"\"\n", + " dataset_keypoints = PersonKeypoints.NAMES\n", + " kp_lines = PersonKeypoints.CONNECTIONS\n", + "\n", + " # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n", + " cmap = plt.get_cmap('rainbow')\n", + " colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n", + " colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n", + "\n", + " # Perform the drawing on a copy of the image, to allow for blending.\n", + " kp_mask = np.copy(img)\n", + "\n", + " # Draw mid shoulder / mid hip first for better visualization.\n", + " mid_shoulder = (\n", + " kps[:2, dataset_keypoints.index('right_shoulder')] +\n", + " kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n", + " sc_mid_shoulder = np.minimum(\n", + " kps[2, dataset_keypoints.index('right_shoulder')],\n", + " kps[2, dataset_keypoints.index('left_shoulder')])\n", + " mid_hip = (\n", + " kps[:2, dataset_keypoints.index('right_hip')] +\n", + " kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n", + " sc_mid_hip = np.minimum(\n", + " kps[2, dataset_keypoints.index('right_hip')],\n", + " kps[2, dataset_keypoints.index('left_hip')])\n", + " nose_idx = dataset_keypoints.index('nose')\n", + " if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n", + " color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n", + " if 
sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n", + " color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n", + "\n", + " # Draw the keypoints.\n", + " for l in range(len(kp_lines)):\n", + " i1 = kp_lines[l][0]\n", + " i2 = kp_lines[l][1]\n", + " p1 = kps[0, i1], kps[1, i1]\n", + " p2 = kps[0, i2], kps[1, i2]\n", + " if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, p1, p2,\n", + " color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n", + " if kps[2, i1] > kp_thresh:\n", + " cv2.circle(\n", + " kp_mask, p1,\n", + " radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n", + " if kps[2, i2] > kp_thresh:\n", + " cv2.circle(\n", + " kp_mask, p2,\n", + " radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n", + "\n", + " # Blend the keypoints.\n", + " return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "If8z4OZfDHmC", + "colab_type": "text" + }, + "source": [ + "### Base Model Config\n", + "\n", + "This is the base model that we will finetune from. First we need to replace the bounding box heads and mask heads to make it compatible with our Shapes Dataset." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "wM0coO44ClbV", + "colab_type": "code", + "outputId": "7ecc3f19-8b98-4a66-c155-a630b6036691", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "%%writefile base_config.yaml\n", + "MODEL:\n", + " META_ARCHITECTURE: \"GeneralizedRCNN\"\n", + " WEIGHT: \"catalog://Caffe2Detectron/COCO/35858933/e2e_mask_rcnn_R-50-FPN_1x\"\n", + " BACKBONE:\n", + " CONV_BODY: \"R-50-FPN\"\n", + " RESNETS:\n", + " BACKBONE_OUT_CHANNELS: 256\n", + " RPN:\n", + " USE_FPN: True\n", + " ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n", + " PRE_NMS_TOP_N_TRAIN: 2000\n", + " PRE_NMS_TOP_N_TEST: 1000\n", + " POST_NMS_TOP_N_TEST: 1000\n", + " FPN_POST_NMS_TOP_N_TEST: 1000\n", + " ROI_HEADS:\n", + " USE_FPN: True\n", + " ROI_BOX_HEAD:\n", + " POOLER_RESOLUTION: 7\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " POOLER_SAMPLING_RATIO: 2\n", + " FEATURE_EXTRACTOR: \"FPN2MLPFeatureExtractor\"\n", + " PREDICTOR: \"FPNPredictor\"\n", + " ROI_MASK_HEAD:\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " FEATURE_EXTRACTOR: \"MaskRCNNFPNFeatureExtractor\"\n", + " PREDICTOR: \"MaskRCNNC4Predictor\"\n", + " POOLER_RESOLUTION: 14\n", + " POOLER_SAMPLING_RATIO: 2\n", + " RESOLUTION: 28\n", + " SHARE_BOX_FEATURE_EXTRACTOR: False\n", + " MASK_ON: True\n", + "DATALOADER:\n", + " SIZE_DIVISIBILITY: 32" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing base_config.yaml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mOo-0LGFEAmc", + "colab_type": "text" + }, + "source": [ + "### Pretrained weight removal\n", + "\n", + "Here, the pretrained weights of bbox, mask and class predictions are removed. This is done so that we can make the model shapes dataset compatible i.e predict 3 classes instead of Coco's 81 classes." 
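To make the reason for this trimming concrete, here is a minimal, self-contained sketch (the layer names and sizes are hypothetical stand-ins, not the actual maskrcnn-benchmark parameter names): the shapes of the final predictor layers depend on the number of classes, so the COCO-trained 81-class weights cannot be loaded into a 4-class head and have to be dropped and re-initialized instead.

```python
import torch.nn as nn

# Hypothetical stand-ins for the box classification head: the COCO checkpoint
# was trained with 81 output classes, while the shapes dataset needs only 4
# (3 shapes + background).
coco_cls_score = nn.Linear(1024, 81)
shapes_cls_score = nn.Linear(1024, 4)

# Loading the 81-class weights into the 4-class layer fails with a
# size-mismatch error, which is why those keys are stripped from the
# checkpoint (as done in the next cell) rather than reused.
try:
    shapes_cls_score.load_state_dict(coco_cls_score.state_dict())
except RuntimeError as err:
    print("cannot reuse predictor weights:", err)
```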
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ISFsxBxBDZcQ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def removekey(d, listofkeys):\n", + " r = dict(d)\n", + " for key in listofkeys:\n", + " print('key: {} is removed'.format(key))\n", + " r.pop(key)\n", + " return r\n", + " \n", + "logger_dir = 'log'\n", + "\n", + "if logger_dir:\n", + " mkdir(logger_dir)\n", + "\n", + "logger = setup_logger(\"maskrcnn_benchmark\", logger_dir, get_rank())\n", + "logger.info(\"Using {} GPUs\".format(1))\n", + "\n", + "config_file = \"base_config.yaml\"\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "\n", + "# Add these for printing class names over your predictions.\n", + "COCODemo.CATEGORIES = [\n", + " \"__background\",\n", + " \"square\",\n", + " \"circle\",\n", + " \"triangle\"\n", + "]\n", + "\n", + "demo = COCODemo(\n", + " cfg, \n", + " min_image_size=800,\n", + " confidence_threshold=0.7,\n", + " convert_model=True)\n", + "\n", + "base_model = demo.model\n", + "\n", + "# Removes pretrained weights from state dict\n", + "new_state_dict = removekey(base_model.state_dict(), [ \n", + " \"roi_heads.box.predictor.cls_score.weight\", \"roi_heads.box.predictor.cls_score.bias\", \n", + " \"roi_heads.box.predictor.bbox_pred.weight\", \"roi_heads.box.predictor.bbox_pred.bias\",\n", + " \"roi_heads.mask.predictor.mask_fcn_logits.weight\", \"roi_heads.mask.predictor.mask_fcn_logits.bias\"\n", + " ])\n", + "\n", + "# Save new state dict, we will use this as our starting weights for our fine-tuned model\n", + "torch.save(new_state_dict, \"base_model.pth\")" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bbCBInqHFUg7", + "colab_type": "text" + }, + "source": [ + "### Fine Tuned Model Config\n", + "\n", + "Here we define our shape Dataset config. The important fields are \n", + "\n", + "1. WEIGHT: which point to our base_model.pth saved in the previous step\n", + "2. NUM_CLASSES: Which define how many classes we will predict . note that the number includes the background, hence our shapes dataset has 4 classes. \n", + "3. PANOPTIC.CHANNEL_SIZE: To set the channel size of the segmentation head of the FPN.\n", + "4. PANOPTIC.NUM_CLASSES: Number of classes of semantic segmentation head." 
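As a quick sanity check, the merged values can be printed before training starts. This is only a sketch: it assumes the same yacs `cfg` object used throughout this notebook, that the `%%writefile` cell below has already been run, and that the custom `MODEL.PANOPTIC` node this notebook defines has been registered on `cfg`.

```python
from maskrcnn_benchmark.config import cfg

# Assumes shapes_config.yaml (written in the next cell) exists and that
# MODEL.PANOPTIC has been added to the default config, as this notebook does.
cfg.merge_from_file("shapes_config.yaml")

print(cfg.MODEL.WEIGHT)                    # "base_model.pth" from the trimming step
print(cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES)  # 4 = 3 shape classes + background
print(cfg.MODEL.PANOPTIC.CHANNEL_SIZE)     # 128, width of the segmentation branch
print(cfg.MODEL.PANOPTIC.NUM_CLASSES)      # 1, foreground vs. background
```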
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5AhIiTgmFXyi", + "colab_type": "code", + "outputId": "2cfd6b5c-348a-4ca0-ad06-d73937ca8c09", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "%%writefile shapes_config.yaml\n", + "MODEL:\n", + " META_ARCHITECTURE: \"GeneralizedRCNN\"\n", + " WEIGHT: \"base_model.pth\"\n", + " BACKBONE:\n", + " CONV_BODY: \"R-50-FPN\"\n", + " RESNETS:\n", + " BACKBONE_OUT_CHANNELS: 256\n", + " RPN:\n", + " USE_FPN: True\n", + " ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n", + " PRE_NMS_TOP_N_TRAIN: 2000\n", + " PRE_NMS_TOP_N_TEST: 1000\n", + " POST_NMS_TOP_N_TEST: 1000\n", + " FPN_POST_NMS_TOP_N_TEST: 1000\n", + " ROI_HEADS:\n", + " USE_FPN: True\n", + " ROI_BOX_HEAD:\n", + " POOLER_RESOLUTION: 7\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " POOLER_SAMPLING_RATIO: 2\n", + " FEATURE_EXTRACTOR: \"FPN2MLPFeatureExtractor\"\n", + " PREDICTOR: \"FPNPredictor\"\n", + " NUM_CLASSES: 4 # background + num_classes : IMPORTANT dont forget to add this\n", + " ROI_MASK_HEAD:\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " FEATURE_EXTRACTOR: \"MaskRCNNFPNFeatureExtractor\"\n", + " PREDICTOR: \"MaskRCNNC4Predictor\"\n", + " POOLER_RESOLUTION: 14\n", + " POOLER_SAMPLING_RATIO: 2\n", + " RESOLUTION: 28\n", + " SHARE_BOX_FEATURE_EXTRACTOR: False\n", + " MASK_ON: True\n", + " PANOPTIC:\n", + " CHANNEL_SIZE: 128\n", + " NUM_CLASSES: 1 # just 1 class to seperate foreground from background\n", + "DATALOADER:\n", + " SIZE_DIVISIBILITY: 32" + ], + "execution_count": 11, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Overwriting shapes_config.yaml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tAn3omCjTFGI", + "colab_type": "text" + }, + "source": [ + "### Data Loader\n", + "\n", + "This function creates a data loader with our shapes dataset. This data loader is used internally in the repo to train the model." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "oODu2UpVTHXz", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def build_data_loader(cfg, dataset, is_train=True, is_distributed=False, start_iter=0):\n", + " num_gpus = get_world_size()\n", + " if is_train:\n", + " images_per_batch = cfg.SOLVER.IMS_PER_BATCH\n", + " assert (\n", + " images_per_batch % num_gpus == 0\n", + " ), \"SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\".format(\n", + " images_per_batch, num_gpus)\n", + " images_per_gpu = images_per_batch // num_gpus\n", + " shuffle = True\n", + " num_iters = cfg.SOLVER.MAX_ITER\n", + " else:\n", + " images_per_batch = cfg.TEST.IMS_PER_BATCH\n", + " assert (\n", + " images_per_batch % num_gpus == 0\n", + " ), \"TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\".format(\n", + " images_per_batch, num_gpus)\n", + " images_per_gpu = images_per_batch // num_gpus\n", + " shuffle = False if not is_distributed else True\n", + " num_iters = None\n", + " start_iter = 0\n", + "\n", + " if images_per_gpu > 1:\n", + " logger = logging.getLogger(__name__)\n", + " logger.warning(\n", + " \"When using more than one image per GPU you may encounter \"\n", + " \"an out-of-memory (OOM) error if your GPU does not have \"\n", + " \"sufficient memory. If this happens, you can reduce \"\n", + " \"SOLVER.IMS_PER_BATCH (for training) or \"\n", + " \"TEST.IMS_PER_BATCH (for inference). 
For training, you must \"\n", + " \"also adjust the learning rate and schedule length according \"\n", + " \"to the linear scaling rule. See for example: \"\n", + " \"https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14\"\n", + " )\n", + "\n", + " # group images which have similar aspect ratio. In this case, we only\n", + " # group in two cases: those with width / height > 1, and the other way around,\n", + " # but the code supports more general grouping strategy\n", + " aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []\n", + "\n", + " paths_catalog = import_file(\n", + " \"maskrcnn_benchmark.config.paths_catalog\", cfg.PATHS_CATALOG, True\n", + " )\n", + " DatasetCatalog = paths_catalog.DatasetCatalog\n", + " dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST\n", + "\n", + " # If bbox aug is enabled in testing, simply set transforms to None and we will apply transforms later\n", + " transforms = None if not is_train and cfg.TEST.BBOX_AUG.ENABLED else build_transforms(cfg, is_train)\n", + " \n", + " dataset.transforms = transforms\n", + " datasets = [ dataset ]\n", + " \n", + " data_loaders = []\n", + " for dataset in datasets:\n", + " sampler = make_data_sampler(dataset, shuffle, is_distributed)\n", + " batch_sampler = make_batch_data_sampler(\n", + " dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter\n", + " )\n", + " collator = BBoxAugCollator() if not is_train and cfg.TEST.BBOX_AUG.ENABLED else \\\n", + " BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)\n", + " num_workers = cfg.DATALOADER.NUM_WORKERS\n", + " data_loader = torch.utils.data.DataLoader(\n", + " dataset,\n", + " num_workers=num_workers,\n", + " batch_sampler=batch_sampler,\n", + " collate_fn=collator,\n", + " )\n", + " data_loaders.append(data_loader)\n", + " if is_train:\n", + " # during training, a single (possibly concatenated) data_loader is returned\n", + " assert len(data_loaders) == 1\n", + " return data_loaders[0]\n", + " return data_loaders" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xs_KL1R1aGSA", + "colab_type": "text" + }, + "source": [ + "### Semantic Segmentation Loss\n", + "\n", + "Loss for the Semantic Segmentation Head of the Model" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "9ipih-wPaJqK", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class SegLoss(nn.Module):\n", + " \n", + " def __init__(self):\n", + " super(SegLoss, self).__init__()\n", + "\n", + " def prepare_target(self, targets):\n", + " labels = []\n", + "\n", + " for t in targets:\n", + " t = t.get_field(\"seg_masks\").get_mask_tensor().unsqueeze(0)\n", + " labels.append(t)\n", + "\n", + " return cat(labels, dim=0).unsqueeze(1).to(\"cuda\", dtype=torch.float32)\n", + "\n", + " def forward(self, mask, target):\n", + " '''\n", + " mask : Tensor\n", + " target : list[Boxlist]\n", + " '''\n", + " \n", + " target = self.prepare_target(target)\n", + "\n", + " loss = Fx.binary_cross_entropy_with_logits(mask, target)\n", + " \n", + " return loss" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zBrlwqT7RsdJ", + "colab_type": "text" + }, + "source": [ + "### Segmenter Model\n", + "\n", + "The model modifies the FPN of the Mask RCNN as per [this](https://arxiv.org/abs/1901.02446) paper" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "dJMk5lxwRvTh", + 
"colab_type": "code", + "colab": {} + }, + "source": [ + "def panoptic_upsampler_block(in_channels, out_channels, expansion):\n", + " \n", + " modules = []\n", + " \n", + " if expansion == 0:\n", + " modules.append( make_conv3x3(\n", + " in_channels,\n", + " out_channels,\n", + " dilation=1,\n", + " stride=1,\n", + " use_gn=True,\n", + " use_relu=True,\n", + " kaiming_init=True\n", + " )) # no upsample\n", + " \n", + " for i in range(expansion):\n", + " modules.append(make_conv3x3(\n", + " in_channels if i == 0 else out_channels,\n", + " out_channels,\n", + " dilation=1,\n", + " stride=1,\n", + " use_gn=True,\n", + " use_relu=True,\n", + " kaiming_init=True\n", + " ))\n", + " modules.append(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False))\n", + " \n", + " return nn.Sequential(*modules)\n", + "\n", + "\n", + "class PanopticRCNN(nn.Module):\n", + "\n", + " def __init__(self, cfg, num_classes):\n", + " super(PanopticRCNN, self).__init__()\n", + "\n", + " \n", + " self.scale1_block = panoptic_upsampler_block(in_channels=cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS,\n", + " out_channels=cfg.MODEL.PANOPTIC.CHANNEL_SIZE, expansion=3) # 1/32\n", + " self.scale2_block = panoptic_upsampler_block(in_channels=cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS,\n", + " out_channels=cfg.MODEL.PANOPTIC.CHANNEL_SIZE, expansion=2) # 1/16\n", + " self.scale3_block = panoptic_upsampler_block(in_channels=cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS, \n", + " out_channels=cfg.MODEL.PANOPTIC.CHANNEL_SIZE, expansion=1) # 1/8\n", + " self.scale4_block = panoptic_upsampler_block(in_channels=cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS,\n", + " out_channels=cfg.MODEL.PANOPTIC.CHANNEL_SIZE, expansion=0) # 1/4\n", + " \n", + " self.num_classes = num_classes\n", + " \n", + " self.final_seg_mask = nn.Sequential(\n", + " nn.Conv2d(kernel_size=1, in_channels=128, out_channels=self.num_classes),\n", + " nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)\n", + " )\n", + " \n", + " \n", + " def forward(self, features):\n", + " \"\"\"\n", + " Arguments:\n", + " features (list[Tensor]): feature maps gen post FPN, (N, C, H, W)\n", + " Returns:\n", + " segmentation_mask: semantic segmentation mask\n", + " \"\"\"\n", + " \n", + " \n", + " x1 = self.scale1_block(features[3])\n", + " \n", + " x2 = self.scale2_block(features[2])\n", + " \n", + " x3 = self.scale3_block(features[1])\n", + " \n", + " x4 = self.scale4_block(features[0])\n", + " \n", + " x = x1 + x2 + x3 + x4\n", + " \n", + " seg_mask = self.final_seg_mask(x)\n", + " \n", + " return seg_mask\n", + "\n", + "\n", + "class PanopticModel(nn.Module):\n", + " \"\"\"\n", + " Main class for Panoptic R-CNN. 
Currently supports boxes and masks.\n", + " It consists of three main parts:\n", + " - backbone\n", + " - rpn\n", + " - panoptic: ouputs semantic segmentation mask\n", + " - heads: takes the features + the proposals from the RPN and computes\n", + " detections / masks from it.\n", + " \"\"\"\n", + " def __init__(self, cfg):\n", + " super(PanopticModel, self).__init__()\n", + "\n", + " self.backbone = build_backbone(cfg)\n", + " self.loss = SegLoss()\n", + " self.training = True\n", + " self.rpn = build_rpn(cfg, self.backbone.out_channels)\n", + " self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)\n", + " self.panoptic = PanopticRCNN(cfg, num_classes=cfg.MODEL.PANOPTIC.NUM_CLASSES)\n", + " \n", + "\n", + " def forward(self, images, targets=None):\n", + " \"\"\"\n", + " Arguments:\n", + " images (list[Tensor] or ImageList): images to be processed\n", + " targets (list[BoxList]): ground-truth boxes present in the image (optional)\n", + " Returns:\n", + " result (list[BoxList] or dict[Tensor]): the output from the model.\n", + " During training, it returns a dict[Tensor] which contains the losses.\n", + " During testing, it returns list[BoxList] contains additional fields\n", + " like `scores`, `labels` and `mask` (for Mask R-CNN models).\n", + " \"\"\"\n", + "\n", + " images = to_image_list(images)\n", + " features = self.backbone(images.tensors) \n", + " seg_mask = self.panoptic(features)\n", + " proposals, proposal_losses = self.rpn(images, features, targets)\n", + " \n", + " \n", + " if self.roi_heads:\n", + " x, result, detector_losses = self.roi_heads(features, proposals, targets)\n", + " else:\n", + " # RPN-only models don't have roi_heads\n", + " x = features\n", + " result = proposals\n", + " detector_losses = {}\n", + "\n", + " if self.training:\n", + " segmentation_loss = self.loss(seg_mask, targets)\n", + " \n", + " losses = {}\n", + " losses.update(detector_losses)\n", + " losses.update(proposal_losses)\n", + " losses.update(dict(segmentation_loss=segmentation_loss))\n", + " \n", + " return losses\n", + " \n", + " return seg_mask, result\n", + " " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NVjPYFN1Pz6D", + "colab_type": "text" + }, + "source": [ + "### Build Panoptic Network" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WE6K5qZ7Pt5T", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def build_panoptic_network(cfg):\n", + " return PanopticModel(cfg)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kkLKDmRC0-CE", + "colab_type": "text" + }, + "source": [ + "### Train Panoptic\n", + "\n", + "The train function is the entry point into the training process. It creates data loaders, optimisers, loads from checkpoint. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4e2-533F1Qmu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# See if we can use apex.DistributedDataParallel instead of the torch default,\n", + "# and enable mixed-precision via apex.amp\n", + "try:\n", + " from apex import amp\n", + "except ImportError:\n", + " raise ImportError('Use APEX for multi-precision via apex.amp')\n", + " \n", + "def reduce_loss_dict(loss_dict):\n", + " \"\"\"\n", + " Reduce the loss dictionary from all processes so that process with rank\n", + " 0 has the averaged results. 
Returns a dict with the same fields as\n", + " loss_dict, after reduction.\n", + " \"\"\"\n", + " world_size = get_world_size()\n", + " if world_size < 2:\n", + " return loss_dict\n", + " with torch.no_grad():\n", + " loss_names = []\n", + " all_losses = []\n", + " for k in sorted(loss_dict.keys()):\n", + " loss_names.append(k)\n", + " all_losses.append(loss_dict[k])\n", + " all_losses = torch.stack(all_losses, dim=0)\n", + " dist.reduce(all_losses, dst=0)\n", + " if dist.get_rank() == 0:\n", + " # only main process gets accumulated, so only divide by\n", + " # world_size in this case\n", + " all_losses /= world_size\n", + " reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}\n", + " return reduced_losses\n", + "\n", + "\n", + "def do_train_panoptic(\n", + " model,\n", + " data_loader,\n", + " optimizer,\n", + " scheduler,\n", + " checkpointer,\n", + " device,\n", + " checkpoint_period,\n", + " arguments,\n", + "):\n", + " logger = logging.getLogger(\"maskrcnn_benchmark.trainer\")\n", + " logger.error(\"Start training\")\n", + " meters = MetricLogger(delimiter=\" \")\n", + " max_iter = len(data_loader)\n", + " start_iter = arguments[\"iteration\"]\n", + " model.train()\n", + " start_training_time = time.time()\n", + " end = time.time()\n", + " \n", + " for iteration, (images, targets, _) in enumerate(data_loader, start_iter):\n", + " \n", + " if any(len(target) < 1 for target in targets):\n", + " logger.error(f\"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}\" )\n", + " continue\n", + " data_time = time.time() - end\n", + " iteration = iteration + 1\n", + " arguments[\"iteration\"] = iteration\n", + "\n", + " scheduler.step()\n", + "\n", + " images = images.to(device)\n", + " targets = [target.to(device) for target in targets]\n", + " \n", + " loss_dict = model(images, targets)\n", + " \n", + " losses = sum(loss for loss in loss_dict.values())\n", + " \n", + " # reduce losses over all GPUs for logging purposes\n", + " loss_dict_reduced = reduce_loss_dict(loss_dict)\n", + " losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n", + " meters.update(loss=losses_reduced, **loss_dict_reduced)\n", + "\n", + " optimizer.zero_grad()\n", + " # Note: If mixed precision is not used, this ends up doing nothing\n", + " # Otherwise apply loss scaling for mixed-precision recipe\n", + " with amp.scale_loss(losses, optimizer) as scaled_losses:\n", + " scaled_losses.backward()\n", + " optimizer.step()\n", + "\n", + " batch_time = time.time() - end\n", + " end = time.time()\n", + " meters.update(time=batch_time, data=data_time)\n", + "\n", + " eta_seconds = meters.time.global_avg * (max_iter - iteration)\n", + " eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n", + "\n", + " if iteration % 20 == 0 or iteration == max_iter:\n", + " logger.info(\n", + " meters.delimiter.join(\n", + " [\n", + " \"eta: {eta}\",\n", + " \"iter: {iter}\",\n", + " \"{meters}\",\n", + " \"lr: {lr:.6f}\",\n", + " \"max mem: {memory:.0f}\",\n", + " ]\n", + " ).format(\n", + " eta=eta_string,\n", + " iter=iteration,\n", + " meters=str(meters),\n", + " lr=optimizer.param_groups[0][\"lr\"],\n", + " memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n", + " )\n", + " )\n", + " if iteration % checkpoint_period == 0:\n", + " checkpointer.save(\"model_{:07d}\".format(iteration), **arguments)\n", + " if iteration == max_iter:\n", + " checkpointer.save(\"model_final\", **arguments)\n", + "\n", + " total_training_time = 
time.time() - start_training_time\n", + " total_time_str = str(datetime.timedelta(seconds=total_training_time))\n", + " logger.info(\n", + " \"Total training time: {} ({:.4f} s / it)\".format(\n", + " total_time_str, total_training_time / (max_iter)\n", + " ))\n", + "\n", + "def train_panoptic(cfg, local_rank, distributed, dataset):\n", + " model = build_panoptic_network(cfg)\n", + "\n", + " device = torch.device('cuda')\n", + " model.to(device)\n", + " \n", + " optimizer = make_optimizer(cfg, model)\n", + " scheduler = make_lr_scheduler(cfg, optimizer) \n", + "\n", + " # Initialize mixed-precision training\n", + " use_mixed_precision = cfg.DTYPE == \"float16\"\n", + " amp_opt_level = 'O1' if use_mixed_precision else 'O0'\n", + " model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)\n", + "\n", + " if distributed:\n", + " model = torch.nn.parallel.DistributedDataParallel(\n", + " model, device_ids=[local_rank], output_device=local_rank,\n", + " # this should be removed if we update BatchNorm stats\n", + " broadcast_buffers=False,\n", + " )\n", + "\n", + " arguments = {}\n", + " arguments[\"iteration\"] = 0\n", + "\n", + " output_dir = cfg.OUTPUT_DIR\n", + " save_to_disk = get_rank() == 0\n", + " checkpointer = DetectronCheckpointer(\n", + " cfg, model, optimizer, scheduler, output_dir, save_to_disk\n", + " )\n", + " extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n", + " arguments.update(extra_checkpoint_data)\n", + "\n", + "\n", + " data_loader = build_data_loader(cfg, dataset)\n", + "\n", + " checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n", + "\n", + " do_train_panoptic(\n", + " model,\n", + " data_loader,\n", + " optimizer,\n", + " scheduler,\n", + " checkpointer,\n", + " device,\n", + " checkpoint_period,\n", + " arguments,\n", + " )\n", + "\n", + " return model" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pVeJNhzy2DZs", + "colab_type": "text" + }, + "source": [ + "## Train Panoptic Driver\n", + "\n", + "here we fire off training by calling the above function. before that we set some important config for our training. We make our dataset and update our config. Then we fire off training !" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "XtgfPl7F2CEP", + "colab_type": "code", + "colab": {} + }, + "source": [ + "config_file = \"shapes_config.yaml\"\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "cfg.merge_from_list(['OUTPUT_DIR', 'segDir']) # The output folder where all our model checkpoints will be saved during training.\n", + "cfg.merge_from_list(['SOLVER.IMS_PER_BATCH', 25]) # Number of images to take insiade a single batch. This number depends on the size of your GPU\n", + "cfg.merge_from_list(['SOLVER.BASE_LR', 0.0001]) # The Learning Rate when training starts. Please check Detectron scaling rules to determine your learning for your GPU setup. \n", + "cfg.merge_from_list(['SOLVER.MAX_ITER', 1000]) # The number of training iterations that will be executed during training. One iteration is given as one forward and backward pass of a mini batch of the network\n", + "cfg.merge_from_list(['SOLVER.STEPS', \"(700, 800)\"]) # These two numberes represent after how many iterations is the learning rate divided by 10. 
\n", + "cfg.merge_from_list(['TEST.IMS_PER_BATCH', 1]) # Batch size during testing/evaluation\n", + "cfg.merge_from_list(['MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN', 2000]) # This determines how many region proposals to take in for processing into the stage after the RPN. The rule is 1000*batch_size = 4*1000 \n", + "cfg.merge_from_list(['SOLVER.CHECKPOINT_PERIOD', 100]) # After how many iterations does one want to save the model.\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TRAIN', \"(192, )\"])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TRAIN', 192])\n", + "# Make the Output dir if one doesnt exist.\n", + "output_dir = cfg.OUTPUT_DIR\n", + "if output_dir:\n", + " mkdir(output_dir)\n", + "\n", + "# Start training.\n", + "model = train_panoptic(cfg, local_rank=1, distributed=False, dataset=ShapeDataset(2000))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ccHt8YMdKq6K", + "colab_type": "text" + }, + "source": [ + "# Visualise\n", + "\n", + "An important part of validating your model is visualising the results. This is done below" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kb9VchvVzRpu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Load Trained Model\n", + "config_file = \"shapes_config.yaml\"\n", + "\n", + "cfg.merge_from_file(config_file)\n", + "# manual override some options\n", + "# cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n", + "\n", + "# manual override some options\n", + "cfg.merge_from_list(['OUTPUT_DIR', 'segDir']) # The output folder where all our model checkpoints will be saved during training.\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TRAIN', \"(192, )\"])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TRAIN', 192])\n", + "\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TEST', 192])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TEST', 192])\n", + "# cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n", + "\n", + "\n", + "vis_demo = COCODemo(\n", + " cfg, \n", + " min_image_size=192,\n", + " confidence_threshold=0.7)\n", + "\n", + "# Add these for printing class names over your predictions.\n", + "COCODemo.CATEGORIES = [\n", + " \"__background\",\n", + " \"square\",\n", + " \"circle\",\n", + " \"triangle\"\n", + "]\n", + "\n", + "# Load Dataset\n", + "dataset = ShapeDataset(50)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c8b6wHAXjyE5", + "colab_type": "text" + }, + "source": [ + "## Visualise" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RSPq97dtWFrA", + "colab_type": "text" + }, + "source": [ + "### Input Image" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Ir-cYCvKSbNI", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + }, + "outputId": "03b774d2-f7f1-4e86-e56b-37a734cb9e72" + }, + "source": [ + "# Visualise Input Image\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i)\n", + "# image = np.array(img)[:, :, [2, 1, 0]]\n", + "# result = vis_demo.run_on_opencv_image(image)\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(img)\n", + "plt.show()" + ], + "execution_count": 50, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X2QZHV56PHvk13wBcFhWWrd7KJA\nQkw2VhSyIhZetcQkiIYlFa8XtJJVudlrXUw0JqWoRWmVpVfzotGqRFwDYc1FEVGLvdEYkQBWrLBh\nQeRtBVYU2a2FZV1XKLXExef+0We0GWZ2p/uc7vPr099P1dR0n+6efvo388wzz3NOn4nMRJIkleOX\n2g5AkiQ9lsVZkqTCWJwlSSqMxVmSpMJYnCVJKozFWZKkwoysOEfE6RFxZ0Rsj4jzR/U8kkbLXJbG\nL0bxPueIWALcBfwOsAO4ATgnM+9o/MkkjYy5LLVjVJ3zycD2zLwnMx8BLgPWjei5JI2OuSy1YOmI\nvu4q4L6+6zuA5y105yVHzOQhR68cUSjSZPrJPd/ck5lHtxzGQLkMMLN8aa58xiEjDUrjc0jsbzuE\nibfjO4+yd8/PYpDHjKo4H1REbAA2ACxd/jSe/n82tRWKVKS7/8fz7m07hsXqz+enPf0QPrHl+JYj\nUlNWxJ62Q5h4r3je3oEfM6qx9k7gmL7rq6ttP5eZGzNzbWauXXLEzIjCkFTTQXMZHpvPM8uXjC04\nqatGVZxvAE6IiOMi4lDgbGDziJ5L0uiYy1ILRjLWzsz9EfFG4N+AJcDFmXn7KJ5L0uiYy1I7RrbP\nOTO/CHxxVF9f0niYy9L4eYYwSZIKY3GWJKkwFmdJkgpjcZYkqTAWZ0mSCmNxlqQh7XnDc9oOQR3V\n2uk7JalEgxbcQe6//MKbBw1HU8rOWZKkwtg5S5pa4x5Lz/d8dtOaj8VZ0tQocR/x3Jgs1gLH2pIk\nFcfOWVKnldgtH4idtMDOWZKk4licJXXSnjc8Z+K65vl04TVocI61JXVGVwtZ/+tyzD0d7JwlSSqM\nnbOkidfVjnk+s6/VDrrb7JwlTbRpKsz9pvV1TwuLsyRJhXGsLWki2Tk64u4yO2dJE8fC/FiuR/dY\nnCVJKszQxTkijomIayLijoi4PSLeVG1fFhFXRcTd1ecjmwtX0ihMSj535cQio+DadEudznk/8BeZ\nuQY4BTgvItYA5wNXZ+YJwNXVdUllKz6fLTyL4zp1w9DFOTN3ZeZN1eWHgW3AKmAdsKm62ybgrLpB\nShot81kqSyNHa0fEscCJwBZgRWbuqm66H1jRxHNIGo/S8tlOcHAexT35ah8QFhFPAT4LvDkzH+q/\nLTMTyAUetyEitkbE1kcf2lc3DEkNaCKf9+15dAyRSt1WqzhHxCH0EvnSzPxctfmBiFhZ3b4S2D3f\nYzNzY2auzcy1S46YqROGpAY0lc8zy5eMJ2Cpw+ocrR3ARcC2zPxg302bgfXV5fXAlcOHJ2kcSs1n\nR9r1eAT35Kqzz/lU4I+AWyNidsfGO4D3A5dHxLnAvcCr6oUoaQzMZ6kgQxfnzPwPIBa4+bRhv66k\n8Sstn+32mrXnDc/x4LAJ4xnCJEkqjMVZkqTC+F+pJBXDcfbo+N7nyWLnLElSYSzOkiQVxuIsSVJh\nLM6SiuD+5vFwnSeDxVmSpMJYnCVJKozFWZKkwlicJUkqjCchkdQqD1AaP09IUj47Z0mSCmNxliSp\nMBZnSZIKY3GWJKkwFmdJkgpjcZYkqTAWZ0mSCmNxliSpMBZnSZIKY3GWJKkwtYtzRCyJiK9HxL9U\n14+LiC0RsT0iPh0Rh9YPU9I4jDufPXVnu1z/cjXROb8J2NZ3/QPAhzLzV4HvA+c28BySxmOs+ey5\nndvl+perVnGOiNXAy4F/rK4H8BLgiuoum4Cz6jyHpPEwn6Vy1O2c/w54K/Cz6vpRwL7M3F9d3wGs\nmu+BEbEhIrZGxNZHH9pXMwxJDWgkn/fteXT0kUodN3RxjohXALsz88ZhHp+ZGzNzbWauXXLEzLBh\nSGpAk/k8s3xJw9FJ06fO/3M+FTgzIs4AnggcAXwYmImIpdVf26uBnfXDlDRi5rNUkKE758x8e2au\nzsxjgbOBf8/M1wDXAK+s7rYeuLJ2lJJGynyWyjKK9zm/DXhLRGynt8/qohE8h6TxMJ+lFtQZa/9c\nZl4LXFtdvgc4uYmvK2n8zGepfZ4hTJKkwlicJUkqjMVZkqTCNLLPWZKGNXsKSc/zPD6etrN8ds6S\nJBXG4ixJUmEszpIkFcbiLElSYSzOA7hg6XVthyB1lgcpjYfrPBmm/mjtQQvuIPd/z/4XDRqOJEl2\nzpIklWaqOudxj6Xnez67aUnSwXS6OJe4j3huTBZr6Rc8IcnoDLuv+YFc3nAk0+en+dDAj3GsLUlS\nYTrXOZfYLR+InbQkaS47Z0mSCtOZ4nzB0usmrmueTxdeg1TX8gtv9v24DXItJ89Ej7W7Wsj6X5dj\nbk2z5Rfe7MFhNViUJ1dnOmdJkrpiIjvnrnbM85l9rXbQkjQ9Jq5znqbC3G9aX7fk/ufBuWaTb+KK\nsyRJXVerOEfETERcERHfjIhtEfH8iFgWEVdFxN3V5yObCLQrR2PX4RpolMaZz8OwE1wc16kb6nbO\nHwa+lJm/Djwb2AacD1ydmScAV1fXa7EgPZbroREZSz7X4bh2Ya5NtwxdnCPiqcALgYsAMvORzNwH\nrAM2VXfbBJxVN0hJo2U+S2Wp0zkfBzwI/FNEfD0i/jEiDgNWZOau6j73Ayvme3BEbIiIrRGx9dGH\n9s37BI5xF+baqGGN5fO+PY+OPFg7xMdyPbqnTnFeCpwEfDQzTwR+yJyRV2YmkPM9ODM3ZubazFy7\n5IiZx91u4Vkc10kNaSyfZ5YvGXmw4BgXXIMuq1OcdwA7MnNLdf0Kesn9QESsBKg+764XoqQxMJ+l\nggxdnDPzfuC+iHhmtek04A5gM7C+2rYeuHKQr+u4dnCumeoaVT6Pw7R2jtP6uqdF3TOE/SlwaUQc\nCtwDvI5ewb88Is4F7gVeVfM5JI3HxObzbKGahvNwW5SnQ63inJk3A2vnuem0Ol9X0viZz1I5ijq3\ntqPZejwPt6Zdf1fZpS7abnn6ePpOSZIKU0RxXhkP2zU3yLWUuvM2oy68Bg2uqLG2JDVt0kbdFmNB\nIZ2zJEn6BTvnjvLgMOnx5nalJXTSdsqaj8VZ0tSarzCOsmBbiLVYjrUlSSqMnbMk9Rmku93zhufY\nDWsk7Jw7zrdVSaNjYdaoWJwlSSqMxVmSpMJYnCVJKozFWZKkwlicp8AFS6/zwDBJmiAWZ0mSCmNx\nliSpMBZnSZIKY3GWJKkwFmdJkgpjcZYkqTAWZ0mSClPrv1JFxJ8D/xNI4FbgdcBK4DLgKOBG4I8y\n85GacWqCPHn1T9sOQUMwn6VyDF2cI2IV8GfAmsz8cURc
DpwNnAF8KDMvi4gLgXOBjzYSraSRMJ/L\n8Kz//s62QyjebZ95b9shjEXdsfZS4EkRsRR4MrALeAlwRXX7JuCsms8haTzMZ6kQQxfnzNwJ/A3w\nXXpJ/AN6Y699mbm/utsOYNV8j4+IDRGxNSK2PvyDHw4bhgbgKTy1kCbzed+eR8cRstRpQxfniDgS\nWAccB/wycBhw+mIfn5kbM3NtZq49/KmHDRuGBvCe/S9qOwQVqsl8nlm+ZERRStOjzlj7pcC3M/PB\nzPwp8DngVGCmGosBrAZ21oxR0uiZz1JB6hTn7wKnRMSTIyKA04A7gGuAV1b3WQ9cWS9ESWNgPksF\nqbPPeQu9A0Vuove2i18CNgJvA94SEdvpvf3iogbilDRC5rNUllrvc87MdwHvmrP5HuDkOl9X0viZ\nz1I5PEOYJEmFsThLklQYi7MkSYWxOEuSVBiLsyRJhbE4S5JUmFpvpdJk8LSdkjRZ7JwlSSqMxVmS\npMJYnCVJKozFWZKkwlicO86DwSRp8licJUkqjMVZkqTCWJwlSSqMJyHpKPc1S9LksnOWJKkwFmdJ\nkgpTRHHelYc7hm2QaylJk62I4ixJkn6hqOJsx1fPe/a/yDWUpA4oqjhLkqRFFOeIuDgidkfEbX3b\nlkXEVRFxd/X5yGp7RMRHImJ7RNwSESeNMnhJgzGfpcmwmM75EuD0OdvOB67OzBOAq6vrAC8DTqg+\nNgAfHTQgR7ODc800gEsYYz5LGs5Bi3NmfhXYO2fzOmBTdXkTcFbf9k9kz/XATESsbCpYSfWYz9Jk\nGHaf84rM3FVdvh9YUV1eBdzXd78d1bbHiYgNEbE1IrY++tC+x91uJ7g4rpMa0Gg+79vz6OgilaZE\n7dN3ZmZGRA7xuI3ARoAn/spvzPv42cJzwdLr6oTYSRZljUIT+fwbv/2kgR8v6bGG7ZwfmB1vVZ93\nV9t3Asf03W91tU1SucxnqTDDFufNwPrq8nrgyr7tf1wd5XkK8IO+cdnQ7BIfy/VQw8aaz5IO7qBj\n7Yj4FPBiYHlE7ADeBbwfuDwizgXuBV5V3f2LwBnAduBHwOuaCtQRt0VZ9ZWSz5IO7KDFOTPPWeCm\n0+a5bwLn1Q1K0miYz9JkmLgzhE1r9zitr1uSplHto7XbME0jbouyJE2fieucJUnquonsnGf1d5Vd\n6qLtliVputk5S5JUmM4U567884cuvAZJUj0TPdaez6SNui3GkqS5OtM5S5LUFZ3rnPvN7UpL6KTt\nlCVJB9Pp4jzXfIVxlAXbQixJGoZjbUmSCjNVnfN8BuluL1h6nd2wJGnk7JwHYGGWJI2DxVmSpMJY\nnCVJKozFWZKkwlicJUkqjMVZkqTCWJwlSSqMxVmSpMJYnCVJKozFWZKkwhy0OEfExRGxOyJu69v2\n1xHxzYi4JSI+HxEzfbe9PSK2R8SdEfF7owpc0uDMZ2kyLKZzvgQ4fc62q4BnZeZvAXcBbweIiDXA\n2cBvVo/5h4hY0li0kuq6BPNZKt5Bi3NmfhXYO2fblzNzf3X1emB1dXkdcFlm/iQzvw1sB05uMF5J\nNZjP0mRoYp/z64F/rS6vAu7ru21Hte1xImJDRGyNiK2PPrSvgTAkNaB2Pu/b8+iIQ5S6r1Zxjoh3\nAvuBSwd9bGZuzMy1mbl2yREzB3+ApJFqKp9nljv5luoa+v85R8RrgVcAp2VmVpt3Asf03W11tU1S\nwcxnqSxDdc4RcTrwVuDMzPxR302bgbMj4gkRcRxwAvBf9cOUNCrms1Seg3bOEfEp4MXA8ojYAbyL\n3tGcTwCuigiA6zPzDZl5e0RcDtxBbzx2Xma6A0oqhPksTYaDFufMPGeezRcd4P7vBd5bJyhJo2E+\nS5PBM4RJklQYi7MkSYWxOEuSVJih30olSWrWbZ9x97567JwlSSqMxVmSpMLEL04G1GIQEQ8CPwT2\ntB3LApZTZmylxgXlxlZqXPD42J6RmUe3FcywIuJh4M6241jAJH3/S1FqXDA5sQ2cy0UUZ4CI2JqZ\na9uOYz6lxlZqXFBubKXGBWXHNoiSX4exDa7UuKDbsTnWliSpMBZnSZIKU1Jx3th2AAdQamylxgXl\nxlZqXFB2bIMo+XUY2+BKjQs6HFsx+5wlSVJPSZ2zJEmigOIcEadHxJ0RsT0izm85lmMi4pqIuCMi\nbo+IN1Xb3x0ROyPi5urjjJbi+05E3FrFsLXatiwiroqIu6vPR445pmf2rcvNEfFQRLy5rTWLiIsj\nYndE3Na3bd41ip6PVD97t0TESS3E9tcR8c3q+T8fETPV9mMj4sd963fhKGNrSin5bC4PHZf5PHxc\nzeZyZrb2ASwBvgUcDxwKfANY02I8K4GTqsuHA3cBa4B3A3/Z5lpVMX0HWD5n218B51eXzwc+0PL3\n837gGW2tGfBC4CTgtoOtEXAG8K9AAKcAW1qI7XeBpdXlD/TFdmz//Sbho6R8Npcb+36az4uPq9Fc\nbrtzPhnYnpn3ZOYjwGXAuraCycxdmXlTdflhYBuwqq14FmkdsKm6vAk4q8VYTgO+lZn3thVAZn4V\n2Dtn80JrtA74RPZcD8xExMpxxpaZX87M/dXV64HVo3r+MSgmn83lRpjPA8TVdC63XZxXAff1Xd9B\nIQkUEccCJwJbqk1vrMYVF7cxbqok8OWIuDEiNlTbVmTmrury/cCKdkID4GzgU33XS1gzWHiNSvv5\nez29v/xnHRcRX4+I6yLiv7UV1ABKW0/AXK7BfB5e7VxuuzgXKSKeAnwWeHNmPgR8FPgV4DnALuBv\nWwrtBZl5EvAy4LyIeGH/jdmbobRy+H1EHAqcCXym2lTKmj1Gm2t0IBHxTmA/cGm1aRfw9Mw8EXgL\n8MmIOKKt+CaVuTwc83l4TeVy28V5J3BM3/XV1bbWRMQh9JL50sz8HEBmPpCZj2bmz4CP0xvfjV1m\n7qw+7wY+X8XxwOzopvq8u43Y6P2SuSkzH6hiLGLNKgutURE/fxHxWuAVwGuqXzZk5k8y83vV5Rvp\n7cv9tXHHNqAi1nOWuVyL+TyEJnO57eJ8A3BCRBxX/aV2NrC5rWAiIoCLgG2Z+cG+7f37Lf4AuG3u\nY8cQ22ERcfjsZXoHH9xGb73WV3dbD1w57tgq59A3AithzfostEabgT+ujvI8BfhB37hsLCLidOCt\nwJmZ+aO+7UdHxJLq8vHACcA944xtCMXks7lcm/k8oMZzeVRHsy32g94RdnfR+2vinS3H8gJ6I5Jb\ngJurjzOAfwZurbZvBla2ENvx9I5+/QZw++xaAUcBVwN3A18BlrUQ22HA94Cn9m1rZc3o/ULZBfyU\n3j6ncxdaI3pHdf599bN3K7C2hdi209tPNvvzdmF13z+svs83AzcBvz/u7+uQr7GIfDaXa8VnPg8X\nV6O57BnCJEkqTNtjbUmSNIfFWZKkwlicJUkqjMVZkqTCWJwlSSqMxVmSpMJYnCVJKozFWZKkwlic\nJUkqjMVZkqT
CWJwlSSqMxVmSpMJYnCVJKozFWZKkwlicJUkqjMVZkqTCWJwlSSqMxVmSpMJYnCVJ\nKozFWZKkwlicJUkqzMiKc0ScHhF3RsT2iDh/VM8jabTMZWn8IjOb/6IRS4C7gN8BdgA3AOdk5h2N\nP5mkkTGXpXYsHdHXPRnYnpn3AETEZcA6YN6EXn74TB579C+PKBRpMt347W17MvPolsMYKJcBjjo0\ncvWTxxSdHmfvkw5vO4TGLPvxw22H0IgdP4LvPZIxyGNGVZxXAff1Xd8BPK//DhGxAdgA8PTlT+OG\n9/3fEYUiTaZfOue37207BhaRy/DYfF79JPjyi0b1q0UH88k1j/v2TKxX33Ft2yE04nev2z/wY1rL\noMzcCGwEWHv8muZn65LGpj+fnz0T5vOYfXLNi9sOYST6X1dXCvVijao47wSO6bu+utomabKYywXr\nalGez+xrnZYiPaqjtW8AToiI4yLiUOBsYPOInkvS6JjLUgtGUpwzcz/wRuDfgG3A5Zl5+yieS9Lo\nmMvlmqauud+0vO6R7XPOzC8CXxzV15c0HuZyWaalOB3INIy4PUOYJEmFsThL0oSwa36sLq+Hb0aU\npMJ1uQjV1dURt52zJEmFsThLUsHsmhena+vkWFuSCtS1YjMOXRpx2zlLklQYi7MkSYWxOEtSQT65\n5sWOtGvqwvpZnCVJKozFWSPzha/tbTsEaaJ0oeMrxaRPICzOkiQVxrdSqXH9HfPs5ZefuqytcCRp\n4licJallkzx+Ld2kvvfZsbYkSYWxOKsxX/ja3gUPAvPgMElaPIuzJEmFsThLklQYi7MasZix9YHG\n3tK08mCw8Zi0dfZobdVisZWk5tk5S5JUGIuzxs5uW5IObOjiHBHHRMQ1EXFHRNweEW+qti+LiKsi\n4u7q85HNhauS1Cmy7n8ui/k8fpN+7udJNElrXqdz3g/8RWauAU4BzouINcD5wNWZeQJwdXVdUtnM\nZ6kgQxfnzNyVmTdVlx8GtgGrgHXApupum4Cz6gYpabTMZ6ksjexzjohjgROBLcCKzNxV3XQ/sGKB\nx2yIiK0RsfXBh7/fRBgakyZH0o62y1M3n/c+MpYwpU6rXZwj4inAZ4E3Z+ZD/bdlZgI53+Myc2Nm\nrs3MtUcf7m4sqQRN5POyQ8cQqNRxtYpzRBxCL5EvzczPVZsfiIiV1e0rgd31QlRJ7HS7y3yWylHn\naO0ALgK2ZeYH+27aDKyvLq8Hrhw+PEnjYD5LZalzhrBTgT8Cbo2Im6tt7wDeD1weEecC9wKvqhei\num62G3/5qctajmSqmc9SQYYuzpn5H0AscPNpw35dlclxdreZz1JZPEOYJEmFsTirGHbnktRjcdZB\njbNoelpPTYNJOYVkV03C+lucJUkqjMVZksbs1Xdc23YIU20S1r/OW6nUcW2Ol317laRpZucsSVJh\nLM6alwdlSVJ7LM4qmn8kSJpGFmdJkgrjAWF6DDtVSWqfnbOK54lJJE0bi7MkSYWxOAuYjO609Pgk\nqSkWZ0mSCuMBYbIjlVowewrJSfgnDF0xCaftnGXnrIkyCeN3SarL4ixJUmEca08xO1BJKpOdsyaS\nf1ioKyZpP+gkm7R1tjhLklSY2sU5IpZExNcj4l+q68dFxJaI2B4Rn46IQ+uHqabZeWo+5rNUhiY6\n5zcB2/qufwD4UGb+KvB94NwGnkMN6dLRzl16LQUxn6UC1CrOEbEaeDnwj9X1AF4CXFHdZRNwVp3n\nkDQe5rNUjrpHa/8d8Fbg8Or6UcC+zNxfXd8BrKr5HNIBfeFre3n5qcvaDqMLzOeWeEKS0Zm0A8Fm\nDd05R8QrgN2ZeeOQj98QEVsjYuuDD39/2DA0AEfAWkiT+bz3kYaDk6ZQnc75VODMiDgDeCJwBPBh\nYCYillZ/ba8Gds734MzcCGwEWHv8mqwRh6T6GsvnZ8+E+SzVNHTnnJlvz8zVmXkscDbw75n5GuAa\n4JXV3dYDV9aOUjoIDw6rx3wuw6SOYEv06juunej1HMX7nN8GvCUittPbZ3XRCJ5D0niYz1ILGjl9\nZ2ZeC1xbXb4HOLmJr6tm2FFqEOZzuzw4rL5J7phneYawjpu2wjxtr1dSN1mcJUkqjP+VSp0z2z37\n3mdNMsfbg+vCOHuWnXNHefSy1A1dKjij1LV1sjhLklQYx9rqLMfb6gpH3AvrWsc8y865gxxnS93U\n1UI0rC6vh8VZkqTCONbuEDvm+flfq9Qljri73THPsnOWpAk0DQVqPtPyui3OkiQVxrF2BzjOPjiP\n3FYXTdOIe1o65lkWZ00V9z+ri/oLV5cK9bQV5H6OtSVJKozFecI50pbUrwvd5qvvuLYTr6MOi7Mk\nSYVxn7OmjgeHqevmdp2TsB962jvluSzOE8pxtqRZF3z8fbznT96x4O0lFmuL8YE51pYkqTB2zhPI\nrrkZvq1Kk+6Cj7/vcZcP1EHPmq9rHaSbvuHf/2rR913wa9T+Cu340NNOHsvzWJwlSQONmSe1sE4S\nx9qSJBWmVnGOiJmIuCIivhkR2yLi+RGxLCKuioi7q89HNhXstPvC1/Y60m6Ya/oL5vNk6R9pL2a7\nJkvdzvnDwJcy89eBZwPbgPOBqzPzBODq6rpUNAs0YD5LxRi6OEfEU4EXAhcBZOYjmbkPWAdsqu62\nCTirbpCSRst8lspS54Cw44AHgX+KiGcDNwJvAlZk5q7qPvcDK+qFKLCz08iZzxNiMWPrQY7cVpnq\njLWXAicBH83ME4EfMmfklZkJ5HwPjogNEbE1IrY++PD3a4TRbe4THZ8pX+vG8nnvIyOPdWq5P3l6\n1CnOO4Admbmlun4FveR+ICJWAlSfd8/34MzcmJlrM3Pt0Yd7jInUssbyedmhY4lX6rShi3Nm3g/c\nFxHPrDadBtwBbAbWV9vWA1fWilDSyJnP3XTBx99ntz2h6p6E5E+BSyPiUOAe4HX0Cv7lEXEucC/w\nqprPMbWmeMTaqin+xxjmc6HqFtiDnXtb5alVnDPzZmDtPDedVufrSho/81kqh2cIkySpMBZnSZIK\nY3Eu0JS/pacYfg9UgqYO6PLgsMnif6Uq0DQdiPTD533lMdcP2/LSliKRpHLYOUuSVBg7Z7Vibsc8\nd7sdtKbdqEbQvq1qMlicJakg7hcWONaWJKk4FmeN3UIj7bn3Wcz9JA3OI7fLZ3HW2FhwpQMbd8G0\nQJfL4ixJUmEsziqanbakaeTR2hqLOkXWt1ep69ocL88+t2+vKoudsyRJhbE4S1KLPChL87E4a6Sa\nPELb/c/S6PhHQlkszpIkFcYDwiSpBXaqOhA7Z0mSCmNx1siMYh+xZxmTRsfTepbD4qzGWUClAyu9\nAJYe3zSwOEuSVJhaxTki/jwibo+I2yLiUxHxxIg4LiK2RMT2iPh0RBzaVLDSLDvz5pnPUjmGLs4R\nsQr4M2BtZj4LWAKcDXwA+FBm/irwfeDcJgJV+cY9znZ83hzzeTwmaZ/u
JMXaRXXH2kuBJ0XEUuDJ\nwC7gJcAV1e2bgLNqPoek8TCfpUIMXZwzcyfwN8B36SXxD4AbgX2Zub+62w5g1XyPj4gNEbE1IrY+\n+PD3hw1DUgOazOe9j4wj4sljF6pB1BlrHwmsA44Dfhk4DDh9sY/PzI2ZuTYz1x59+JHDhqFCtDle\ndrRdX5P5vMy90p3ieLsddcbaLwW+nZkPZuZPgc8BpwIz1VgMYDWws2aMkkbPfJYKUuf0nd8FTomI\nJwM/Bk4DtgLXAK8ELgPWA1fWDVLlsmvtDPN5ROw6NYw6+5y30DtQ5Cbg1uprbQTeBrwlIrYDRwEX\nNRCndEAeuV2P+ayD8Y+M8ar1jy8y813Au+Zsvgc4uc7XlTR+5rNUDs8QpqHZqUoHZrepYfkvI9Up\nP3zeVzhsy0vbDkNTrqtF+eev6/fbjWMa2DlLklQYO2cNrPRx9mx8dtCSJpWdsyRJhbFz1kBK75ql\nNnV1X7PGz85ZneV7nyVNKouzJEmFcaytRbEDlQ7MkbaaZHFW5/neZ42SRVmj4FhbkqTCWJx1UI60\nJWm8HGtrQV0qyp6YRKPgSFujYucsSVJhLM6aKl2aBkjqLsfampdFTFqY42yNmp2zJEmFsThr6nha\nT0mlc6ytx7BoSQfmSFvjYOcsSVJh7Jw1tTytpwZhx/wLT/l/i5uwvedP3vG4bc99/seaDqe2G/7z\nf7UdwuPYOUuSVJiDFueIuDjUWthgAAAJCklEQVQidkfEbX3blkXEVRFxd/X5yGp7RMRHImJ7RNwS\nESeNMng1x4OkpoP5rDaV2DVDL67SYltM53wJcPqcbecDV2fmCcDV1XWAlwEnVB8bgI82E6ZGaZqL\n8hT+UXIJ5vPAHGkP54KPv2+i1q6kAn3Q4pyZXwX2ztm8DthUXd4EnNW3/RPZcz0wExErmwpWUj3m\nszQZhj0gbEVm7qou3w+sqC6vAu7ru9+Oatsu5oiIDfT+Gufpy582ZBhSM6b84LBG83n1k0YXqCZT\nSR3ppKh9tHZmZkTkEI/bCGwEWHv8moEfr/qmbJyrRWgin589M/jjSzVJI1k1Y/YPibaP4B72aO0H\nZsdb1efd1fadwDF991tdbZNULvNZKsywnfNmYD3w/urzlX3b3xgRlwHPA37QNy6TijbF//PZfJ7D\njrkZX7r4qLZDmFiLeSvVp4D/BJ4ZETsi4lx6Sfw7EXE38NLqOsAXgXuA7cDHgf89kqhVmyPt6WQ+\na1wmvTC3vZ/8oJ1zZp6zwE2nzXPfBM6rG5Sk0TCfpcng6TunjB3zwU3xeFs40tYvtHlwmMVZktSY\nSR9nl8Jza0uSVBg75yniSHswU35ikqnjOFsLaWO8becsSVJhLM7SAUzhP8aQhub+5uY41p4CFhfp\nwBxp1zMtRfm5z//Y2Ebbds6SJBXGzrnj7Jqb4cFh3WTHrFLZOUuShjYtI+1Zz33+x8Zyak+LsyRJ\nhbE4d5RHGTfPNe0WR9qqY9Tds/ucJUkDm7Zx9rjZOUuSVBg75w5y9DpaHrk92Rxn1/eli4/i9Nd/\n7+eXp9UoT+tpcZaG4L+VnFzv+ZN3tB3CxPvS8z821UV5HBxrS5JUGDvnDnGcLWmUxvH+3kk0itN6\n2jl3gG/xaY/rLmkULM6SJBXG4ixJOihH2gfW9Gk9Lc6SJBXmoMU5Ii6OiN0RcVvftr+OiG9GxC0R\n8fmImOm77e0RsT0i7oyI3xtV4Opxn2f7Jmmfv/msQc12hOP6P8aTrqnueTGd8yXA6XO2XQU8KzN/\nC7gLeDtARKwBzgZ+s3rMP0TEkkYildSESzCfNQTH2uN10OKcmV8F9s7Z9uXM3F9dvR5YXV1eB1yW\nmT/JzG8D24GTG4xXUg3mszQZmnif8+uBT1eXV9FL7lk7qm2PExEbgA0AT1/+tAbCmC6TMkadJh05\nrWftfF79pFGGp3GxUx5eE6f1rFWcI+KdwH7g0kEfm5kbgY0Aa49fk3XimEYdKAIqTFP5/OyZMJ+l\nmoYuzhHxWuAVwGmZOZuMO4Fj+u62utomqWDms1SWod5KFRGnA28FzszMH/XdtBk4OyKeEBHHAScA\n/1U/TEmjYj5rLkfazaizjgftnCPiU8CLgeURsQN4F72jOZ8AXBURANdn5hsy8/aIuBy4g9547LzM\nfHTo6CQ1ynzWgfTvK7VAt+ugxTkzz5ln80UHuP97gffWCUrSaJjP0mTwDGGSpMewa27Oc5//MbY9\n5RkDP87iLEn6+VnAPBNYGSzOkiQVpomTkEiSOsBxdjkszpI0xSzIZXKsLUlSYeIXJwNqMYiIB4Ef\nAnvajmUByykztlLjgnJjKzUueHxsz8jMo9sKZlgR8TBwZ9txLGCSvv+lKDUumJzYBs7lIoozQERs\nzcy1bccxn1JjKzUuKDe2UuOCsmMbRMmvw9gGV2pc0O3YHGtLklQYi7MkSYUpqThvbDuAAyg1tlLj\ngnJjKzUuKDu2QZT8OoxtcKXGBR2OrZh9zpIkqaekzlmSJGFxliSpOK0X54g4PSLujIjtEXF+y7Ec\nExHXRMQdEXF7RLyp2v7uiNgZETdXH2e0FN93IuLWKoat1bZlEXFVRNxdfT5yzDE9s29dbo6IhyLi\nzW2tWURcHBG7I+K2vm3zrlH0fKT62bslIk5qIba/johvVs//+YiYqbYfGxE/7lu/C0cZW1NKyWdz\neei4zOfh42o2lzOztQ9gCfAt4HjgUOAbwJoW41kJnFRdPhy4C1gDvBv4yzbXqorpO8DyOdv+Cji/\nunw+8IGWv5/3A89oa82AFwInAbcdbI2AM4B/BQI4BdjSQmy/CyytLn+gL7Zj++83CR8l5bO53Nj3\n03xefFyN5nLbnfPJwPbMvCczHwEuA9a1FUxm7srMm6rLDwPbgFVtxbNI64BN1eVNwFktxnIa8K3M\nvLetADLzq8DeOZsXWqN1wCey53pgJiJWjjO2zPxyZu6vrl4PrB7V849BMflsLjfCfB4grqZzue3i\nvAq4r+/6DgpJoIg4FjgR2FJtemM1rri4jXFTJYEvR8SNEbGh2rYiM3dVl+8HVrQTGgBnA5/qu17C\nmsHCa1Taz9/r6f3lP+u4iPh6RFwXEf+traAGUNp6AuZyDebz8GrnctvFuUgR8RTgs8CbM/Mh4KPA\nrwDPAXYBf9tSaC/IzJOAlwHnRcQL+2/M3gyllffGRcShwJnAZ6pNpazZY7S5RgcSEe8E9gOXVpt2\nAU/PzBOBtwCfjIgj2opvUpnLwzGfh9dULrddnHcCx/RdX11ta01EHEIvmS/NzM8BZOYDmfloZv4M\n+Di98d3YZebO6vNu4PNVHA/Mjm6qz7vbiI3eL5mbMvOBKsYi1qyy0BoV8fMXEa8FXgG8pvplQ2b+\nJDO/V12+kd6+3F8bd2wDKmI9Z5nLtZjPQ2gyl9suzjcAJ0TEcdVfamcDm9sKJiICuAjYlpkf7Nve\nv9/iD4Db5j52DLEdFhGHz16
md/DBbfTWa311t/XAleOOrXIOfSOwEtasz0JrtBn44+ooz1OAH/SN\ny8YiIk4H3gqcmZk/6tt+dEQsqS4fD5wA3DPO2IZQTD6by7WZzwNqPJdHdTTbYj/oHWF3F72/Jt7Z\nciwvoDciuQW4ufo4A/hn4NZq+2ZgZQuxHU/v6NdvALfPrhVwFHA1cDfwFWBZC7EdBnwPeGrftlbW\njN4vlF3AT+ntczp3oTWid1Tn31c/e7cCa1uIbTu9/WSzP28XVvf9w+r7fDNwE/D74/6+Dvkai8hn\nc7lWfObzcHE1msuevlOSpMK0PdaWJElzWJwlSSqMxVmSpMJYnCVJKozFWZKkwlicJUkqjMVZkqTC\n/H9u0hIv+VW9TwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zWKmRev3WKK4", + "colab_type": "text" + }, + "source": [ + "### Visualise Panoptic Results" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "StOBbFmujxIw", + "colab_type": "code", + "outputId": "28e3f307-1c2b-4343-e759-23dd1e965d79", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + } + }, + "source": [ + "\n", + "# Visualise Results\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i)\n", + " image = np.array(img)[:, :, [2, 1, 0]]\n", + " result = vis_demo.run_on_opencv_image(image, panoptic=\"True\")\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(result)\n", + "plt.show()\n" + ], + "execution_count": 51, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3XvQXXV97/HP90lEpdIi6qQ0QQIt\ndUqdo5AccMQGR7QnUDQ4dShoERXJdArWy6kQcMYETltDbbH2phOFEjhgRKtCL1jxmtNaKISLoBSN\nFEoygaiI0kq1yfM9f+y1wsp+1v2y12/t/X7NPPPsvfa6/PZ6nt/+7u93/dZa5u4CAADhmOu7AQAA\nYH8EZwAAAkNwBgAgMARnAAACQ3AGACAwBGcAAALTWXA2s9Vmdr+ZbTezdV1tB0C36MvA5FkX5zmb\n2SJJ35T0Kkk7JN0m6Ux3/0brGwPQGfoy0I+uMufjJG139wfc/SeStkha09G2AHSHvgz0YHFH610q\n6eHE8x2Sjs+a2cy4TBmw0Hfd/Xk9t6FSX5aq9+cVK1ZIkrZt25b6WtXpsbTXy6yj7TY1Xa6svDY3\nmTe5TJl9GkvOW2Z7Vf52Q+TuVmX+rsrar5O02t3fGj0/S9Lx7n5+Yp61ktZGT1csXAsw87a5+8o+\nG1CmL0fTa/fn+DPIbOFnl7tXnp5oU+F2s+Zps01Nlysrr81N5k0uU2afxpLzltlelb/dEFUNzl1l\nzjslHZZ4viyato+7b5K0SSJzBgJW2Jel+v256APfzGoFkiEZyvtq8reI52/ri8gs6OqY822SjjKz\nI8zsAElnSLqxo20B6E7vfdnMKn2gF83v7qW+FJTdZry+5OO0n2mS9R7LZu1ZyxK4n9JJ5uzue8zs\nfEn/IGmRpCvd/etdbAtAd+jLQD86OeZcuRGUtYE0vR9zrqPNsnbV5aocF62bpY1vI5Sye9fHnMu2\nocl+lfrfj12pesyZK4QBwBQJIeFCcwRnAAACQ3AGELS2B1TFA4/y1lllm2XWF6+zyvSqyg6oyht8\nVfS+s14vOyAsbx8wIGx/XZ1KBQCp5uZX7Xu8yE8sDE6L/MQFy6U9L5qeJmvbWdss2kbee1nkJxYu\nF2+3iTL7NK0d83Nb92tP2XOSq6LsXg6ZMwAAgSFzBjBxySzNVL2UmbVMlXWNz1s2487KbqtmvfE+\nqPP+i1RZZ/J9F5WVm5adKVuXR3AGMDOqlLy7ltWW5BcXzC7K2gAABIbMGcDUCyljLhK3lQx6thGc\nAUy1IQXmpGS7CdSzh7I2AACBIXMGMJXqZsxtZqnj2W/dNlHqnj1kzgAABIbMGcDUaHJ8uUpW+vpz\n/r7cjOdK111xSuo26rSV49Czg1tGAuGayltGzs2vaj2wlA10edstHXBblgzeUrWg3cZ+pGQ+Gdwy\nEgCAgSNzBsJF5lywnrKyttdXtpwlmUVPKoMmc56Mqpkzx5wBDE7dwBVaMB6XbN91c+UDdReHCtAv\nytoAAASGzBnAYNTJmEPPlrOkZdF575/y9HThmDMQLo45jy1XZH5u62CDcVnxcem8/VFl/xLUJ4PR\n2gAADFztsraZHSbpaklLJLmkTe7+QTM7RNLHJS2X9KCk0939+82bCqArIffnshnzrIgrA9fNnZK5\nb8iGh692WdvMDpV0qLvfYWYHSdom6TRJb5L0mLtvNLN1kp7t7hcWrIuyNrDQxMrak+zPVcvaRcF5\nFkrZWcqUuKX8IE0gn4yJlbXdfZe73xE9fkLSfZKWSlojaXM022aNOjiAgNGfgbC0MlrbzJZLOkbS\nrZKWuPuu6KVHNCqTARiIkPpzW4OeplWZErfEedBD1HhAmJk9S9JfS3qHu/8w+ZqPauapJS4zW2tm\nt5vZ7U3bAKAdofTnuflVpQPzrJa0k9gH06dRcDazp2nUka91909Fkx+Njl/Fx7F2py3r7pvcfeUQ\nTxUBphH9GQhHk9HaJukKSfe5++WJl26UdLakjdHvGxq1EEDnQunPVQY2kS3u74xz10mStnxkY+rr\nDPwalibHnE+QdJake8zsrmjaxRp14uvN7BxJD0k6vVkTAUwA/RkISO3g7O7/KClraPhJddcLYPLo\nz9PjjHPXZWbPEoPDhoIrhAEYHEramHYEZwAAAsNdqQAEgSuBAU8hOAPoVZNLTwLTirI2AACBITgD\nGARK2pglBGcAmDJnnLtu30VJ0hRdHhX9IzgDABAYgjMATKkyGTTCRHAGACAwBGcAAAJDcAYAIDAE\nZwC94QIkQDqCMwAAgSE4AwAQGIIzAACB4cYXACaOY81APjJnAAACQ3AGACAwBGcAAAJDcAYAIDAE\nZwAAAtM4OJvZIjO708z+Nnp+hJndambbzezjZnZA82YWc3e5+yQ2BUytUPozMOvayJzfLum+xPPL\nJH3A3X9B0vclndPCNkrrI0AP5UtB/AUm7aerdffdLlQWVH8GZlWj4GxmyyT9mqSPRs9N0iskfTKa\nZbOk05psA8Bk0J+BcDS9CMmfSL
pA0kHR8+dIetzd90TPd0hamragma2VtLbh9pPra2tVUyfOQLP2\nUZMMtey6817P+9sVvY5WBdOfk15/zt93sVogaLUzZzM7VdJud99WZ3l33+TuK919Zd02oB1mVquM\nHAfOOsEz3h6BNwz0ZyAsTTLnEyS9xsxOkfQMST8t6YOSDjazxdG37WWSdjZvJoCO0Z+BgNTOnN39\nIndf5u7LJZ0h6Yvu/gZJX5L0umi2syXd0GAbpQcJZb2WNr3MgKO2BydVWV+bg6GaZLdtalo6Z3BY\ntybRnwGU18V5zhdKepeZbdfomNUVHWwDwGTQn4EetHJXKnf/sqQvR48fkHRcC+tMzfa6HLyUte02\nsua09TEYCiHqoj+Pi+86lXV3qng6d6fCrArulpFlAmjZIDu+TJmgnLbe+HnVYFlmfWmGEpDjgWTx\n46rLSnwBAYA0XL4TAIDADC44x4Ob6mZqaZKn9eTNVydrTq5/mq9+lfUey/yt8q4QRlY93YrK1lll\nb2DaBVfWniazEFjqHiqosvws7EcASBpc5gwAwLQjc54BdQdtASG47opTJHEZzzq2fGRj7uvzc1s5\ndBCoQQbntoPN+OjputeBHl9n3fW1+f6S7cjS5JSuvLaWHSHf9T4AZk2ZoIywUdYGACAwYWTOK56l\nuduOlSQt8hMl5V/4I54nrRxTdFGDMrK2vchPrLX+OuuLl4vfaxNl9mmRSdzZCgAwEkZwjiRLLaaK\np0plzJ81PS0o1g2EWculTS9TTqr63staNH9irXJWW6eXdbE8Zsd1V5zCcWfMDMraAAAEJqjMuUuh\njEgcbwcDMzDruM42sNBUBudQAnEZfPAAI0Wn9czNr5LOnWCDBoqR2tOBsjYAAIGZusx5SFlzEhk0\nUIwLkmQjY54uZM4AAARmajLnJhnzxk+/tcWWjKx77UdrLZd8H3zTxawpOzjsujlOq8J0G3xwrhuU\nuwjIZdZfJWjPza8iQAMpGBy2P0ra04eyNgAAgRl05lwla+46Uy5rvB1FmTRlbsyiovK29FS2eMa5\n6ybSphCRMU+vwQXnMgE5lEBcRtzWMuVuRnRj1pS5peGWj2ycyQCdF5j5jBg+ytoAAASmUeZsZgdL\n+qikF0pySW+RdL+kj0taLulBSae7+/cbtVLlS9hDypqTqmbQfDNG2ybZn6ugxL0/StmzwZrcrs/M\nNkv6f+7+UTM7QNKBki6W9Ji7bzSzdZKe7e4X5q5n5UE+d9uxmf9U01bKLqtMoK7SEQnqg7PN3VdO\namOt9Wez3A+VJv+HZT4L4nVP46lWXZSyOVw2Ge5e6RZ8tcvaZvYzklZJuiLa8E/c/XFJayRtjmbb\nLOm0utsAMBn0ZyAsTcraR0j6jqS/MrMXSdom6e2Slrj7rmieRyQtSVvYzNZKWitJev7TUzcwqxlz\nbOOn31pqNDffeNGC9vpzh8qUuOPXtmg6St1FZWyJrHca1S5rm9lKSbdIOsHdbzWzD0r6oaS3ufvB\nifm+7+7Pzl1XSll71gPzuLwgXbZjEsgHZ2Jl7Vb7c4dl7fH1lDWkAF0mGMfa3I98NnRrYmVtSTsk\n7XD3W6Pnn5R0rKRHzexQSYp+726wDQCTQX8GAlK7rO3uj5jZw2b2Ane/X9JJkr4R/ZwtaWP0+4Yq\n6y36NjxL2XJSXombb75oqqv+3KUqlbasbDSEjLpKppxEf59uTUdrv1ijUy8OkPSApDdrlI1fL+n5\nkh7S6NSLx3LXE5W188xqUE5TdBw6b9Q7HXpQJj1au53+PKGydta665pkoK4bkKX2gzJf7iejalm7\n0XnO7n6XpLQPj5OarBfA5NGfgXA0ypxba0RB5kzWvFCd7JnMeXAmmjm3pc/MOV5/W9rMpptky7Eu\n9huZ82RMckAYAADowOBufAEAecYzwCaZdBvZblNktLOJ4DxQRdfiplQFjJS5s1VI6LOQKGsDABAc\nMueBK7rEJ4PAgPzTC/tG/0QagjOAmVX1ksFdbBdIQ1kbAIDAkDmjU1nn0ZuVO+Wv7PLxfEXrdfdS\n85TZZtr6xpfN2lbZ+aZVCOXkPs36+58GedcIaaM/E5zRmbxAWBRM017v+oI5TbdZJfCPb6Psl4tp\nQEkXQ1b2s6spytoAAASGzHkKlD3neQjyvpWaWWfZc1bWG0/L227TknqX7wvAZCX7c5NKWNDBmWtq\nV1MUpPuQFnTMrNTx20looxNVOYZNEEZVTcZtNBk/kbe+vPmqHIstOpRUpXRc9xBaFZP8jKKsDQBA\nYILOnDFsZb71zsIAqNgsvVc0V2bgUUgDKtPaVHVApVT98FC8XFFW20dlrgmCMyaOY6xAd5qMn2iy\nzeQ2qm6zSUm9aBtDCshJlLUBAAgMmTM6U7WM1NYox6y2ZG0zfr2rb9hzG5YXbmOWznNGNVkXrKl7\nIZ62tdF3qgwwm5WqG5kzAACBIXNGa2z94QumFX3LTVumznLJ7DRN/Pr8hgdb22bR9CrbiNuXXJdf\n8lDp9WL6MKByf7P0XiWCMzoQB5U5La+8bBykssxveLDR8lmvFy1Xd3ttbANI6nJgF4pN6vABZW0A\nAALTKHM2s3dKeqskl3SPpDdLOlTSFknPkbRN0lnu/pOG7cSAVCnzIhz05zDE/SeuEuVVXJJ9LXlo\np6hKU+cQTbJqNb7dOJvsYrvjh6yqnvfdZqZb5jTQtgaX1g7OZrZU0u9IOtrdnzSz6yWdIekUSR9w\n9y1m9mFJ50j6UOOWorSNn35r4XW2uTMQkujPYUgLUkWHctLUWWZuw/Lc5Yper7vdquqez9xW0JzU\nYYWmZe3Fkp5pZoslHShpl6RXSPpk9PpmSac13AaAyaA/A4GonTm7+04z+yNJ/y7pSUmf06js9bi7\n74lm2yFpadryZrZW0lpJ0vOfXrcZCASl7GzxALmQ91Gr/RmVpf1vdDVIsSg7rrPuSQ+orHImQ1cD\nt7oeENakrP1sSWskHSHpcUmfkLS67PLuvknSJkmylQcx7HCg6gScZMey9YdX62iX5HeIf/ynF5Ve\n18tOuLv0vG3Ie599B+5W+7MZ/RloqElZ+5WS/s3dv+Pu/y3pU5JOkHRwVBaTpGWSdjZsI4Du0Z+B\ngDQZrf3vkl5iZgdqVAY7SdLtkr4k6XUajfA8W9INdVbOvZyHoWzJNitrrJI1V8mK21jfpDPrnnXa\nnwFU0+SY861m9klJd0jaI+lOjcpafydpi5n9XjTtijYaitnUdkBue9vTEsDpz2Er8yX22tN+VHp9\n//fu+5o0p3Vv+MyB+z3v+zBPCBqd5+zu6yWtH5v8gKTjmqwXwOTRn4FwcPlONFK3nF2kz4y5imQ7\npyWLxvBUyZqve+2THbaknmtP+9GC7HnWEZwRlKEE5TRZbSdooytVgnLoku/lN/VLPbYkDFxbGwCA\nwJA5T6GsS3dK7V+2s2i0djy9THl7yFlznvh9kUGjTdOUNWMhMmcAAAJD5ozeTWvGPO4f/+l
FZM9o\njIx5NhCc0UhXo7UBLERgnh2UtQEACAyZM3o1KyXtGIPDUAcZ8+whOKORuqO1Zy0oA0AVlLUBAAgM\nmfMUyTu/GWFh5DaAPMEG53Wv/Si3jRwARmsDQPsoawMAEJhgM2eUV1TOji/ZOTe/qvVtt3n5TgDA\nCJkzAACBITgDABCYoIPzutd+lBHIBcqWtEPCOc4AkI9jzpgYgjIAlBN05gwAwCwaRObMOc8LFZWz\n//kTuyRJx0+iMQCAVpE5AwAQmMLgbGZXmtluM7s3Me0QM7vZzL4V/X52NN3M7E/NbLuZfc3Mjq3S\nmDjbS8PgsKeUzZqBcZPszwDqK5M5XyVp9di0dZK+4O5HSfpC9FySTpZ0VPSzVtKHqjbonz+xqzBI\nz6qiLyhF+w7QhPszgHoKg7O7b5X02NjkNZI2R483SzotMf1qH7lF0sFmdmhbjQXQDP0ZGIa6A8KW\nuHucoj0iaUn0eKmkhxPz7YimLUjnzGytRt/Gpec/fd/043/jW6MHv/GtzMtNxtnjLA0SK8qYx+3b\nj0CxdvszgMYaDwhzd5fkNZbb5O4r3X2lnvc0SQsDyvzc1tyLaMxCibvOsXYCM+pqpT8DaKxucH40\nLm9Fv3dH03dKOiwx37JoGoBw0Z+BwNQNzjdKOjt6fLakGxLT3xiN8nyJpB8kymWdmOZR3GXeV7Kk\nffxvfCvYrJmrgwUtmP4MYKTwmLOZfUzSyyU918x2SFovaaOk683sHEkPSTo9mv3vJZ0iabukH0l6\ncxuNnJ/bWni7w2QgG/Kx6LJfNMaDcl+ybhUZ23eryF89eAKtQZEQ+jOAYoXB2d3PzHjppJR5XdJ5\nTRsFoBv0Z2AYBnH5TumpuysVZdDSMEdzVy1hx/ouY8eZcVYGXZRZAwAWGkxwjiVHb1cpdY/rK3DX\nPT4+P7eV62QDwIzg2toAAARmcJlzUpmBYlkmMYCsjVHkeed5D8nLTribEdsJLzvh7r6bACBgZM4A\nAARm0JmzVG2gWJY2jk13ca71tGTNeAoZM4AyBh+cY1UGilUx6QucEJABAJS1AQAIzNRkzkltlLon\njYwZABCbyuAcywt4fQVugvDs4ngzgLIoawMAEJipzpzzdDWALGsbAACUFVRwHtIx4jKm7f2gHsrZ\nAKqirA0AQGDCyJy3/Qcl4BkQZ5BcxhMA8pE5AwAQGIIzAACBITgDHWIwGIA6CM6YuFkIWC874e6Z\neJ8AukFwBgAgMGGM1sZg2frDc1/3Sx6aUEv6R6aMrrzhMwdKkq497Uc9twSTQuYMAEBgCoOzmV1p\nZrvN7N7EtPeb2b+a2dfM7NNmdnDitYvMbLuZ3W9m/6urhgOhGNLxZfrzsMUZNKZfmcz5Kkmrx6bd\nLOmF7v4/JH1T0kWSZGZHSzpD0i9Hy/ylmS1qrbUIjl/yUG7p2tYfnlr6jgPaUILauAG3/yrRnwft\nDZ85kCA9AwqDs7tvlfTY2LTPufue6OktkpZFj9dI2uLuP3b3f5O0XdJxLbYXQAP0Z2AY2jjm/BZJ\nN0WPl0p6OPHajmjaAma21sxuN7PbW2gDepKVGceKMushGmC2XAX9eSCmNXue1vdVVaPR2mb2Hkl7\nJF1bdVl33yRpU7Qeb9IODFsy2IV63e0pD8iS6M9DlBbIhjiim4C8UO3gbGZvknSqpJPcPe6MOyUd\nlphtWTQNQMDoz0BYagVnM1st6QJJJ7p78mvajZKuM7PLJf2cpKMk/UvjVmJmjGeofWbSs5AtS/Tn\nISi6nkDSb+qXOmxJN+xF5d/frCgMzmb2MUkvl/RcM9shab1GozmfLulmM5OkW9z9t9z962Z2vaRv\naFQeO8/d93bVePQvPp6c9eERT6973DkvQDYN3LMSfJPoz8AwFAZndz8zZfIVOfP/vqTfb9IoAN2g\nPwPDwOU70Uifl+8synyTmfUsZskAhsueGvvRYyMY3TlYVY6FoRq/5KFt7r6y73ZURX+uj/5UbKin\nZrq7VZmfa2sDABAYMme0gm/87SNznk30pXRDzZhjVTNngjNaw4dKuwjOwPSgrA0AwMCFkjl/R9J/\nSvpu323J8FyF2bZQ2yWF27ZQ2yUtbNvh7v68vhpTl5k9Ien+vtuRYUh//1CE2i5pOG2r3JeDCM6S\nZGa3h1rCC7VtobZLCrdtobZLCrttVYT8PmhbdaG2S5rutlHWBgAgMARnAAACE1Jw3tR3A3KE2rZQ\n2yWF27ZQ2yWF3bYqQn4ftK26UNslTXHbgjnmDAAARkLKnAEAgAIIzma22szuN7PtZrau57YcZmZf\nMrNvmNnXzezt0fQNZrbTzO6Kfk7pqX0Pmtk9URtuj6YdYmY3m9m3ot/PnnCbXpDYL3eZ2Q/N7B19\n7TMzu9LMdpvZvYlpqfvIRv40+t/7mpkd20Pb3m9m/xpt/9NmdnA0fbmZPZnYfx/usm1tCaU/05dr\nt4v+XL9d7fZld+/tR9IiSd+WdKSkAyTdLenoHttzqKRjo8cHSfqmpKMlbZD0u33uq6hND0p67ti0\nP5S0Lnq8TtJlPf89H5F0eF/7TNIqScdKurdoH0k6RdJNkkzSSyTd2kPbflXS4ujxZYm2LU/ON4Sf\nkPozfbm1vyf9uXy7Wu3LfWfOx0na7u4PuPtPJG2RtKavxrj7Lne/I3r8hKT7JC3tqz0lrZG0OXq8\nWdJpPbblJEnfdvfeLoLr7lslPTY2OWsfrZF0tY/cIulgMzt0km1z98+5+57o6S2SlnW1/QkIpj/T\nl1tBf67Qrrb7ct/BeamkhxPPdyiQDmRmyyUdI+nWaNL5Ubniyj7KTRGX9Dkz22Zma6NpS9x9V/T4\nEUlL+mmaJOkMSR9LPA9hn0nZ+yi0/7+3aPTNP3aEmd1pZl8xs1/pq1EVhLY/JdGXG6A/19e4L/cd\nnINkZs+S9NeS3uHuP5T0IUk/L+nFknZJ+uOemvYydz9W0smSzjOzVckXfVRD6WX4vZkdIOk1kj4R\nTQpln+2nz32Ux8zeI2mPpGujSbskPd/dj5H0LknXmdlP99W+oaIv10N/rq+tvtx3cN4p6bDE82XR\ntN6Y2dM06szXuvunJMndH3X3ve4+L+kjGpXvJs7dd0a/d0v6dNSOR+PSTfR7dx9t0+hD5g53fzRq\nYxD7LJK1j4L4/zOzN0k6VdIbog8bufuP3f170eNtGh3L/cVJt62iIPZnjL7cCP25hjb7ct/B+TZJ\nR5nZEdE3tTMk3dhXY8zMJF0h6T53vzwxPXnc4rWS7h1fdgJt+ykzOyh+rNHgg3s12l9nR7OdLemG\nSbctcqYSJbAQ9llC1j66UdIbo1GeL5H0g0S5bCLMbLWkCyS9xt1/lJj+PDNbFD0+UtJRkh6YZNtq\nCKY/05cboz9X1Hpf7mo0W9kfjUbYfVOjbxPv6bktL9OoRPI1SXdFP6dIukbSPdH0GyUd2kPbjtRo\n9Ovdkr4e7ytJz5H0BUnfkvR5SYf00LafkvQ9ST
+TmNbLPtPoA2WXpP/W6JjTOVn7SKNRnX8R/e/d\nI2llD23brtFxsvj/7cPRvL8e/Z3vknSHpFdP+u9a8z0G0Z/py43aR3+u165W+zJXCAMAIDB9l7UB\nAMAYgjMAAIEhOAMAEBiCMwAAgSE4AwAQGIIzAACBITgDABAYgjMAAIEhOAMAEBiCMwAAgSE4AwAQ\nGIIzAACBITgDABAYgjMAAIEhOAMAEBiCMwAAgSE4AwAQGIIzAACBITgDABAYgjMAAIEhOAMAEJjO\ngrOZrTaz+81su5mt62o7ALpFXwYmz9y9/ZWaLZL0TUmvkrRD0m2SznT3b7S+MQCdoS8D/Vjc0XqP\nk7Td3R+QJDPbImmNpNQObWbtf0OYIStWrJAkbdu2reeWhGEI+2PFihVl2vddd3/eJNqTo1Jfjuah\nP2dZ8az0yXrBgmnbdH/uvOOvZ00vbFJi20XLltlG2+ura3yfLtjGtv9YuEy5frlv3n2rKrGMu1up\nFUe6ypxfJ2m1u781en6WpOPd/fzEPGslrY2erli4FpQV/w3NKv3tp9YQ9oe7l2nfNndfOYn2ZCnT\nl6Pp9OcS5uZXpU7fa19ZMG2Rn5g77/jrWdOLJLddtGyZbbS9vrrG9+n4Nubnti5YpmS/3DdvrMwy\nVYNzV5lzIXffJGmTxDdtYOjoz9WMBwZT+c/trHkXzdcLcFUCY5l5215fWhAtI2+fZn1JqvKFvusv\n/10F552SDks8XxZNQ0vSKh5Z3+TSMskq85bZdtYyRdsu2lbd7XaxviFk5B2gLwcsK8hMk/H3WDdY\nD01Xo7Vvk3SUmR1hZgdIOkPSjR1tC0B36MtADzrJnN19j5mdL+kfJC2SdKW7f72Lbc2qqtlunXmz\nls/Lgqtuu8zydZersr6idXYxNmMI6MvhaitrfsXFp7WynnFf/IPPdLLeuflVM5E9dzIgrHIjOEbV\nSFHArTJwoShw5W0jr6ydtWyZ16uWmfPaWmZ9ee1py1AGhNVBf84WB9QmwaVuUO4qCNfVRvCuux/7\nCPBVB4RxhTAAAALT22htTF5bpew2t21muYPbusheQ6gWAXWUyZpDy5CzlGlnUXY9zSVugjNmzoyN\ntsYUmKagXMUrLj6tVICWpm8UN2VtAAACQ3AGgIDlZc3zc1s1P7d1KrPm2CsuPq3U+5u2c74pa6NQ\n8rjwpErC8XbaON5dVd0R61Xk7dMZvdgJxhQFm/m5rXrlussn1Jr+xQE6r8w9TSVuMmcAAAJD5jxA\ni957Vur05CjkxevfWHq5utuosv4y266zzbzl0qYvXv/GwvUVnY/dNkaPI6lMxixpprLmpLKDxIae\nPXMRkgFa9N6ztPfSa/puRu+aln8HsB+5CMmUKVN2JTiXlxeki/YxFyEBAACVUNYGgAEhY35KXol7\n6IPDyJwBAAgMmTOCl3ZzjBDGSgBtKzrePM3nM9dVdIrVUDNogjNalzZKusnAq2RA5hxgTCOCMsZR\n1gYAIDBkzmhN0XnOTU9bIlsGMCvInNGKshcZqXIhFGAWUNJGGoIzAACBoayNRupkwgO4MhfQOTJm\n5CFzBgAMXtGXmaHdUpLgDABAYAjOqK2opJ1XumZwGABkq33M2cwOk3S1pCWSXNImd/+gmR0i6eOS\nlkt6UNLp7v795k1FKJoEZYTekKh3AAAbP0lEQVSJ/oxpUPZqYUPQJHPeI+l/u/vRkl4i6TwzO1rS\nOklfcPejJH0heg4gbPRnICC1g7O773L3O6LHT0i6T9JSSWskbY5m2yyJIYdA4OjPQFhaOZXKzJZL\nOkbSrZKWuPuu6KVHNCqTpS2zVtLaNraPyckraaeVs+NpWctxWlV46M/dyyuvcgoVpBYGhJnZsyT9\ntaR3uPsPk6/56C4FqbcPcvdN7r7S3Vc2bQOAdtCfgTA0Cs5m9jSNOvK17v6paPKjZnZo9PqhknY3\nayKG5ubz/2bfT4yR2+GjPwPhqB2cbXQXgisk3efulydeulHS2dHjsyXdUL95ACaB/gyEpckx5xMk\nnSXpHjO7K5p2saSNkq43s3MkPSTp9GZNRAiKMtvPPna6dP7CP3Uye9Zjo1+rD7k+cxscf+4N/RkI\nSO3g7O7/KCnrHn4n1V0vgMmjPwNh4QphAAAEhuCMQnkl7c8+dvqopF1B3vwMDgMAbhmJHKWOMwMA\nWkfmDABAYAjO6EVROZzyNoC6puEqawRnpGr7OHPeuuq2AwDSZN2VakgIzgAABIYBYdgPg8CA/kxD\nORbtIHNG7zj+DIxMQzkW7SA4AwAQGMrakEQ5GwBCQuaMYDByGwBGCM4AAASG4IygStoMDgMAgjMA\nAMEhOAMAEBhGa8+wkMrZWdtefcj1qa8veu9Z2nvpNZNsEtCa+bmtmptflfpafK4zFySZbWTOAAAE\nhswZADAViq6wNj+3VZIyqxYhIXOeUSGXtJMYuQ2gjLKBeSgIzgAABKZxcDazRWZ2p5n9bfT8CDO7\n1cy2m9nHzeyA5s0EMAn0ZyAMbWTOb5d0X+L5ZZI+4O6/IOn7ks5pYRtoSVEpuOgiIH0oc2EStIb+\nHIjPb3yXPr/xXX03Az1pFJzNbJmkX5P00ei5SXqFpE9Gs2yWxPkAwADQn4FwNB2t/SeSLpB0UPT8\nOZIed/c90fMdkpY23AZaUpQxY+bRnzu2174iSTJZ4cjhePrn596lV667fDINHKBpGwgWq505m9mp\nkna7+7aay681s9vN7Pa6bUA5ZUrZQ8DI7e7Qn/O5e2frKAoeIZ72c/P73tl3Ewrtta9or31F7r7g\nJ/6S1ETaeuOfNjTJnE+Q9BozO0XSMyT9tKQPSjrYzBZH37aXSdqZtrC7b5K0SZLMrJ13A6Au+jMQ\nkNqZs7tf5O7L3H25pDMkfdHd3yDpS5JeF812tqQbGrcSiDA4rBv05+6ZmRb5iVrkJy54bX5ua24G\n/cU/+Exh+XYWZe2TvfYVmVnmzyI/sXaWGy+Xte7kPE10cZ7zhZLeZWbbNTpmdUUH2wAwGfRnoAet\nXL7T3b8s6cvR4wckHdfGetHcNBxrTvPZx07PvSmGJG6MURP9uT95N8SQRpkiN8QoHgTWJzNr5bgz\n19YGgDFpH67JaXH5Mjk9bdr49ORo7Sx5g5UW+Yn7zn1+5brL9w3MetVFH9g3T9pgreTrWbIGeZVZ\nto31pb2XNHmBed+hgSkY9cDlOwEACAyZ85Qayo0tgBAls11J+wYAFUnLoqtIbicub49n0snzn5XI\nTstm0ePqLldlfUXrLbO9rkrZZf+2k0ZwnkLTepx5XPxeOPaMEDT5kM8L6ov8xNRSd/LY9M3ve2du\nOTjr9azpr7roA7UCdFE74nnibSS3l6VsUK56sZHkyOoQUdYGACAwZM5TZFZL2Xkjt6XRfiF7Rtfa\nKGUnFV3ec3zbaZf4jDPS8Sy47OCrMrLWXWfZWNyuZNYcVw/GzxNPZsxls+C99pX9Bo1lVS7yKhpl\nthXPs
3LlylLtSiI4YypQ4sa0mp/bWjj6eG5+1egYdGQS1+LOOlad/EJQpdSeDNRdlbLjYJl1qKDM\n8kVfwsZH6tctm1PWBgAgMGTOADAFkuXvOIsO8W5WWSX1uFxdJqOtc6eptJHwUrnstswo/OQ8Wee5\nV0FwngKzeqw5DcefMTTJD/OiD/KiK4jF4nm+qLg8vP+x3bKl5zKqXGwkr3RdtsxcJjCnBdzxi8FU\nLU+PT8+66Eza4zrHnClrAwAQGDLngSNrBtqV1aeSGdHi9W8svVzdbWQutyEx4ZLSm8wYZPXOwtHV\nXzrgztTpaftAkva87+rc9aXdlSsva27rkpxlB2a1MV8bFzWxEE7A5v6v1ZT5EIhLtzef/zetbvuV\nf3aqPv+2v211nV3JK2/HAi9xb3P36vWwng29P3d56CMuN9c5ZlpmvW3KOnWpyvLjy6ZNy1su7TSp\nssd986bNza8qte6yhxtKjuKuFLEpawMAEBjK2lMo+a3/VX/+6n3f/ppkvK/8s1Mbt2vSss593nPJ\nqOyWVZYDhqbsQLE21cmsk+cX5y1XdzR2kq0/fPQ7cZnOuQ3LFywXzxc/9kseWjC9SJVBfWWROQMA\nEBgyZ5QylOPM06Dtb+CYDclss0oWnRyMVeVc47Tlx8VZ6H42FC+XlrXGWW/WOKm5Dcs1v+HBBcsn\nH8evj1ZUftvJaanvKSGtfVy+E0iRd+4zl/XENEgOEt176TWpZeGsgJ0MyFVK1Wkl4jKKltsvgFZc\nvu5rbSzT9pdpytoAAASGzHlAik6hSmZ/aaWV5KCuZJk67fSoKvPmbScpbbnx9aUtW6aknjdgrW5J\nPqt8lvUNOdSbtiM8bQ3esg0LS7CL3nuWfEN+6TVNlYFdRaVdNEdwHoi8wJxWks06z6/ovOc4yNUJ\naGnLVhnlXfQloexyacvE+yhrPybPbS0653FcCNcKwDC0eX5z3mdCHLTTPhsWv+aRwnXvufFn09db\nYQTztIr3QddfUChrAwAQmEaZs5kdLOmjkl6o0di3t0i6X9LHJS2X9KCk0939+41aic41yZjj5dOW\njaflZcBF2856PWt6mW2mqXs1oGkpZdOfh6HOZUKrDnhc/JpHFmTPoWfN2256qVac/NV9z9POV86b\nFgvlfTYta39Q0mfd/XVmdoCkAyVdLOkL7r7RzNZJWifpwobbQUte9eev3vc4WeJuEpSbLF+07Off\n9repQbbLy4jOcJma/hy4KoG5qbj8nVXiTooD3PilT+847szK273zptdrm15aeTlpFKD3OS56fJNS\npx37Lx9LXUedi5B0oXZZ28x+RtIqSVdIkrv/xN0fl7RG0uZots2STmvaSADdoj8DYWmSOR8h6TuS\n/srMXiRpm6S3S1ri7ruieR6RtKRZE9GVOIv2P5vuTDFZLShjWsrUFdGfA1bmTI28eRa99yzZXe9v\nu1mDFmf1WRl035oE58WSjpX0Nne/1cw+qFHJax9396w71JjZWklrG2wfLSobwEIK5EVtDqmtA0B/\nRqvqlLTxlCajtXdI2uHut0bPP6lR537UzA6VpOj37rSF3X2Tu68c4i3xgClEfwYCUjtzdvdHzOxh\nM3uBu98v6SRJ34h+zpa0Mfp9QystRaayg0SKyl5l7XcpzPWjx3v+7Orcuzzt0ampl9DMmj4+T9p2\n44FbRXeXansQTdbFRtq6CEkfZXX68/AVncvvL363JFUub8cDw/bq+FLzkzG3o+lo7bdJujYa2fmA\npDdrlI1fb2bnSHpI0ukNt4GWxLdKlLq5XWJy/ZPU1nbjfVI0WrvsxUkGiP4coKoXICo6/uwvfjfH\nnxPuOO7MII87NwrO7n6XpLQy1klN1gtg8ujPQDi4fOcMKMqSq2TRWfPWzcTLLFd3m1kZddZycRZi\nGRdsyMpGZnR0NzpW5Vr6aCbEkdtcvhMAgMAQnAeCb8ndYv9imuy99Jrc/2l/8bv3DRBrwx3HnclA\nsJZR1h6QotGYs2p8oFuVAWIEZYSmjXL2Hz56riTpor/4r1bahMkjcwYAIDBkzgNU9M15/OLzIcrK\nDuq02y69Zt/pTHsuuZoBWphqcVZcxvvOe4ak7Ay6zGlV8Y0v7Jgw7tY0K8icMXFFF0OpU7Y3s30/\nwFB1NUI7DtJp2j7+PGQhHTcnOAMAEBjK2piYSdwkHphmVUrabal6+U60g+CMoA3h+DnQVNEX17yy\ndFltHH/G5FDWBgAgMGTOmAjOzQYwBPGgsBX6aq/tIDijU1VGn2bNy/FnTLNJlLQxPJS1AQAIDJkz\nglF0H1oGh2Ga9JUxv++8Z+QOCpPSq1RchGSyCM7oTNWbxAOYHcfcep3uPP71fTcjWJS1AQAIDJkz\nglJ05y0Gh2Ea5FWVJjEArOic56xDSPMbHpQkzW1YXnvbx9x6Xe1lQxNf07+LywaTOQMAEBgyZ7Su\njYv3l8mgyZ6B4eE4czkEZwCYEM5pRlmUtQEACEyjzNnM3inprZJc0j2S3izpUElbJD1H0jZJZ7n7\nTxq2EwPQxb1o8859ZnBYu+jPsyfvnOe4f+255OoFr8UDw6T9B4ftueRq3fnZp8rW44O/kiXtMqdS\nZQ0eS1tufH1py5YpqcfLzae8VmcgnLtr5cqVlZerHZzNbKmk35F0tLs/aWbXSzpD0imSPuDuW8zs\nw5LOkfShutsB0D36c/f6HqFdtO2sIL14/RslSfO2tdJo7SbnMccBsijYVtl2meWTy604+av7fQmp\nw91rj+RuWtZeLOmZZrZY0oGSdkl6haRPRq9vlnRaw20AmAz6MxCI2pmzu+80sz+S9O+SnpT0OY3K\nXo+7+55oth2SlqYtb2ZrJa2tu32EpYuS9viyjNzuDv25O7M2CCwt6626fNqy8bS8DLho21mvZ02P\nKwRVM+g2zn9uUtZ+tqQ1ko6Q9LikT0haXXZ5d98kaVO0Lq/bDvSry6Ccti6OP3eD/oy8489VtFnK\nbnPbdx7/+tTA3uVlROMgXeeYc5Oy9isl/Zu7f8fd/1vSpySdIOngqCwmScsk7WywDQCTQX8GAtJk\ntPa/S3qJmR2oURnsJEm3S/qSpNdpNMLzbEk3NG0kpttlDzxRar4Ljzyo45bMNPpzB0IdBJalaHAY\nqumlrO3ut5rZJyXdIWmPpDs1Kmv9naQtZvZ70bQrarcOQatS0i4bgPNc9sATuviq3268HixEf27X\nrB1nRvsanefs7uslrR+b/ICk45qsF8Dk0Z+BcHD5TlQ26Yw56Q/e9JeSlJlBM3IbaK6twWFVJEdj\nT/L628mBYiFd95vgjNa1HZDT/MGb/jI3QEuM3Aa6Mn6FsP+5ehRK9kj7HhfJmnePii8YkrVc0bbj\ncwLH54unJ7ebdoWwpGSSMp6wxMea+xqtDQAAOkDmjEryStpxyRmYVdM+ECzrsp23fXaUey7WG/c9\nLnLnZ1+v2zKmF8laLm162rrH50vb5oqTv7rvcdpFSPZeeo0spT
qX/B9oMlqbzBkAgMAQnDG1Fr33\nrMJMBmjLtGfNmCzK2iil6IOnj5J20cjtGCO4AZSx30C0Dcsb35WqCTJnAAACQ+aMwcs7rQroGuXs\n6ZE853l+w4Ol7lvdFYIzCg1hhHaZi5NInPuMdhGYp098IZLkaO0+UNYGACAwBGdkKhrtHErWnFTU\nJkZvAxgCytoAUNEslbPnbWvu69tueumEWjJbyJwBAAgMmTMAVJCXNU9Txhyb81WSsjPoFSd/tbPs\nuc1BWdtuemnvg7yqIDgj1dCONScxchuYrGTQW3Fyjw3JMaTALFHWBgAgOGTO2E+Il+msq0wGTfaM\nsmZpEFiaOV9VODhsFvglD01kOwRnAEApRcefm663LyF+6aCsDQBAYMicIWm6ytnj8q69zeAwlDFr\nI7ST4vd30V/8175pbZS4+86Wk7qqCDRB5gwAQGAKM2czu1LSqZJ2u/sLo2mHSPq4pOWSHpR0urt/\n38xM0gclnSLpR5Le5O53dNN0oDwGh43Qn9GWutlmSBnzuDlfJbvr/ZKkPTf+bK9tKVPWvkrSn0u6\nOjFtnaQvuPtGM1sXPb9Q0smSjop+jpf0oeg3AjbNJW0scJXoz6UV9Y29l14jnXfuhFoTpjlftV/p\nO63Mf/G5/0eS5C9+90TbVkfcxsV6pNcAXVjWdvetkh4bm7xG0ubo8WZJpyWmX+0jt0g62MwObaux\nAJqhPwPDUHdA2BJ33xU9fkTSkujxUkkPJ+bbEU3bpTFmtlbS2prbB9Ae+jMKve+8Z+w3KCwpa3ps\nCBlzaBqP1nZ3NzOvsdwmSZskqc7yaKbMrROnsZzNyO189OeRUuXsGZQ2crsNm5755VbXV8baJ18+\n8W1WUXe09qNxeSv6vTuavlPSYYn5lkXTAISL/gwEpm7mfKOksyVtjH7fkJh+vplt0WjgyA8S5TIE\noEzGLE1n1hzjxhgL0J8rmKH/i8aKMuw+MubxbWdl0P7id496QE/KnEr1MUkvl/RcM9shab1Gnfh6\nMztH0kOSTo9m/3uNTrvYrtGpF2/uoM2oaVZL2XgK/RmT8pwrP6vRv1rYNj3zy5kBus8v6oXB2d3P\nzHjppJR5XdJ5TRsFoBv0Z2AYuHwnJM1mxpw3OEyanQuTYIRBYO0KfcBVUlGJu48MmuCMmcbxZ0jN\nAvMFSz4iSfrDR2f7YiSxsiO5+zzePARcWxsAgMAQnGfAoveelZsZzGJJG2hbnEEj36ZnfjnYrLmo\nXWXPdmkDwRkAgMBwzHnKkTEDCEWoGXNSKIPDCM6ARh0t74vMJMtZGK5pHByWVq6v2h+GEJRDQ1kb\nAIDAEJyByN5Lr+GUKbTigiUfGfQAsay2Fw0unSZ9Dw6jrD2lSp23yTFnoFMXLPnIYErcXXyZGHo5\nu8/jz2TOAAAEhsx5xlC2TXfZA0/owiMPkvTUPsqqPnBZz9nTpITZ9r2Pu5L3/17H0LPmvhGcAQCt\nISi3g7I2AACBIThPkaKRlJRiq8nbX7M0anUW0DcQGsraU6DuHXXiY6zS6JjrLEvuC8ymorEGKDat\nJe0yo7bb/oJH5gwAQGDInCFpYeY4K5k0GTPGFV3KFbNr0zO/XHjOc1sIzkh14ZEHTWWAJhijDI5B\np19ggy8tk0NZGwCAwJA5z6jkN+AyA8bKrCcW34ry4qt+u2bryiG7ASYnL2ue1oFgaSY1cJDMGQCA\nwJi7589gdqWkUyXtdvcXRtPeL+nVkn4i6duS3uzuj0evXSTpHEl7Jf2Ou/9DYSPM8huBXBwH6k/H\n2fs2d1/Z5grpzyir7OfK3kuv0RXvO6zj1oTjnIse3ve4ymms7m5VtlOmrH2VpD+XdHVi2s2SLnL3\nPWZ2maSLJF1oZkdLOkPSL0v6OUmfN7NfdPe9VRqFajg/c/IGXFK/SvRnoJJkQJ6UwrK2u2+V9NjY\ntM+5+57o6S2SlkWP10ja4u4/dvd/k7Rd0nEtthdAA/RnYBjaOOb8Fkk3RY+XSkp+xdgRTVvAzNaa\n2e1mdnsLbYBG2dyAM7rBmPJ9TH9GKcnPm3MueriX7LJvRZ+5TaqZjUZrm9l7JO2RdG3VZd19k6RN\n0Xo4RtUiLqLQjSkPyvRnNDYeoId8LLrvLxu1g7OZvUmjgSUn+VOjynZKSv41lkXTAASM/gyEpVZw\nNrPVki6QdKK7/yjx0o2SrjOzyzUaQHKUpH9p3EpUVnRVn7au+pOWTaZdWagLk6wOTHPWTH9GV9Ky\nz9Cy6b4z5CxlTqX6mKSXS3qupEclrddoNOfTJX0vmu0Wd/+taP73aHTcao+kd7j7TePrTNkGZTBg\noS5OpaI/o1DdO9010UbQ7ivQlrlVb+unUrn7mSmTr8iZ//cl/X6VRgCYDPozMAxcvhMAUEkXh5TW\nPtl8HYve23wdoeDynQAABIbgDABAYAjOAAAEhuAMANjPNJ86OElN9iPBGQCAwBSe5zyRRph9R9J/\nSvpu323J8FyF2bZQ2yWF27ZQ2yUtbNvh7v68vhpTl5k9Ien+vtuRYUh//1CE2i5pOG2r3JeDCM6S\nZGa3t33BhbaE2rZQ2yWF27ZQ2yWF3bYqQn4ftK26UNslTXfbKGsDABAYgjMAAIEJKThv6rsBOUJt\nW6jtksJtW6jtksJuWxUhvw/aVl2o7ZKmuG3BHHMGAAAjIWXOAABABGcAAILTe3A2s9Vmdr+ZbTez\ndT235TAz+5KZfcPMvm5mb4+mbzCznWZ2V/RzSk/te9DM7onacHs07RAzu9nMvhX9fvaE2/SCxH65\ny8x+aGbv6GufmdmVZrbbzO5NTEvdRzbyp9H/3tfM7Nge2vZ+M/vXaPufNrODo+nLzezJxP77cJdt\na0so/Zm+XLtd9Of67Wq3L7t7bz+SFkn6tqQjJR0g6W5JR/fYnkMlHRs9PkjSNyUdLWmDpN/tc19F\nbXpQ0nPHpv2hpHXR43WSLuv57/mIpMP72meSVkk6VtK9RftI0imSbpJkkl4i6dYe2varkhZHjy9L\ntG15cr4h/ITUn+nLrf096c/l29VqX+47cz5O0nZ3f8DdfyJpi6Q1fTXG3Xe5+x3R4yck3SdpaV/t\nKWmNpM3R482STuuxLSdJ+ra7P9RXA9x9q6THxiZn7aM1kq72kVskHWxmh06ybe7+OXffEz29RdKy\nrrY/AcH0Z/pyK+jPFdrVdl/uOzgvlfRw4vkOBdKBzGy5pGMk3RpNOj8qV1zZR7kp4pI+Z2bbzGxt\nNG2Ju++KHj8iaUk/TZMknSHpY4nnIewzKXsfhfb/9xaNvvnHjjCzO83sK2b2K301qoLQ9qck+nID\n9Of6GvflvoNzkMzsWZL+WtI73P2Hkj4k6eclvVjSLkl/3FPTXubux0o6WdJ5ZrYq+aKPaii9nBtn\nZgdIeo2kT0STQtln++lzH+Uxs/dI2iPp2mjSLknPd/djJL1L0nVm9tN9tW+o6Mv10J/ra6sv9x2c\nd0o6LPF8WTStN2b2N
I0687Xu/ilJcvdH3X2vu89L+ohG5buJc/ed0e/dkj4dtePRuHQT/d7dR9s0\n+pC5w90fjdoYxD6LZO2jIP7/zOxNkk6V9Ibow0bu/mN3/170eJtGx3J/cdJtqyiI/RmjLzdCf66h\nzb7cd3C+TdJRZnZE9E3tDEk39tUYMzNJV0i6z90vT0xPHrd4raR7x5edQNt+yswOih9rNPjgXo32\n19nRbGdLumHSbYucqUQJLIR9lpC1j26U9MZolOdLJP0gUS6bCDNbLekCSa9x9x8lpj/PzBZFj4+U\ndJSkBybZthqC6c/05cbozxW13pe7Gs1W9kejEXbf1OjbxHt6bsvLNCqRfE3SXdHPKZKukXRPNP1G\nSYf20LYjNRr9erekr8f7StJzJH1B0rckfV7SIT207ackfU/SzySm9bLPNPpA2SXpvzU65nRO1j7S\naFTnX0T/e/dIWtlD27ZrdJws/n/7cDTvr0d/57sk3SHp1ZP+u9Z8j0H0Z/pyo/bRn+u1q9W+zOU7\nAQAITN9lbQAAMIbgDABAYAjOAAAEhuAMAEBgCM4AAASG4AwAQGAIzgAABOb/Axcbmm6Kh2yMAAAA\nAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8qYiiUqGWR1r", + "colab_type": "text" + }, + "source": [ + "### Visualise Instance Seg + Obj Det Results" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "PWwT_DbRT2ID", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + }, + "outputId": "21794a6f-800b-4727-e899-41df4cb44ca3" + }, + "source": [ + "\n", + "# Visualise Results\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i)\n", + " image = np.array(img)[:, :, [2, 1, 0]]\n", + " result = vis_demo.run_on_opencv_image(image, objDet=\"True\")\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(result)\n", + "plt.show()" + ], + "execution_count": 52, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X+0XXV55/H3k5ukoMUGDCukCTFg\nUYmu4VcIUCuwxDqBUoOrmoKdGhXNzBq0akdt0HYRZ1rB4ujYjtVGYQxTMAYrhlWVgoxgp0qAQJQf\nEYwYJFkhQSPVpY5wk2f+OPsk+56799n77B9nf/c5n9daWffcffaP5+7km+c+z/7ufczdERERkXDM\naDoAERERmUrJWUREJDBKziIiIoFRchYREQmMkrOIiEhglJxFREQCU1tyNrPlZvaImW03szV1HUdE\n6qWxLDJ8Vsd9zmY2ATwK/C6wE7gHuMTdH678YCJSG41lkWbUVTkvA7a7+2Pu/gywAVhR07FEpD4a\nyyINmFnTfhcAT8S+3wmckbbynOdN+PyjZ9UUikg7ffexX/3I3Y9uOIyBxjLAzLlzfNYL5uc+wEvt\nuQA85D9PfG/Q5V1J7+fZR9Uxld0ur34xl1l30jr/N5/EbL7NM33XPYnZB1/H1+0u77d92rajYP+O\nnRz40T4bZJu6knMmM1sNrAY4Zu5M1l+5qKlQRIJ0xh9+7/GmY8grPp5nLTqG4zdfl3vbe2eeDsBL\nJ+9JfG/Q5V1J7+fZR9Uxld0ur34xl1n3Rzavs83EIubt/2H//U4c+n88vm53eb/t07YdBfvOuHDg\nbepKzruAY2PfL4yWHeTu64B1ACe+8DA94FskTJljGaaO58NPOzH3eH4oIzG9dPIeHhogkbRRW36u\neft/yJ4cSTZtW4A9ORK8dNSVnO8BTjCz4+gM5IuBN9R0LBGpT+NjedDklbV+nmQ/yDEfilXr8dej\nbM9EcqcznsCLbNvv/XFTS3J290kzezvwz8AEcK27P1THsUSkPhrLIs2o7Zqzu38F+Epd+xeR4dBY\nzqct7ekyslrSWW3rfu8VbZmPKj0hTERkhIxLa33UKTmLiIgERslZRIL20MzTK60GXzp5z5RZ4GWP\nmWd/3X0OsnxQ3TiydGfIJ62b9XPvmViUOGGruzxP2ztt+bz9P1RLO6ax+5xFZDx9d+I9B19POOyf\neWff9Sf8nOhFfOmdU/aTvTzJnamJKPmY2cful9gm/JzU/XW3O3jcEvKc06T4X7L/IwdfZ93iVmZG\ntWZj56PKWUREJDC1fPDFoE584WGuJ4SJTHXGH35vi7svbTqOQR1+2one7wlh3514z5QqLRT5K+5q\nhHIOuj93WjzdJ4RJcfvOuJBn7/1OOx7fKSIybMNOwP2kxRJK0pZmqa0tIiISGFXOIjLyQqqYs2S1\nmWU8KDmLyEhrU2KOi8etRD1+1NYWEREJjCpnERlJRSvmi9+2prIYNnz6qin7jX8/CLW6x48qZxER\nkcCochaRkVHm+nKVFXPaPuPfF6midR16fCg5i0jr5U3KdSTgotJiyZu0Q32Yi1RDbW0REZHAqHIW\nkVYapIUdUsWcpRtrngpaE8VGl5KziLTOqCbmuEGuT6vFPXrU1hYREQmMKmcRaY1xqJiT5Gl1q8U9\nWpScRaQV8iTmUUrISfK0utXiHg1qa4uIiASmcHI2s2PN7Otm9rCZPWRm74yWH2Vmt5nZ96KvR1YX\nrojUIeTx/N2J92RWzRe/bc3IV829+v28ec6ZhK1M5TwJ/Bd3XwKcCVxmZkuANcDt7n4CcHv0vYiE\nrbXjedySclzWLyVK0u1VODm7+253vy96/TNgG7AAWAGsj1ZbD1xUNkgRqZfGs0hYKrnmbGaLgVOA\nzcA8d98dvfUkMK+KY4jIcIQ0nvtVfePYyk6TdR5UPbdP6dnaZvbrwD8C73L3n5rZwffc3c3MU7Zb\nDawGOGauJo2LhKCK8Txr0TGl48hzjVlklJWqnM1sFp2BfL27fzFavMfM5kfvzwf2Jm3r7uvcfam7\nL53zvIkyYYhIBaoazxNz5wwnYJERVrhktc6v1NcA29z9o7G3bgZWAVdFXzeVilBEahfKeFbFXFzW\ng0r0kJJ2KdNPfjnwx8ADZrY1WvZ+OoN4o5ldCjwOrCwXoogMgcazSEAKJ2d3/7+Apbx9XtH9isjw\naTyPjovftibzMZ+qnsOnJ4SJiIgERslZREQkMErOIhIETQYTOUQ3GItIo5SURaZT5SwiIhIYJWcR\nEZHAKDmLiIwYfVpV+yk5i4iIBEbJWURkROWpoCVMSs4iIiKBUXIWEREJjJKziIhIYJScRaQxegCJ\nSDIlZxERkcAoOYuIiARGyVlERCQw+uALERk6XWsW6U+Vs4iISGCUnEVERAKj5CwiIhIYJWcREZHA\nKDmLiIgEpnRyNrMJM7vfzP4p+v44M9tsZtvN7PNmNrt8mNmWrXyUZSsfHcahREZWKONZZNxVcSvV\nO4FtwPOi7z8MfMzd
N5jZp4BLgU9WcJxclq18lLs3vmhYh2vsmEX0++WlbPxp+86z3zrjkoEFNZ5F\nxlWpytnMFgK/B3wm+t6AVwJfiFZZD1xU5hgiMhwazyLhKFs5/w/gfcAR0ffPB55298no+53AgqQN\nzWw1sBrgmLnlC3hVWOm6lWnaOSpzOSDvvvu93+/vri1diRFRyXieteiYmsMUGX2Fs6KZXQjsdfct\nZnbuoNu7+zpgHcCJLzzMi8Yh5d298UWZSTRJmcRZ5HhSnyrH8+GnnajxLFJSmZL15cBrzOwC4DA6\n16g+Dswxs5nRb9sLgV3lwxSRmmk8iwSkcHJ298uBywGi37Tf4+5/ZGY3Aq8DNgCrgE1FjzHIJKO0\nSixpeXy/g7R6y1R5g+yvyqoylLawquywDWM8i0h+ddzn/GfAn5rZdjrXrK6p4RgiMhwazyINqORT\nqdz9DuCO6PVjwLKy+0yrtOqcvJR27LL3T6ftT5OhJER1jOdeL9n/ESD906k2fPoqQJ9OJeMruI+M\nzJNA65q8lLbf7veDJss8+0vSloRcdCJZfH39AiIiMp0e3ykiIhKY4CrnLEWrrCoq8SJVc+/rUZXV\nBeh3DrKeEDYO529cvWT/R1Jb29Bpb6u1LeOodcm5TcahXVv0UsEg24/DeRQRiVNbW0REJDCqnMeA\n7hMWGU/dWe9psi4rSHNamZyrTja910WLPge6d59F91flz5fnmm2ZW7r6xZp3hnzd50Bk3ORJyhI2\ntbVFREQCE0Tl/N0jZ3HW6+d3vvFzANjfp9qbiNahuw2wP/p6VmxZv+VJuuumVZoTfs6UY8a3S9p/\nmf11tzv4s5aR45xmGcYnW4mISEcQybnrWzfuPvj6btLbmd9KWNZdv/e9tOVJybRoIkzbLml5/GdM\n+jlg6s+etk4RE68/Z8rx8+rXWi7bdlbbWkRkOrW1RUREAhNU5VynPG3tYeiNo0glKzJK9JxtkelG\nMjmHkojz6MaqJC3jTk8Lq0bVM7Xn+p4y4QjwU3924G3U1hYREQnMyFXObaqa41RBi0gZurd5tKhy\nFhERCczIVM5lKuarbnprhZF0rHntZwptF/85VEXLuNHkMJGOVifnO2f+ReEkWEdCzrP/QeI96/Xz\nlaBFEmhy2FRqaY8etbVFREQC08rK+c6ZfwEMVoXWXSnn1RtH1s+gNreMo6z2NqjFDaqYR1nrknMn\nWfVPaKEk4jy6seb5RUMzumXc5PlIw3FtcfdLzErK7ae2toiISGBKVc5mNodOGfsywIG3AI8AnwcW\nAzuAle7+k1JRkn82dpuq5rhBK2hVz1K1YY7nQajFPZVa2eOhbOX8ceAWd38JcBKwDVgD3O7uJwC3\nR9+XkicxX3XTW1ubmOO6P0fWz3LW6+e39oErEqyhjOei8iSdrMTVdlmtbCXm0VE4OZvZbwBnA9cA\nuPsz7v40sAJYH622HriobJAiUi+NZ5GwlGlrHwc8BfwvMzsJ2AK8E5jn7t2e65PAvKSNzWw1sBqA\nRb+WeIC8FfOouuqmt+aaza0Wt1SgsvE8a9ExtQU5SIu7q+2t7jzdAFXMo6dMcp4JnAq8w903m9nH\n6Wl5ububmSdt7O7rgHUAtvSIaeuMe2LuGuRatEgJlY3nw087MXGdKuWZxd3Vttncg7TmlZRHV5lr\nzjuBne6+Ofr+C3QG9x4zmw8Qfd1bLkQRGQKNZ5GAFK6c3f1JM3vCzF7s7o8A5wEPR39WAVdFXzcN\nst+sinkcquUk/Vrcuv9ZyqprPNcpXjXmuRc6SQgVddFJbKqaR1vZh5C8A7jezGYDjwFvplONbzSz\nS4HHgZV5d9YvMY9rUo7LanErSUtJlY7nYcpzLTpJPDEOM1GXmVWupDweSiVnd98KLE1467wy+xWR\n4dN4FglHKx7fqap5qqxZ3JrBLeNqkIlivZKq2Sqr6SruwVbVPD70+E4REZHAtKJyFhHJq7e6LFpJ\nQxhPHFO1PJ6UnFtKk8NE8inT6m6CkrGA2toiIiLBUeXccpocJpItrRoNoaJWpSxJlJxFZGwN8iCT\nuo4rkkRtbRERkcCocpZaLVv5aOLyuze+qNLtu+tl7XfZykdzrZPnmEn769027Vh51xtVIbSTmzTu\nP/8oeGjm6anvvXTyntL7V3KW2vRLhFnJNOn9tKRZlbLHHCTx9x4j7y8Xo0AtXWmzblJOS8D9kvYg\n1NYWEREJjCrnEZD3nuc26FdB3r3xRbVVz2lVb3dZv+OWbanX+XOJyHC9dPKezOo6j6CTs56pPZis\nJN2EpKRz98YX5bp+OwxVtJMHuYatJCyDSmuT5vmPf5BtH5p5euY+8ySdQa7FJu0vvv0greOsdau4\nDpznHFVFbW0REZHABF05S7vlmak8DhOgusbpZ5Xy8kw8GqRarGqiUj+9MQ1yzLwVbtoxsqraYVa9\nVVBylqHTNVaR+qQloe6yOpJ0WmLNe8wyLfWsY7QpIceprS0iIhIYVc5Sm0EneMUr6qpbwFkPFqlz\nMtqMtYsBOLB2R+H7vmV89VaE3Uowz+SqYaiiXTzIBLNhtOdDoMpZREQkMKqcpTJnPvzMlO8PkH3r\nUNI2FNiOWHWaJF69Ttu2YKzd7ZKWp0k7Rjc+ka48txG19XpqEeP0s4KSs9TgriWzgWIJJ2ubtOSb\nd/u094smx0G2UwKWKtQ5sUuyDevygdraIiIigSlVOZvZu4G3Ag48ALwZmA9sAJ4PbAH+2N3z9/2k\n9QZp80o4NJ7DsO0vXw8c6hKldlzW7ji4LhyqtA6s3dG/S9OzXeby2H57Y+weN0+svdv1XR4T/7lg\n8Pu+q6x044/mTFPV/dSFk7OZLQD+BFji7r80s43AxcAFwMfcfYOZfQq4FPhk6Uglt6tuemvmc7a/\ndePuYYYkgdN4DkNSksq6lJOkyDYz1i7uu13W+0WPO6ii9zNXlTSHdVmhbFt7JnC4mc0EngPsBl4J\nfCF6fz1wUcljiMhwaDyLBKJw5ezuu8zsI8APgV8Ct9Jpez3t7pPRajuBBUnbm9lqYDUAi36taBgS\nCLWy0/3Lq+cA8Ipbn244knRVjudZi46pP+ARk1Qx1zVJMas6LrLvYU+oPPHPb8y9XV0Tt+qeEFam\nrX0ksAI4DngauBFYnnd7d18HrAOwpUd40TikWUWScjdZQSdhxb/P8q/fPHnaPtqiX8xNJ+4qx/Ph\np52o8SxSUpm29quAH7j7U+7+LPBF4OXAnKgtBrAQ2FUyRhGpn8azSEDKzNb+IXCmmT2HThvsPOBe\n4OvA6+jM8FwFbCqyc32Wczt072nOqqDTqsY2VsAjqtbxLCKDKXPNebOZfQG4D5gE7qfT1voysMHM\n/jJadk0VgYpIfTSew/YP394W9P6q9h9OOrHpEBpX6j5nd78CuKJn8WPAsjL7FZHh03gWCYce3yml\nFG1ni0gzbnjtL5sOQXLQ4ztFREQCo+QsIiISGLW1R1Daozuh+sd2Zs3W7t6/q
/a2iEh+qpxFREQC\no+QsIiISGLW1pRTN1hYRqZ4qZxERkcAoOYuIiARGyVlKuWvJ7IMztpO84tanG//EJRGRtlFyFhER\nCYwmhI2Qfvc3i4hIewRbOSvRtMOZDz/Td8b2v7x6jmZsi4gMKNjkLCIiMq7U1h4BWV2G7iM7z3r9\n/MqPrcd3iohUT5WziIhIYJScRUREAhN0cl7z2s9oYliGvC1tERFpj6CTs4iIyDhSchYREQlMK2Zr\nr3ntZ7jqprc2HUZQ1M4WERldqpxFREQCk5mczexaM9trZg/Glh1lZreZ2feir0dGy83M/sbMtpvZ\nd8zs1EGC6VftaXLYIaqapahhjmcRKS5P5fxZYHnPsjXA7e5+AnB79D3A+cAJ0Z/VwCcHDehbN+7O\nTNLjKusXlKxzJ8KQx7OIFJOZnN39G8C+nsUrgPXR6/XARbHl13nHXcAcM6v+sVQiUojGs0g7FL3m\nPM/duyXak8C86PUC4InYejujZdOY2Wozu9fM7uWpZ6e9rxb3VKqYpUaVjuf9P9Lnd4uUVXpCmLs7\n4AW2W+fuS919KUfPSlxHLe7x/EVEmlPFeJ6Yq+eoi5RVNDnv6ba3oq97o+W7gGNj6y2MlolIuDSe\nRQJTNDnfDKyKXq8CNsWWvzGa5Xkm8G+xdlktRrmyzPNzqZ0tFQhmPItIR+ZDSMzsc8C5wFwz2wlc\nAVwFbDSzS4HHgZXR6l8BLgC2A78A3lxFkN+6cXfmxx3GE1mbH1iS9xeNUJJy2kdFdumjIsMSwngW\nkWyZydndL0l567yEdR24rGxQIlIPjWeRdmjF4zvhUKWYVUHDoeqzTRV0W1vYdy2ZDaRX0K+4tTNz\nVxW0iEh+rUnOXfEENUiru1dTibvo9fEQE7OIiNRDz9YWEREJTOsq57g8E8XSDGMCWRWzyFUxi4iM\nH1XOIiIigWl15QyDTRRLU8W16TrutVbVLCIynlqfnLsGmSg2iGE/4EQJWURE1NYWEREJzMhUznFV\ntLqHTRWziIh0jWRy7uqX8JpK3ErCIiKSRW1tERGRwIx05dxPXRPI0o4hIiKSV1DJuU3XiPMYtZ9H\nRESGQ21tERGRwARROb/kJ8+yXi1gERERQJWziIhIcJScRUREAqPkLCIiEhglZxERkcAoOYuIiAQm\niNna0l5nPvxM3/f/5dVzhhSJiMjoUOUsIiISmMzkbGbXmtleM3swtuxqM/uumX3HzG4yszmx9y43\ns+1m9oiZ/fu6AheRwWk8i7RDnsr5s8DynmW3AS9z938HPApcDmBmS4CLgZdG2/ydmU1UFq0E564l\ns7lryezU919x69O84tanhxiRZPgsGs8iwctMzu7+DWBfz7Jb3X0y+vYuYGH0egWwwd1/5e4/ALYD\nyyqMV0RK0HgWaYcqJoS9Bfh89HoBncHdtTNaNo2ZrQZWAxwzV/PS2koTwkZO6fE8a9ExdcYnMhZK\nTQgzsw8Ak8D1g27r7uvcfam7L53zPHXKRJpW1XiemKtfyETKKlyymtmbgAuB89zdo8W7gGNjqy2M\nlolIwDSeRcJSKDmb2XLgfcA57v6L2Fs3AzeY2UeB3wROAO4uHaWI1EbjOXz/4aQTmw5BhiwzOZvZ\n54BzgblmthO4gs5szl8DbjMzgLvc/T+5+0NmthF4mE577DJ3319X8NK87kzttGvP3ZnauvYcBo1n\nkXbITM7ufknC4mv6rP9XwF+VCUpE6qHxLNIOmiYtpWi2tohI9ZScpVZ6AIlIthP//EYAtv3l6xuO\nJHzdczXq9GxtERGRwCg5SylZj+8UkfzGpSos4sQ/v3Gszo/a2lKJu5bMzrz+LCLZxikBSTpVziIi\nIoGxQw8DajAIs6eAnwM/ajqWFHMJM7ZQ44JwYws1Lpge2wvc/eimginKzH4GPNJ0HCna9PcfilDj\ngvbENvBYDiI5A5jZve6+tOk4koQaW6hxQbixhRoXhB3bIEL+ORTb4EKNC0Y7NrW1RUREAqPkLCIi\nEpiQkvO6pgPoI9TYQo0Lwo0t1Lgg7NgGEfLPodgGF2pcMMKxBXPNWURERDpCqpxFRESEAJKzmS03\ns0fMbLuZrWk4lmPN7Otm9rCZPWRm74yWrzWzXWa2NfpzQUPx7TCzB6IY7o2WHWVmt5nZ96KvRw45\nphfHzstWM/upmb2rqXNmZtea2V4zezC2LPEcWcffRP/2vmNmpzYQ29Vm9t3o+DeZ2Zxo+WIz+2Xs\n/H2qztiqEsp41lguHJfGc/G4qh3L7t7YH2AC+D5wPDAb+DawpMF45gOnRq+PAB4FlgBrgfc0ea6i\nmHYAc3uW/TWwJnq9Bvhww3+fTwIvaOqcAWcDpwIPZp0j4ALgq4ABZwKbG4jt1cDM6PWHY7Etjq/X\nhj8hjWeN5cr+PjWe88dV6VhuunJeBmx398fc/RlgA7CiqWDcfbe73xe9/hmwDVjQVDw5rQDWR6/X\nAxc1GMt5wPfd/fGmAnD3bwD7ehannaMVwHXecRcwx8zmDzM2d7/V3Sejb+8CFtZ1/CEIZjxrLFdC\n43mAuKoey00n5wXAE7HvdxLIADKzxcApwOZo0dujdsW1TbSbIg7camZbzGx1tGyeu++OXj8JzGsm\nNAAuBj4X+z6Ecwbp5yi0f39vofObf9dxZna/md1pZq9oKqgBhHY+AY3lEjSeiys9lptOzkEys18H\n/hF4l7v/FPgk8ELgZGA38N8bCu133P1U4HzgMjM7O/6md3oojUy/N7PZwGuA7lP7QzlnUzR5jvox\nsw8Ak8D10aLdwCJ3PwX4U+AGM3teU/G1lcZyMRrPxVU1lptOzruAY2PfL4yWNcbMZtEZzNe7+xcB\n3H2Pu+939wPAp+m074bO3XdFX/cCN0Vx7Om2bqKve5uIjc5/Mve5+54oxiDOWSTtHAXx78/M3gRc\nCPxR9J8N7v4rd/9x9HoLnWu5Lxp2bAMK4nx2aSyXovFcQJVjuenkfA9wgpkdF/2mdjFwc1PBmJkB\n1wDb3P2jseXx6xavBR7s3XYIsT3XzI7ovqYz+eBBOudrVbTaKmDTsGOLXEKsBRbCOYtJO0c3A2+M\nZnmeCfxbrF02FGa2HHgf8Bp3/0Vs+dFmNhG9Ph44AXhsmLEVEMx41lguTeN5QJWP5bpms+X9Q2eG\n3aN0fpv4QMOx/A6dFsl3gK3RnwuA/w08EC2/GZjfQGzH05n9+m3goe65Ap4P3A58D/gacFQDsT0X\n+DHwG7FljZwzOv+h7AaepXPN6dK0c0RnVucnon97DwBLG4htO53rZN1/b5+K1v2D6O95K3Af8PvD\n/nst+DMGMZ41lkvFp/FcLK5Kx7KeECYiIhKYptvaIiIi0kPJWUREJDBKziIiIoFRchYREQmMkrOI\niEhglJxFREQCo+QsIiISGCVnERGRwCg5i4iIBEbJWUREJDBKziIiIoFRchYREQmMkrOIiEhglJxF\nREQCo+QsIiISGCVnERGRwCg5i4iI
BEbJWUREJDBKziIiIoFRchYREQmMkrOIiEhgakvOZrbczB4x\ns+1mtqau44hIvTSWRYbP3L36nZpNAI8CvwvsBO4BLnH3hys/mIjURmNZpBkza9rvMmC7uz8GYGYb\ngBVA4oCec8Rc/82jF9cUyuh7zlGdr7/Y12wcoWjD+XjOUdnxbfvBlh+5+9HDiSjVQGMZwGY/33nO\nwiGF1zK/9Vji4tN48bRlW3ik77q976ctzxI/dta2eY5R9f6K6j2n046x/fjp2/zWLLZsfzbf/n9r\n1qF9Z23zi534Mz+2XDuO1JWcFwBPxL7fCZwRX8HMVgOrAY6Zu4h/+NA9NYUy+k69uPN3ft+G6rsg\nbdSG83HqxZYZ32mXzHh8SOH0kzmWYep45vCFzDzn1qEE1zYHvrQycfm9due0ZRN+Tt91e99PW54l\nfuysbfMco+r9FdV7TnuPMeOijdO32TSPWSv25Nv/pnkHX2dtM3nnq3PtM66u5JzJ3dcB6wCWHL80\n3P9FRSRTfDzbnJM0njP0JoZZTP/PfQbTk0d83fj75y65gYkPFUtwgyTGPOtWvb9Xvv8iAO54+A25\n9wvTz2n8fKX9kpQ3MQ+6bhF1JeddwLGx7xdGy6Qi3eowbVm8KkuqJAdZN8+x07bJOnbWsYoet479\ntaEir4HGcsC6Seb/NBxHnf7Ph74Uvep8Tap4R1Fds7XvAU4ws+PMbDZwMXBzTccSkfpoLIs0oJbK\n2d0nzeztwD8DE8C17v5QHccaV4NWu0XWTdu+XxU86LHzbF90u0H2l7XPMsdrM43lcKW1ZgfVbRtX\n7VDFW60DX1o5FtVzbdec3f0rwFfq2r8MLs8kpH7bwvSE1v0+K3n1O3a/XxjStrtvgxdKmHnOQVI8\nY9bKnkJjOSxFk3JdSbjo8cok7+45GOUkrSeEiYiIBKax2doyfFW1sqs8dloFXOfkq3FtUUv75ama\nh10hF5UnzqzqepRb3ErOMnbGuUUt7TRKSXkQr3z/RbkSNIxei1ttbRERkcCochYRCVi/qnkUq+Ve\n3Z9x3FrcSs6SKX5deFgt4fgs8GG3obNutaoinn7n9OA18UtKH0ZaLKuVPQ6JOS5Pkh6lFrfa2iIi\nIoFR5dxCy7atmvL9JNcBU2chz7zijdPe790uSdq6ScfIiivvsfutM8l1mbOr02JNPOYVMPnB6/ru\nr99jSOug2eMSp4q5v7yTxNpePdfyec6DWnL8UtenUuW3bNsq7j5xfdNhNK5smzn083jaJTO2uPvS\npuMYlM05yfWpVMnytF2VnPPrl6SzzvEwk/fkna/Gn/72QL+Fq60tIiISGCVnqc2+f/1y0yGItMq5\nS25oOoRW6ddFOPCllZU9f7wJSs4iIiKB0YQwqVy8Yu6+Purlv1d4f0mfPa1JVDKKsiY66XrzdFm3\nWLX19iolZ6nc8qOmD4K7KZ6c4wl52PdbiwyDJoFJL7W1RUREAqPKWSrT717mKm5bUrUsIuNClbNU\nIs8DTpZtW5VrPZFxopa2JFFyFhERCYza2lJKkUo49CdziQyDKubhOnfJDdzx8BuaDiM3Vc4iItJ6\nWb/MZN2mFholZxERkcAoOUthWS3tW/alt+00OUxEJF3ha85mdixwHTAPcGCdu3/czI4CPg8sBnYA\nK939J+VDlVCUScoSJo1nGQUJ0HGYAAAbDElEQVR5nxbWBmUq50ngv7j7EuBM4DIzWwKsAW539xOA\n26PvRSRsGs8iASmcnN19t7vfF73+GbANWACsALpTcdcDmnIoEjiNZ5GwVHIrlZktBk4BNgPz3H13\n9NaTdNpkSdusBlYDHDN3URVhyBD0a2kntbO7y5Ket93dn26rCkvZ8czhC2uPse36tVd1C5VABRPC\nzOzXgX8E3uXuP42/5+5O5/rVNO6+zt2XuvvSI484umwYIlKBKsYzs48aQqQio61UcjazWXQG8vXu\n/sVo8R4zmx+9Px/YWy5EGQWauR0+jWeRcBROzmZmwDXANnf/aOytm4Hu/7SrgE3FwxORYdB4FglL\nmWvOLwf+GHjAzLZGy94PXAVsNLNLgceB9sxdl1RV3D6l689B03gWCUjh5Ozu/xewlLfPK7pfERk+\njWeRsOgJYSIiIoFRcpZMWbdPDfpEME0OExHpTx8ZKan0mE4RkWaochYREQmMkrM0Iqsdrva2iBQ1\nCk9ZU3KWRFVfZ+63r6JxiIgkSftUqjZRchYREQmMJoTJFJoEJtKcUWjHSjVUOUvjdP1ZpGMU2rFS\nDSVnERGRwKitLYDa2SIiIVHlLMHQzG0RkQ4lZxERkcAoOUtQLW1NDhMRUXIWEREJjpKziIhIYDRb\ne4yF1M5OO/byozYmvr9s2yruPnH9MEMSqcyMizZy4EvJ46t7r7MeSDLeVDmLiIgERpWziIiMhKwn\nrM24qNOJS+tahESV85gKuaUdp5nbIpJH3sTcFkrOIiIigSmdnM1swszuN7N/ir4/zsw2m9l2M/u8\nmc0uH6aIDIPGs0gYqqic3wlsi33/YeBj7v5bwE+ASys4hlQkqxWc9RCQJuR5MIlURuNZJAClkrOZ\nLQR+D/hM9L0BrwS+EK2yHtD9ACItoPEsEo6ys7X/B/A+4Ijo++cDT7v7ZPT9TmBByWNIRbIqZhl7\nGs812293AjCLPZkzh3W/cz6jNhGsq3ByNrMLgb3uvsXMzi2w/WpgNcAxcxcVDUNyaMvM7Cy37FvZ\n96EkgB5MUlCV45nDF1YbXACe3TSPWSv2lN5Hkn4PJIFO8gktQd925bv53cs/1nQYfXV/EWJT0rt3\nMot6/j6B0v9WoFzl/HLgNWZ2AXAY8Dzg48AcM5sZ/ba9ENiVtLG7rwPWASw5fqmXiENEyqtsPNuc\nkzSeRUoqnJzd/XLgcoDoN+33uPsfmdmNwOuADcAqUn5vESlCj/Wsh8Zz/Wat2HOwQp7B1H+/anEX\nk9bS3m939q1eD3xpJfs3RZcYBqxyuxVz2nbPbpqXuU4eddzn/GfAn5rZdjrXrK6p4RgiMhwazyIN\nqOTxne5+B3BH9PoxYFkV+5XyRnUSmK4/10fjuTltvP7chKxJYE2atWJP3+vReenZ2iIiPZL+c40v\ni7crk1qYaevGZ2unOTiRKcGEnzPl+9uufDfAlMlZ3WVxeSZvJW2Xd9sq9pf0syTpl5gP/uJyZY4A\nA6fHd4qIiARGlfOIGpXbp0Sa0DuRJ++tVGUnAsWP021v91bSaZPD8lbRvYpuN8j+svab53h5Wtl3\nPPyGzHV6VXGbXB2UnEfQqF5n7pVn5jbo2rMMR5n/5Psl9Qk/J7HV3UlWnaSWdd9x2vtpy3/38o8V\nStB57n9OSt79tsl7fXnQh410z3UV14froLa2iIhIYFQ5j5BxbWX3m7kNuvdZhqOKVnZc1r3PcWmV\nZ3d5bxWcd/JVHmn7LrJtVzeueNXc7R70ToqLV8x5q+D9dueUO/bTOhf9Ohp5jtVdZ+nSWbnii
lNy\nlpGgFreMqhkXbcx89EsTt1ilXauO/0IwSKs9nqjramV3k2XapYI822f9EtY7U79o21xtbRERkcCo\nchYRGQHxajPkB5WktdS77eo8FW2RT5qaUvXGCvM81W2eWfjxdXqr58ntzw4cr5LzCBjXa81JdP1Z\n2ib+n3lWyzTrCWJd09vCU6/t5m095zHIw0b6ta7ztpnzJOakhNv7MJhB29O9y9MeOpP0usg1Z7W1\nRUREAqPKueVUNYtUy09+b8LS66ZURDOveOOU99K3m76f5HU7y7Paq37ye7G1Lzi04IM5DhlJnmT1\n7szZ1V+ffX/i8qnn4JDJK6/ru7/e2dbQv2qu6pGceSdmVbFeb1WutvYYaSopv+pvL+Rr7/inWvZd\nlbwzt0XS2Narp3zf+5+tcfW09+LL0nQT04ytU/9t5r0Nq/cYSddQ+x0373I4lDB97ePJ263tf6z9\ndue0/acl4fh68XVmXLSx8wksBfQ+ZCR+jg98aeW0j+3s3S6+bdr78fWqfsqY2toiIiKBUeU8gnqr\n5lf97YUApSre7j7aJK2Cnvxgp+2W1pYTaZu8E8WqlPZQkH7i9xf3267obOwp8Z1yBgCzVmw++P6M\ntYsBMF7Qs96Og68n7t88bXmWQSb15aXKWUREJDCqnCWX0K8zj5JTL7bOi0uajUPaJV5tDlJFx6/x\nDnKvcdL2vfyD069Xd69V99uuW/XGzYi2O7BpR+I2M9Yu5sDaHdO2P/R6x8H3gdhT16buL+nY8WXd\nyjpN0nVqPb5TJEG/e5/1WE8ZBfHZ37b16sS2cFrCjifkQVrV3RbxoLK2m5JAB9y+6HtVbNOvnV1k\ntrba2iIiIoFR5dwig9w+lTSBK74s3qZOuj1qkHX7HScuabve/SVtm6el3m/CWtGW/MH2co/7Nnjq\n+mnvicRVNXlryj3PET/5vYm3P2UZZGJXYrtaKqXk3BL9EnPSPc1JCW+QJFckoSVtO8gs76xfEvJu\nl7RNnnufu63tblLuTbRpyTptuUivIrOQ0/R76Ek3affer11W0vXYcdM9B1nXnstSW1tERCQwpSpn\nM5sDfAZ4GeDAW4BHgM8Di+lMg1vp7j8pFaXUruy90Gnt7u6yfhVw1rHT3k9bnueYSdIq5qT14uuM\nSitb47kd8j0mdOq6VVTQoVfNW77625x2/jenfA/kXtYVf69JZdvaHwducffXmdls4DnA+4Hb3f0q\nM1sDrAH+rORxpGZlknKZ7bO2/do7/in1+nldt3eNcZta4zlwgyTmYeomuNOXz+SeWyZL7ev+r76B\nLfx29op94iiyrPe9ppN04ba2mf0GcDZwDYC7P+PuTwMrgO59KeuBcD9YVEQAjWeR0JSpnI8DngL+\nl5mdBGwB3gnMc/fd0TpPAvk+4kMkEKPSph6QxnPAsirmKy87jMs/8f/6bl/15DCpV5kJYTOBU4FP\nuvspwM/ptLwOcnenc+1qGjNbbWb3mtm9P/nZUyXCGG37/vXL7PvXLzcdhoy+ysYzz+yrPViRUVcm\nOe8Edrp7dz75F+gM7j1mNh8g+ro3aWN3X+fuS9196ZFHHF0iDBGpQGXjmdlHDSVgkVFWuK3t7k+a\n2RNm9mJ3fwQ4D3g4+rMKuCr6uqnPbqQCaffudk1yYd/1Jrkwcx+p617ReT35t9f1/ZSntGPkOXZi\n/FdsZPJv+3+6VNbPXVTaw0aqeghJE211jef2u/KywwBS29tVztyW+pWdrf0O4PpoZudjwJvpVOMb\nzexS4HFguJ9jNkKqbmd3PyoR6vm4xPj+h6mq43bPSdb+8j6cpIU0ngPU73pzNyH3LtP15/YrlZzd\nfSuwNOGt88rsV0SGT+NZJBx6fOcYyKqSB6mi09YtWonn2a7oMdMq4LTtuo/47N4/fdTLf2/K+2mf\nYDWms7ulZnlmaMvo0uM7RUREAqPkHKCk26eSPtxCqpN0fnULm7TVlZcd1rey9pPfG+zTxqRDbe0A\n9bZTu+6mszzroyPHTe9Et0EmiPW2qEWapna2gCpnERGR4KhybqGsai/+2cShSqv+P/SmvwPgVZuf\nm3tf923wg7czTX7wumkTtEI/FyJ1yXPvs26rCpOSswxdv7b8+z/7nwF41YAJVTOmZRTU1dLud++z\nHk4SJrW1RUREAqPKWYZmkIlsafcUi4iMAyVnCVobrp+LlDWMGdq6/twuamuLiIgERslZhkL3ZouI\n5Ke2ttQqKyl3b52CQzO10/ah9raMIj10RJKochYREQmMkrMEI15FJ1FrXEZJ1vOts56PXZSeud0O\namtLbfol06xELCKj7ZTNN3D/GW9oOoxgqXIWEREJjCpnCUq3otbkMBllWe3suhW95/nA2h0AzFi7\nuPCxT9l8Q+FtQ/PspnkAzFqxp/J9q3IWEREJjCpnqdwgt09lrdOvglb1LNI+us6cj5KziMiQ6J5m\nyUttbRERkcCUqpzN7N3AWwEHHgDeDMwHNgDPB7YAf+zuz5SMU1qginZ20jaaHDYcGs/jJ8/nPE9+\n8LpoyY6D73UnhsHUyWGTH7yO+2851LbunfwVb2nnuZUqbfJY0na9+0vaNk9LvbvdgYT3ikyEe3bT\nPJYunTXwdoWTs5ktAP4EWOLuvzSzjcDFwAXAx9x9g5l9CrgU+GTR44hI/TSe69f0DO2sY6cl6ZlX\nvBGAA/aNgWZrl7mPuZsgs5LtIMfOs318u9PO/+aUX0KKeHbTPGat2MPk9mcH3rZsW3smcLiZzQSe\nA+wGXgl8IXp/PXBRyWOIyHBoPIsEonDl7O67zOwjwA+BXwK30ml7Pe3uk9FqO4EFSdub2WpgNcAx\ncxcVDUMCUUdLu3dbzdyuT5XjmcMX1h5vm4zbJLCkqnfQ7ZO27S7rVwFnHTvt/bTl3Q7BoBV0Ffc/\nl2lrHwmsAI4DngZuBJbn3d7d1wHrAJYcv9SLxiHNqjMpJ+1L15/rUeV4tjknaTy3UL/rz4OospVd\n5bHvP+MNiYm9zseIdpN0kWvOZdrarwJ+4O5PufuzwBeBlwNzorYYwEJgV4ljiMhwaDyLBKTMbO0f\nAmea2XPotMHOA+4Fvg68js4Mz1XAprJBikjtNJ5rEOoksDRZk8NkMN22dpEJYWWuOW82sy8A9wGT\nwP102lpfBjaY2V9Gy64pegwJ2zBb2lIvjedqjdt1Zqleqfuc3f0K4IqexY8By8rsV0SGT+NZJBx6\nfKcMrMmKWTO3RepX1eSwQcRnYw/z+dvxiWIhPfdbyVlaSTO3RZrT+4Sw05d3UskkHHydJW3dSbIf\nGJK2Xdaxu/cE9q7XXR4/btITwuI6ly6ui70+ZNaKzsdtNjVbW0RERGqgylkG0q+lrQlgMu5GfSJY\n2mM777mlU3vO5I0HX2e5/5Y3cE/K8ixp2yUtT9p373pJxzzt/G8efJ30EBLbevXB2djG1QeXx/8N\nlJmtrcpZREQkMErOMrKWbVuVOXlNpCqjXjXLcKmt
LbmEeE9z1sztLs3gFpE8pkxEW7u49KdSlaHK\nWUREJDCqnKX1+t1WJVI3tbNHR/ye5wNrd+T63Oq6KDlLpjbM0M7zcBLQvc9SLSXm0dN9EEl8tnYT\n1NYWEREJjJKzpMqa7RxK1RyXFZNmb4tIG6itLSIyoHFqZx+wb/R9f8tXf3tIkYwXVc4iIiKBUeUs\nIjKAflXzKFXMXTP8bCC9gj7t/G/WVj1XOSlry1d/u/FJXoNQcpZEbbvWHKeZ2yLDFU96p53fYCB9\ntCkxg9raIiIiwVHlLFOE+JjOovJU0KqeJa9xmgSWZIafnTk5bBxM3L95KMdRchYRkVyyrj+X3W9T\nQvylQ21tERGRwKhyFmC02tm9+j17W5PDJI9xm6Ed1/35Lv/E/zu4rIoWd9PVclxdHYEyVDmLiIgE\nJrNyNrNrgQuBve7+smjZUcDngcXADmClu//EzAz4OHAB8AvgTe5+Xz2hi+SnyWEdGs9SlaLVZkgV\nc68Zfja29eqmwwDytbU/C/xP4LrYsjXA7e5+lZmtib7/M+B84ITozxnAJ6OvErBRbmnLNJ9F4zm3\ncZ+hnccMP3tK6zvpnLz/bf8NyD6fIejG2HSSzmxru/s3gH09i1cA3TJjPXBRbPl13nEXMMfM5lcV\nrIiUo/Es0g5FJ4TNc/fd0esngXnR6wXAE7H1dkbLdtPDzFYDqwGOmbuoYBgiUoFKxzOHL6wtUGnO\nlZcdNmVSWFza8q42VMyhKT1b293dzLzAduuAdQBLjl868PZSTp6PThzFdrZmbvdXxXi2OSe1fjyr\nnZ0saeZ2FdYdfkel+8tj9S/PHfoxB1F0tvaebnsr+ro3Wr4LODa23sJomYiES+NZJDBFK+ebgVXA\nVdHXTbHlbzezDXQmjvxbrF0mAchTMcNoVs1d+mCMaTSeBzCuVXMRWRV2ExVz77HTKmg/+b2NTgrL\ncyvV54BzgblmthO4gs4g3mhmlwKPAyuj1b9C57aL7XRuvXhzDTFLQePaypZDNJ5lWJ5/7S10/qmF\nbd3hd/RN0NDMzO3M5Ozul6S8dV7Cug5cVjYoEamHxrNIO+jxnQKMZ8Xcb3IYjM+DSaRDk8CqFfqE\nq7g8LW4YbgWt5CxjTdefBZSYq5R3JneT15vbQM/WFhERCYwq5zGgx3OKSChCrpj7TQ6D4c7gVuUs\nIiISGFXOI65f1ayKWUSGKeSquSuUyWGqnEXI/kUl78NbRGS6dYff0YrEHBIlZxERkcCorS0Sybqt\nSkbPhz79FwBc/omGA2mJqj/wImRNTw5Tch5RmqEtIk1reyu7yevPamuLiIgERpXzmFHFnC3PU8P0\nxLD267a08xindm6voj9726vmpik5i4hIZZSUq6G2toiISGCUnEfIsm2r9NCRCvU7X1nnWsL1oU//\nxbSWtj7YQkKjtvYI0Mxskfze/7b/lrjcoq9Zn1Al6Ua1pZ1n1nbVM7ZVOYuIiARGyVlEJGZYnzok\n7dOvM+Anv7fSrova2iIlfO2Mn/Oqzc9tOgypmBJ08gM21PIfHlXOIiIigVHlPKbiD9goM2Es6UEd\nw3pGtSa6iQxPv6p5VCeCJcmaHFYVVc4iIiKBMXfvv4LZtcCFwF53f1m07Grg94FngO8Db3b3p6P3\nLgcuBfYDf+Lu/5wVxJLjl/o/fOieMj/H2PraGT/Xpyg1KF69V33t+bRLZmxx96VV7nMY49nmnOQz\nz7m1yrClAXmvL49T1dwrq3ruXq+fvPPV+NPftr4r98hTOX8WWN6z7DbgZe7+74BHgcsBzGwJcDHw\n0mibvzOziUECksF96E1/pxbvkLX4nH8WjWeR4GUmZ3f/BrCvZ9mt7j4ZfXsXsDB6vQLY4O6/cvcf\nANuBZRXGKyIlaDyLtEMV15zfAnw1er0AeCL23s5o2TRmttrM7jWze3/ys6cqCGO8fO2Mn/O1M34+\nZVmLq7lWSTvHvX8fLVV6PPPMvqRVZMSsO/yOsW5pQ/Y5KHPrWanZ2mb2AWASuH7Qbd19HbAOOtec\ny8Qxjvpd33zViev13OcaHPyYyM3NxlGXqsazzTlJ41mkpMLJ2czeRGdiyXl+aFbZLuDY2GoLo2Ui\nEjCNZ5GwFErOZrYceB9wjrv/IvbWzcANZvZR4DeBE4C7S0cpAztY5ZH8wRhZ7xc5Tu/+kt6r0jC7\nA3X/LE3SeJZef3/W6bXfxyv9ZSZnM/sccC4w18x2AlfQmc35a8BtZgZwl7v/J3d/yMw2Ag/TaY9d\n5u776wpe8slKLFUnnmElslFOmHXReJZ+/v6s04HsW4TG/VrzMGQmZ3e/JGHxNX3W/yvgr8oEJSL1\n0HgWaQc9vlNERAailnd+f3/W6fzXLU8OvJ2Ss4iI8Pdnnc5//Fb0pEZ9+lTj9GxtERGRwKhyFhER\nIP+EMKmfkrOIyBjrJuS4dYffoQRdgTKz2tXWFhERCUzmR0YOJQizp4CfAz9qOpYUcwkztlDjgnBj\nCzUumB7bC9z96KaCKcrMfgY80nQcKdr09x+KUOOC9sQ28FgOIjkDmNm9VX92bVVCjS3UuCDc2EKN\nC8KObRAh/xyKbXChxgWjHZva2iIiIoFRchYREQlMSMl5XdMB9BFqbKHGBeHGFmpcEHZsgwj551Bs\ngws1Lhjh2IK55iwiIiIdIVXOIiIigpKziIhIcBpPzma23MweMbPtZram4ViONbOvm9nDZvaQmb0z\nWr7WzHaZ2dbozwUNxbfDzB6IYrg3WnaUmd1mZt+Lvh455JheHDsvW83sp2b2rqbOmZlda2Z7zezB\n2LLEc2QdfxP92/uOmZ3aQGxXm9l3o+PfZGZzouWLzeyXsfP3qTpjq0oo41ljuXBcGs/F46p2LLt7\nY3+ACeD7wPHAbODbwJIG45kPnBq9PgJ4FFgCrAXe0+S5imLaAcztWfbXwJro9Rrgww3/fT4JvKCp\ncwacDZwKPJh1joALgK8CBpwJbG4gtlcDM6PXH47Ftji+Xhv+hDSeNZYr+/vUeM4fV6VjuenKeRmw\n3d0fc/dngA3AiqaCcffd7n5f9PpnwDZgQVPx5LQCWB+9Xg9c1GAs5wHfd/fHmwrA3b8B7OtZnHaO\nVgDXecddwBwzmz/M2Nz9VnefjL69C1hY1/GHIJjxrLFcCY3nAeKqeiw3nZwXAE/Evt9JIAPIzBYD\npwCbo0Vvj9oV1zbRboo4cKuZbTGz1dGyee6+O3r9JDCvmdAAuBj4XOz7EM4ZpJ+j0P79vYXOb/5d\nx5nZ/WZ2p5m9oqmgBhDa+QQ0lkvQeC6u9FhuOjkHycx+HfhH4F3u/lPgk8ALgZOB3cB/byi033H3\nU4HzgcvM7Oz4m97poTRyb5yZzQZeA9wYLQrlnE3R5Dnqx8w+AEwC10eLdgOL3P0U4E+BG8zseU3F\n11Yay8VoPBdX1VhuOjnvAo6Nfb8wWtYYM5tFZzBf7+5fBHD3Pe6+390PAJ+m074bOnffFX3dC9wU\nxbGn27q
Jvu5tIjY6/8nc5+57ohiDOGeRtHMUxL8/M3sTcCHwR9F/Nrj7r9z9x9HrLXSu5b5o2LEN\nKIjz2aWxXIrGcwFVjuWmk/M9wAlmdlz0m9rFwM1NBWNmBlwDbHP3j8aWx69bvBZ4sHfbIcT2XDM7\novuazuSDB+mcr1XRaquATcOOLXIJsRZYCOcsJu0c3Qy8MZrleSbwb7F22VCY2XLgfcBr3P0XseVH\nm9lE9Pp44ATgsWHGVkAw41ljuTSN5wFVPpbrms2W9w+dGXaP0vlt4gMNx/I7dFok3wG2Rn8uAP43\n8EC0/GZgfgOxHU9n9uu3gYe65wp4PnA78D3ga8BRDcT2XODHwG/EljVyzuj8h7IbeJbONadL084R\nnVmdn4j+7T0ALG0gtu10rpN1/719Klr3D6K/563AfcDvD/vvteDPGMR41lguFZ/Gc7G4Kh3Lenyn\niIhIYJpua4uIiEgPJWcREZHAKDmLiIgERslZREQkMErOIiIigVFyFhERCYySs4iISGD+PweNEw/z\nE74eAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fIPEGKH9WYCg", + "colab_type": "text" + }, + "source": [ + "### Visualise Semantic Segmentation Results" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nE-8-7TXT54I", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + }, + "outputId": "91194862-7228-4d61-97a8-50ae14650dca" + }, + "source": [ + "\n", + "# Visualise Results\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i)\n", + " image = np.array(img)[:, :, [2, 1, 0]]\n", + " result = vis_demo.run_on_opencv_image(image, semantic=\"True\")\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(result)\n", + "plt.show()" + ], + "execution_count": 53, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X2wXXV56PHvc4nUoiggTqQJllBp\nZyhzrUyKdKDqiO1FiiQdHQr1XqKmpp3RFsrthCDj4L3eawFbWju3VxuEGjq8+F5ir7YipVU7JSWJ\nIG9FIkpJJi8KorQ4avS5f+x1ZHNyTs7Ze6+91m/t/f3MnDlrr7P2Xs9ZJ0+e/fzWb60dmYkkSSrH\nf2o7AEmS9EwWZ0mSCmNxliSpMBZnSZIKY3GWJKkwFmdJkgoztuIcEWdGxIMRsSMiNoxrP5LGy1yW\nmhfjuM45Ig4BvgL8CrATuBM4PzPvr31nksbGXJbaMa7O+RRgR2Y+nJnfB24GVo1pX5LGx1yWWrBk\nTK+7DHi07/FO4OXzbRwR3qZMOtA3M/OFLccwUC6D+SzNJTNjkO3HVZwXFBHrgHVt7V/qgEfaDmCx\nzGepXuMqzruAY/seL6/W/VhmbgQ2gu+0pYItmMtgPkt1G9c55zuBEyJiRUQcCpwHbB7TviSNj7ks\ntWAsnXNm7o+ItwN/BxwCXJeZ941jX5LGx1yW2jGWS6kGDsJhMGku2zJzZdtBDMp8lg406IQw7xAm\nSVJhLM6SJBXG4ixJUmEszpIkFcbiLElSYSzOkiQVxuIsSVJhLM6SJBXG4ixJUmEszpIkFcbiLElS\nYSzOkiQVxuIsSVJhxvKRkZI0yX5z7acXve2N1541xkg0qfzISKlcfmRkgwYpuHWyeE8HPzJSkqSO\ns3OWymXnPGZtdcvzsYueXIN2zhZnqVwW55qVVowPxkI9WRzWliSp4+ycpXLZOdegS93yfOyiu89h\nbWlyWJyHMAnF+GAs1N3ksLYkSR03dHGOiGMj4vaIuD8i7ouIC6v1R0XErRHxUPX9yPrClTQO5nN3\n/ObaT0/86IBGGNaOiGOAYzJze0QcDmwDVgNvAh7PzCsiYgNwZGZessBrOawtHaixYe2u5/O0FiuH\nuLujsWHtzNydmdur5SeBB4BlwCpgU7XZJnoJLqlg5rNUllrOOUfEccDLgC3A0szcXf1oD7C0jn1I\naob53B0OcU+ukYtzRDwX+DhwUWZ+p/9n2Rszn3OIKyLWRcTWiNg6agyS6tHFfLY4eQwm0UjFOSKe\nRS+Rb8jMT1Sr91bnr2bOY+2b67mZuTEzV3bxUhFpEpnPUjlGmRAW9M5BPZ6ZF/Wtfy/wWN8EkqMy\nc/0Cr+WEMOlATU4I61w+2y3OzUliZRp0Qtgon+d8GvDfgHsi4q5q3TuAK4CPRMRa4BHg3BH2IakZ\n5rNUEO8QJpXLO4QdhJ3z/Oyey+MdwiRNPAuzJp3FWZKkwjisLZXLYe1Z7JgH4/B2ORzWliSp4yzO\nkiQVxmFtqVwOa/dxSHt4Dm+3z2FtSZI6zuIsSVJhLM6SJBXG4ixJUmEszpIkFcbiLElSYSzOkjTh\nvAyteyzOkiQVxuIsSVJhLM6SJBXG4ixJUmEszpIkFcbiLElSYSzOkiQVxuIsSVJhLM6SJBVm5OIc\nEYdExJci4m+qxysiYktE7IiID0fEoaOHKakJ5rNUhjo65wuBB/oeXwn8SWa+BPgWsLaGfUhqhvks\nFWCk4hwRy4FfAz5YPQ7g1cDHqk02AatH2YekZpjPUjmWjPj8PwXWA4dXj18APJGZ+6vHO4Flcz0x\nItYB60bcv6T6FJnPfmiDptHQnXNEnA3sy8xtwzw/Mzdm5srMXDlsDJLqYT5LZRmlcz4NOCcizgKe\nDTwPeB9wREQsqd5tLwd2jR6mpDEzn6WCDN05Z+almbk8M48DzgP+PjPfCNwOvKHabA1wy8hRShor\n81kqyziuc74EuDgidtA7Z3XtGPYhqRnms9SCUSeEAZCZ/wD8Q7X8MHBKHa8rqXnms9Q+7xAmSRPu\nxmvPajsEDcjiLElSYSzOkiQVxuIsSVJhLM6SJBXG4iypaDdee5YTmjR1LM6SJBXG4ixJUmFquQmJ\nJKk8ng7oLjtnSZ1godE0sThLklQYh7UXcNXHrx/6uetff0GNkUiSpoXFeQ6jFOT5XsdCLakpngLo\nPoe1JUkqTGRm2zEQEa0GUVenvFh20VqkbZm5su0gBtVEPv/m2k+PexedZudcnsyMQba3c5YkqTBT\n2zk33S3PxQ5aC7BzPgi75/nZOZdn0M55aiaElVCMZ3PCmKS6WZgng8PakiQVZuKHtUvsmA/GDlp9\nHNZeBIe3e+yYyzbosPZEFueuFeTZLNCqWJwXyQJtcS6ds7UlSeq4kTrniDgC+CBwEpDAW4AHgQ8D\nxwFfB87NzG8t8Dq1vdPuetfczw566jXaOZeYz4Oatg7abrk7Gh3WjohNwBcy84MRcShwGPAO4PHM\nvCIiNgBHZuYlC7z
O0EFMUjE+GAv1VGq6OLeez3Wa9EJtYe6Wxoa1I+L5wCuAa6sdfz8znwBWAZuq\nzTYBq4fdh6RmmM9SWUa5znkF8A3gLyPipcA24EJgaWburrbZAyyd68kRsQ5YN8L+p6ZrlhrQej7X\nbaaznLQO2o55Ogw9rB0RK4E7gNMyc0tEvA/4DvC7mXlE33bfyswjF3itgYKY9qLsEPfUaGxYu818\nbkKXC7TFeDI0OVt7J7AzM7dUjz8GnAzsjYhjAKrv+0bYh6RmmM9SQUadEPYF4Lcy88GIeBfwnOpH\nj/VNIDkqM9cv8DqLDmLau+YZds9ToekJYY3nc9O61EHbMU+Wpmdr/wK9Sy8OBR4G3kyvG/8I8GLg\nEXqXXjy+wOssGIRFeW4W6YnWdHFuLJ9L03bRthBPvkY/+CIz7wLm+s/jjFFeV1LzzGepHJ24fadd\n88HZPU8sb9/Zgia6aDvl6ePtOyVJ6jg75wliBz1x7JylCdHoOedxsyhLkqaRw9qSJBWmiGHt5S9Z\nkRde9T/bDmOiOMQ9ERzWliaEE8IkSeo4i7MkSYWxOEuSVBiLsyRJhbE4S5JUGIvzhLrq49d7nbgk\ndZTFWZKkwlicJUkqjMVZkqTCWJwnnOedJal7LM6SJBXG4ixJUmEszpIkFcbiLElSYSzOkiQVxuIs\nSVJhRirOEfH7EXFfRNwbETdFxLMjYkVEbImIHRHx4Yg4tK5gJY2P+SyVY+jiHBHLgN8DVmbmScAh\nwHnAlcCfZOZLgG8Ba+sIVNL4mM9SWUYd1l4C/GRELAEOA3YDrwY+Vv18E7B6xH1Iaob5LBVi6OKc\nmbuAPwL+jV4SfxvYBjyRmfurzXYCy+Z6fkSsi4itEbH1P7795LBhSKpBnfncRLzSpFsy7BMj4khg\nFbACeAL4KHDmYp+fmRuBjQDLX7Iih41D6vfFf3rporc9/bS7xxhJt9SZzxFhPhfmhtVPtbr/N/71\nYa3uv4tGGdZ+DfC1zPxGZv4A+ARwGnBENSwGsBzYNWKMksbPfJYKMnTnTG/469SIOAz4LnAGsBW4\nHXgDcDOwBrhl1CAlGKwrruP1pqyzNp8nSNud8mz98dhFL87QxTkzt0TEx4DtwH7gS/SGtf4fcHNE\n/K9q3bV1BKrpVHdBrnvfk1LAzefuK60gz2euOC3YBxqlcyYzLwcun7X6YeCUUV5XUvPMZ6kcIxVn\naVza7JgH0R/npHTR6p6udM3zuWH1U3bPs1icVZSuFOW5zBe7RVvj0vWi3M/z0s/kvbUlSSqMxVnF\n6HLXfDBf/KeXTuzvpvZMUtc82w2rn5ro328xLM6SJBXGc85q3bR0lV/8p5d6/lkjm/aOclrYOUtS\nR1iYp4fFWZKkwlic1appGdKe4eQwDWNaJ0hN4+88w3POaoUFSpLmZ+csSVJhLM4Tbv3rL2g7BM3B\nkQNpcaZ1SN/iLElSYSzOkiQVxuIsSQWbxiFdOVt7Yr1y/7vbDkGSNCQ7Z0mSCmPnrMY5U1lamMPZ\n062Izvnw3O0wbI1KPZbeHUuSFqeI4ixJkp5WVHHu7/i8ecbgXrn/3cV2zZKkxSuqOEuSpEUU54i4\nLiL2RcS9feuOiohbI+Kh6vuR1fqIiD+LiB0R8eWIOHmU4Na//gI7aKlGbeazpMVbTOf8IeDMWes2\nALdl5gnAbdVjgNcCJ1Rf64D3DxqQQ7OD85hpAB+iwXyWNJwFi3Nmfh54fNbqVcCmankTsLpv/fXZ\ncwdwREQcU1ewkkZjPkvdMOx1zkszc3e1vAdYWi0vAx7t225ntW43s0TEOnrvxnnR0U+H8fLfeGjI\nkKbTfB2zx1EDqDWfJY1u5JuQZGZGRA7xvI3ARoCIyIMVk/Wvv4CrPn798EFOoIMNY1uYNay68rn2\nwKQpM+xs7b0zw1vV933V+l3AsX3bLa/WSSqX+SwVZtjivBlYUy2vAW7pW39BNcvzVODbfcNlI3Hm\n9tMONpRdatfsncGK1ng+Szq4BYe1I+Im4FXA0RGxE7gcuAL4SESsBR4Bzq02/zRwFrADeAp4c90B\n9xfoaRvq9vyyRlVaPkua24LFOTPPn+dHZ8yxbQJvGzUoSeNhPkvd0OlPpZqmLtquWZKmR6eLc7+5\nzkdPQsGeKcoWYUmaHt5bW5KkwkxM5zyXrl4fPYmz0k8/7W5nbPc5/bS72w5BUsHsnCVJKsxEd84w\nfxfaZkc9iZ2xFseOWdJiTHxxnk/TM70tyJKkxXJYW5KkwkTvPgMtB1HwjfLn66rthAfnhLCBh7W3\nZebKccUyLiXncxfdsPqptkNo1Rv/+rC2Q6hFZsYg21uc1ZhpL85DnG+2OAuY7gI9rcXZYW1Jkgpj\ncZYkqTBTO1tbaoqXT0kalJ2zJEmFsTirMaefdrddpCQtgsVZkqTCWJwlSSqMxVkaI4fxJQ3D4qzG\nTUPB8vy6pFFYnCVJKozXOUs1sVPWuMzcwnLabuM5KbfuHIadsyRJhVmwOEfEdRGxLyLu7Vv33oj4\n14j4ckR8MiKO6PvZpRGxIyIejIj/Mq7ApVJ06fyy+dxt09xJTpsFP5UqIl4B/DtwfWaeVK37VeDv\nM3N/RFwJkJmXRMSJwE3AKcBPAZ8DfjYzf7jAPvwUmynWxU+raqgY1/6pVObz5JjkIe5JfBNS+6dS\nZebngcdnrftsZu6vHt4BLK+WVwE3Z+b3MvNrwA56iS2pAOaz1A11nHN+C/CZankZ8Gjfz3ZW6w4Q\nEesiYmtEbK0hBqkxXRnCHpL53BGT2F3C5P5egxpptnZEXAbsB24Y9LmZuRHYWL2Ow2BTrL/YlTrE\nPeEFGTCfu2iuQtbF4W4L8oGGLs4R8SbgbOCMfPrE9S7g2L7NllfrJBXMfJbKsuCEMICIOA74m74J\nJGcCVwOvzMxv9G3388CNPD2B5DbgBCeQaFhtdtIFdMu1TwgD83malNxFT1u3POiEsMXM1r4JeBVw\nNLAXuBy4FPgJ4LFqszsy83eq7S+jd95qP3BRZn5m9mvOsQ+TWQMbtXAXUHwXMo7Z2uazfmwcxXva\niu5i1V6cm2AyaxgW5zKZz91hcW6OxVmq9BfvDhTiuVic1ahhi7UFeWEWZ2lyWJylCVH7TUgkSVKz\nLM6SJBXG4ixJUmEszpIkFWak23fW6JvAf1TfS3Q0ZcZWalxQbmylxgUHxvbTbQUyon8HHmw7iHl0\n6e9filLjgu7ENnAuFzFbGyAitpY6M7XU2EqNC8qNrdS4oOzYBlHy72Fsgys1Lpjs2BzWliSpMBZn\nSZIKU1Jx3th2AAdRamylxgXlxlZqXFB2bIMo+fcwtsGVGhdMcGzFnHOWJEk9JXXOkiSJAopzRJwZ\nEQ9GxI6I2NByLMdGxO0RcX9E3BcRF1br3xURuyLirurrrJbi+3pE3FPFsLVad1RE3BoRD1Xfj2w4\npp/rOy53RcR3IuKito5ZRFwXEfsi4t6+dXMeo+j5s+rf3pcj4uQWYntvRPxr
tf9PRsQR1frjIuK7\nfcfvA+OMrS6l5LO5PHRc5vPwcdWby5nZ2hdwCPBV4HjgUOBu4MQW4zkGOLlaPhz4CnAi8C7gD9o8\nVlVMXweOnrXuKmBDtbwBuLLlv+ceetf0tXLMgFcAJwP3LnSMgLOAzwABnApsaSG2XwWWVMtX9sV2\nXP92XfgqKZ/N5dr+nubz4uOqNZfb7pxPAXZk5sOZ+X3gZmBVW8Fk5u7M3F4tPwk8ACxrK55FWgVs\nqpY3AatbjOUM4KuZ+UhbAWTm54HHZ62e7xitAq7PnjuAIyLimCZjy8zPZub+6uEdwPJx7b8BxeSz\nuVwL83mAuOrO5baL8zLg0b7HOykkgSLiOOBlwJZq1dur4Yrr2hhuqiTw2YjYFhHrqnVLM3N3tbwH\nWNpOaACcB9zU97iEYwbzH6PS/v29hd47/xkrIuJLEfGPEfHLbQU1gNKOJ2Auj8B8Ht7Iudx2cS5S\nRDwX+DhwUWZ+B3g/8DPALwC7gT9uKbTTM/Nk4LXA2yLiFf0/zN4YSivT7yPiUOAc4KPVqlKO2TO0\neYwOJiIuA/YDN1SrdgMvzsyXARcDN0bE89qKr6vM5eGYz8OrK5fbLs67gGP7Hi+v1rUmIp5FL5lv\nyMxPAGTm3sz8YWb+CLiG3vBd4zJzV/V9H/DJKo69M0M31fd9bcRG7z+Z7Zm5t4qxiGNWme8YFfHv\nLyLeBJwNvLH6z4bM/F5mPlYtb6N3Lvdnm45tQEUczxnm8kjM5yHUmcttF+c7gRMiYkX1Tu08YHNb\nwUREANcCD2Tm1X3r+89b/Dpw7+znNhDbcyLi8JllepMP7qV3vNZUm60Bbmk6tsr59A2BlXDM+sx3\njDYDF1SzPE8Fvt03XNaIiDgTWA+ck5lP9a1/YUQcUi0fD5wAPNxkbEMoJp/N5ZGZzwOqPZfHNZtt\nsV/0Zth9hd67ictajuV0ekMkXwbuqr7OAv4KuKdavxk4poXYjqc3+/Vu4L6ZYwW8ALgNeAj4HHBU\nC7E9B3gMeH7fulaOGb3/UHYDP6B3zmntfMeI3qzOP6/+7d0DrGwhth30zpPN/Hv7QLXt66u/813A\nduB1Tf9dh/wdi8hnc3mk+Mzn4eKqNZe9Q5gkSYVpe1hbkiTNYnGWJKkwFmdJkgpjcZYkqTAWZ0mS\nCmNxliSpMBZnSZIKY3GWJKkwFmdJkgpjcZYkqTAWZ0mSCmNxliSpMBZnSZIKY3GWJKkwFmdJkgpj\ncZYkqTAWZ0mSCmNxliSpMBZnSZIKY3GWJKkwFmdJkgoztuIcEWdGxIMRsSMiNoxrP5LGy1yWmheZ\nWf+LRhwCfAX4FWAncCdwfmbeX/vOJI2NuSy1Y8mYXvcUYEdmPgwQETcDq4A5Ezoi6n+HIHXfNzPz\nhS3HMFAuV9uYzwU7/EXL2w7hoJ7cs7PtEMYiM2OQ7cdVnJcBj/Y93gm8vH+DiFgHrBvT/qVJ8Ejb\nAbCIXAbzuUte/qaL2w7hoD53RdnxNWVcxXlBmbkR2Ai+05a6znwu32s2XN12CIvymg1XW6AZ34Sw\nXcCxfY+XV+skdYu5PAG6UphnvGbD1Z2LuW7jKs53AidExIqIOBQ4D9g8pn1JGh9zWWrBWGZrA0TE\nWcCfAocA12Xm/z7Itg6DSQfalpkr2w5ikFyutjefWzaJXWfXh7oHnRA2tuI8UBAmszSXIorzoMzn\n9k1icZ7R1SI9aHH2DmGSJBXGzlkql52zBjbJXfNsXeqiHdaWJofFWYs2TUW5X1cKtMPakiR1nMVZ\nkjpuWrvmSeawtlQuh7V1UBblp5U+vO2wtiRJHWdxlqQOsmt+pkk7HhZnSZIKY3GWJKkwrX1kpCRp\ncJM2fKu52TlLklQYi7MkdYRd8/TwOmepXF7nLMCiPKgSr3n2OmdJkjrO4ixJmiiTMNJgcZakgk1C\nodHgLM6SJBXG65wlqUB2zNPNzlmSNHFes+HqTr/BsThLklQYh7UlqSBd7vZUn6E754g4NiJuj4j7\nI+K+iLiwWn9URNwaEQ9V34+sL1xJ42A+l8HCXL+uDm+PMqy9H/jvmXkicCrwtog4EdgA3JaZJwC3\nVY8llc18lgoydHHOzN2Zub1afhJ4AFgGrAI2VZttAlaPGqSk8TKfpbLUcs45Io4DXgZsAZZm5u7q\nR3uApfM8Zx2wro79S6qP+Sy1b+TZ2hHxXODjwEWZ+Z3+n2XvUzXmvAl+Zm7MzJVdvLG/NKnMZ6kM\nI3XOEfEseol8Q2Z+olq9NyKOyczdEXEMsG/UINUtt779Uz9e/pX/87oWI9EgzGepHKPM1g7gWuCB\nzOyfCrcZWFMtrwFuGT48SU0wn6WyDP15zhFxOvAF4B7gR9Xqd9A7T/UR4MXAI8C5mfn4Aq/l5792\nXH+3vBC76UVr7POczed2dfFSny5q83OeB/0856GHtTPzi8B8Oztj2NdVtwxSlPufY4Eui/kslcXb\nd0qSVBhv36mBDdMtS5qbQ9qai52zWnHr2z9lkZekeVicJUkqjMVZA6m727V7lqQDDX0pVa1BeOlF\n8Zooos7gPkBjl1LVyXwenOedm9GlS6nsnCVJKozFWQtqaujZIW5J49Jm1zwMi7MkSYWxOEuSVBhv\nQqJ5tTHMPLNPJ4dJmmbO1tacSjn/O+VF2tnaU8ZZ2+PT9jlnZ2tLktRxFmdJkgpjcZYkqTAWZz1D\naR9IUVIsktQUJ4QJ6EYRnMLJYU4Im0JOChsPJ4RJkqSRWJwlqSCfu+Li1rs8tc/irM7owtC7JNXB\n4ixJUmFGLs4RcUhEfCki/qZ6vCIitkTEjoj4cEQcOnqYGqcudaSlzSafNOazVIaRZ2tHxMXASuB5\nmXl2RHwE+ERm3hwRHwDuzsz3L/Aazu5sQdeL3BTM3m58trb5XB5nb4+mlPP3jc7WjojlwK8BH6we\nB/Bq4GPVJpuA1aPsQ1IzzGepHKMOa/8psB74UfX4BcATmbm/erwTWDbiPiQ1w3wuUCmdn5o19EdG\nRsTZwL7M3BYRrxri+euAdcPuX6Pp+pA2+PGSdTKfNWm6/qZmlM9zPg04JyLOAp4NPA94H3BERCyp\n3m0vB3bN9eTM3AhsBM9RSQUwn6WCDD2snZmXZubyzDwOOA/4+8x8I3A78IZqszXALSNHKWmszOey\neWOS6TOO65wvAS6OiB30zlldO4Z9SGqG+Sy1wA++mDKTcK75YCbs/LMffKEDeGnVwkocZRj0UqpR\nzjlLkho2U3gs0gcqsSgPy9t3SpJUGDvnKTLpQ9rSNPncFRfbPTNZ3XI/zzlPgWkryhN03tlzzlqU\naS3SXSrMjd6+U5Ik1c/iLEkd16UOsg7TcN23w9pTYNqGtWdMwPC2w9oayiQOc3e9GDusLUlSxzlb\nW5ImzKRcC931bnkUFmdJmlBdLdLTXJRnOKwtSVJh7Jwn3LROBpP0tP5OtMQu2k75QBZnSVLjLMgH\n57C2JEmF8TrnKTDtQ9sdvt7Z65x
Vu8UOa9vZ1svrnCVJ6jiLsyRNkcV0xHbN7XNCmCberW//VJeH\ntqXGWJTLYecsSVJh7JwlaYosOWcP/3D/getfdeKNP54sVmIHvf2U8+dcf/K/3NRwJM1wtvaUmdaZ\n2x0d1na2tmq35Jw9C26zf/OLGohk/oI7jNKLtLO1JUnquJGGtSPiCOCDwElAAm8BHgQ+DBwHfB04\nNzO/NVKUksbOfNaMJefsGUv3XGenvNBrl95JL2SkYe2I2AR8ITM/GBGHAocB7wAez8wrImIDcGRm\nXrLA6zgM1oJpGuJ2WHth5vN0WMywdr9RivQ4i/Gomi7ejQ1rR8TzgVcA11Y7/n5mPgGsAjZVm20C\nVg+7D0nNMJ+lsowyrL0C+AbwlxHxUmAbcCGwNDN3V9vsAZaOFqLGpb+bnOQuuqNdc9PM5wk3aMc8\n6Wa6+lKHv0cpzkuAk4HfzcwtEfE+YEP/BpmZ8w1xRcQ6YN0I+1eNFlvASiniFtzamc+qVclD2l0w\nymztncDOzNxSPf4YveTeGxHHAFTf98315MzcmJkru3ipiDSBzGepIEN3zpm5JyIejYify8wHgTOA\n+6uvNcAV1fdbaolURbBjnUzms+YzMxy+2Ilhdsz1GPUOYb8L3FDN7HwYeDO9bvwjEbEWeAQ4d8R9\nSGqG+ayps/2U84s87zxScc7Mu4C5hrHOGOV1JTXPfJbK4R3CJGnCXbX3rY3sp6tD2ttPOb+42C3O\nkiQVxk+lkqQp8J5r3gnAO9767tpfu7SucxJYnCVpwjQ1jK3xcVhbkqTC2DlLUoeM2hW/55p3DjS0\n3dRnO+uZ7JwlSaKsc+cWZ0mSCmNxlqSOqGui13uueeePZ28vZMk5e/xEqxZYnCWpcFftfetYZmAv\ntkAfTElDwZPE4ixJUmGcrS1JGtikdswzv1fbH4Zh5yxJBRv3DUUGOf+s5licJUkqjMPaklSgNm/B\n6Y1H2mfnLEkFGdfM7IU4tF0Wi7MkSYWxOEtSIfw0Kc2wOEuSVBgnhEmSgKfPO6/f3HIgsjhLUpsc\nytZcHNaWJKkwIxXniPj9iLgvIu6NiJsi4tkRsSIitkTEjoj4cEQcWlewksbHfJbKMXRxjohlwO8B\nKzPzJOAQ4DzgSuBPMvMlwLeAtXUEKml8zOfmtXU982KUGtc0GXVYewnwkxGxBDgM2A28GvhY9fNN\nwOoR9yGpGeazVIihi3Nm7gL+CPg3ekn8bWAb8ERm7q822wksm+v5EbEuIrZGxNZhY5BUD/O5WV3o\nTEvu7KfB0LO1I+JIYBWwAngC+Chw5mKfn5kbgY3Va+WwcUganfncDIudFmuUYe3XAF/LzG9k5g+A\nTwCnAUdUw2IAy4FdI8YoafzMZ6kgo1zn/G/AqRFxGPBd4AxgK3A78AbgZmANcMuoQWqyXfnwk4va\n7pLjDx9zJFPNfJYKEpnDj0BFxP8AfgPYD3wJ+C1656RuBo6q1v3XzPzeAq/jMNiEW2wBXqwpKdTb\nMnNlUzszn8erq0Pa65dec9Cfbz/l/IYiadbJ/3JTra+XmTHI9iPdISwzLwcun7X6YeCUUV5XUvPM\nZ6kc3r5TY1V3x9z/ulPSPUtFmtSOuRQWZ9VuXAX5YPuxUKtEXR3K7jfzOyw0vK16eW9tSZIKY+es\nWjTVLS+0fztoSZPAzlmSpMJnwpdrAAAJz0lEQVRYnDWSKx9+svWuWdL4TcL58y5xWFtDK7EoO4tb\nJZjUQubksObYOUuSVBg7Zw2kxG55NieHqS2T2jHP5jXO42fnLElSYSzOkiQVxmFtLagLQ9lzcXhb\nUlfZOWvidfXNhbplWs43qxkWZ0mSCuOwtiRJlbo/x3lYFmfNa5KGgz3/rHFxOFvj4LC2JEmFsThr\nTpPUNfeb1N9L0mRxWFuSVJv+c7beSWx4ds6SJBXGzlnP4LCvpIV87lPPnf+HS59enD3zeck5e/iL\nX/rFMUU1vN/+5zv7HpUxW9vOWZKkwizYOUfEdcDZwL7MPKladxTwYeA44OvAuZn5rYgI4H3AWcBT\nwJsyc/t4Qpc0KPNZ49Z/adk73vruZ/ysxK4Zno7rt//5TpacsweA/Ztf1GZIi+qcPwScOWvdBuC2\nzDwBuK16DPBa4ITqax3w/nrC1Lhd+fCTUzOkPU2/6xw+hPmshrznmnfynmve2XYYi9b/5mGmSLdl\nweKcmZ8HHp+1ehWwqVreBKzuW3999twBHBERx9QVrKTRmM9SNww7IWxpZu6ulvfw9BSAZcCjfdvt\nrNbtZpaIWEfv3bikdpnPGqtSh7NLNvKEsMxMIId43sbMXJmZK0eNQRrGFA9tz8t81rT7i1/6xSLe\nTAxbnPfODG9V3/dV63cBx/Ztt7xaJ6lc5rNUmGGL82ZgTbW8Brilb/0F0XMq8O2+4TJJZTKfNRYv\nuO5vecF1f9t2GJ20mEupbgJeBRwdETuBy4ErgI9ExFrgEeDcavNP07vsYge9Sy/ePIaYVTOHd6eH\n+aymdL0o/8Uv/SK/TXuXVS1YnDNzvpujnjHHtgm8bdSgJI2H+Sx1g7fvlCRpDj++OUkLHbTFWVNt\nZkj/kuMPbzkSddX6pdcAz7wz1jTr+nB2Kby3tiRJhbE4T7Epv42lVKuZDlqTp41rny3OkiQVxuIs\nSaqF55vr44QwSarJJE4Om2+4vv93tCjXz85ZkqTC2DlLUs263kEvZnJb/zbX/uGxB9lSw7A4S9KY\nrF96TWcKtLPNy+KwtiRJhbFzluhd8+1dwjQOc3WkpXTTdsvlsjhLUsNmF8VRi7VFthnX/uGxrL30\n0Ub25bC2JEmFsXOWpJbZ+Wo2i/MU6z/HOu332PZ8s6SSOKwtSVJh7JwFHNg5TksnbccsqUQWZ83p\nkuMPn8gCbTGW1AUOa0uSVBg7Z81rkC5zXF22na6kaWTnLElSYRbsnCPiOuBsYF9mnlStey/wOuD7\nwFeBN2fmE9XPLgXWAj8Efi8z/25MsasgdrjdYD5L3bCYzvlDwJmz1t0KnJSZ/xn4CnApQEScCJwH\n/Hz1nP8bEYfUFq2kUX0I81kayNpLH/3xV1MWLM6Z+Xng8VnrPpuZ+6uHdwDLq+VVwM2Z+b3M/Bqw\nAzilxngljcB8lrqhjnPObwE+Uy0vA/rfWuys1h0gItZFxNaI2FpDDJLqYT5rYE13ldNgpNnaEXEZ\nsB+4YdDnZuZGYGP1OjlKHJJGZz5rVLML9LV/eGxLkYyu7TcbQxfniHgTvYklZ2TmTDLuAvr/Gsur\ndZIKZj5LZRmqOEfEmcB64JWZ+VTfjzYDN0bE1cBPAScA/zJylJLGxnzWuMzVfZbWTbfdIc8nnn6T\nPM8GETcBrwKOBvYCl9ObzfkTwGPVZndk5u9U219G77zVfuCizPzM7NecYx8Og0kH2paZK+t8QfNZ\npaqjaJdaaAEyMwbZfsHi3ASTWZpT7cW5CeazhmFxfiaLs1Qui7Om2mIKdskFuZ/FWZocFmdp
Qgxa\nnL23tiRJhbE4S5JUGIuzJEmFsThLklSYkW7fWaNvAv9RfS/R0ZQZW6lxQbmxlRoXHBjbT7cVyIj+\nHXiw7SDm0aW/fylKjQu6E9vAuVzEbG2AiNha6szUUmMrNS4oN7ZS44KyYxtEyb+HsQ2u1LhgsmNz\nWFuSpMJYnCVJKkxJxXlj2wEcRKmxlRoXlBtbqXFB2bENouTfw9gGV2pcMMGxFXPOWZIk9ZTUOUuS\nJCzOkiQVp/XiHBFnRsSDEbEjIja0HMuxEXF7RNwfEfdFxIXV+ndFxK6IuKv6Oqul+L4eEfdUMWyt\n1h0VEbdGxEPV9yMbjunn+o7LXRHxnYi4qK1jFhHXRcS+iLi3b92cxyh6/qz6t/fliDi5hdjeGxH/\nWu3/kxFxRLX+uIj4bt/x+8A4Y6tLKflsLg8dl/k8fFz15nJmtvYFHAJ8FTgeOBS4GzixxXiOAU6u\nlg8HvgKcCLwL+IM2j1UV09eBo2etuwrYUC1vAK5s+e+5h94F960cM+AVwMnAvQsdI+As4DNAAKcC\nW1qI7VeBJdXylX2xHde/XRe+Sspnc7m2v6f5vPi4as3ltjvnU4AdmflwZn4fuBlY1VYwmbk7M7dX\ny08CDwDL2opnkVYBm6rlTcDqFmM5A/hqZj7SVgCZ+Xng8Vmr5ztGq4Drs+cO4IiIOKbJ2DLzs5m5\nv3p4B7B8XPtvQDH5bC7XwnweIK66c7nt4rwM6P+k7J0UkkARcRzwMmBLtert1XDFdW0MN1US+GxE\nbIuIddW6pZm5u1reAyxtJzQAzgNu6ntcwjGD+Y9Raf/+3kLvnf+MFRHxpYj4x4j45baCGkBpxxMw\nl0dgPg9v5FxuuzgXKSKeC3wcuCgzvwO8H/gZ4BeA3cAftxTa6Zl5MvBa4G0R8Yr+H2ZvDKWVa+Mi\n4lDgHOCj1apSjtkztHmMDiYiLgP2AzdUq3YDL87MlwEXAzdGxPPaiq+rzOXhmM/DqyuX2y7Ou4Bj\n+x4vr9a1JiKeRS+Zb8jMTwBk5t7M/GFm/gi4ht7wXeMyc1f1fR/wySqOvTNDN9X3fW3ERu8/me2Z\nubeKsYhjVpnvGBXx7y8i3gScDbyx+s+GzPxeZj5WLW+jdy73Z5uObUBFHM8Z5vJIzOch1JnLbRfn\nO4ETImJF9U7tPGBzW8FERADXAg9k5tV96/vPW/w6cO/s5zYQ23Mi4vCZZXqTD+6ld7zWVJutAW5p\nOrbK+fQNgZVwzPrMd4w2AxdUszxPBb7dN1zWiIg4E1gPnJOZT/Wtf2FEHFItHw+cADzcZGxDKCaf\nzeWRmc8Dqj2XxzWbbbFf9GbYfYXeu4nLWo7ldHpDJF8G7qq+zgL+CrinWr8ZOKaF2I6nN/v1buC+\nmWMFvAC4DXgI+BxwVAuxPQd4DHh+37pWjhm9/1B2Az+gd85p7XzHiN6szj+v/u3dA6xsIbYd9M6T\nzfx7+0C17eurv/NdwHbgdU3/XYf8HYvIZ3N5pPjM5+HiqjWXvX2nJEmFaXtYW5IkzWJxliSpMBZn\nSZIKY3GWJKkwFmdJkgpjcZYkqTAWZ0mSCvP/ASpRAss5NWLXAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + } + ] +} \ No newline at end of file diff --git a/demo/predictor.py b/demo/predictor.py index b152fda8d..fa663c7e4 100644 --- a/demo/predictor.py +++ b/demo/predictor.py @@ -2,7 +2,7 @@ import cv2 import torch from torchvision import transforms as T - +from torchvision.transforms import functional as F from maskrcnn_benchmark.modeling.detector import build_detection_model from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer from maskrcnn_benchmark.structures.image_list import to_image_list @@ -10,7 +10,38 @@ from maskrcnn_benchmark import layers as L from maskrcnn_benchmark.utils import cv2_util +class Resize(object): + def __init__(self, min_size, max_size): + self.min_size = min_size + self.max_size = max_size + + # modified from torchvision to add support for max size + def get_size(self, image_size): + w, h = image_size + size = self.min_size + max_size = self.max_size + if max_size is not None: + min_original_size = float(min((w, h))) + max_original_size = float(max((w, h))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = size + oh = int(size * h / w) + else: + oh = size + ow = int(size * w / h) + + return (oh, ow) + def __call__(self, image): + size = self.get_size(image.size) + image = F.resize(image, size) + return image class COCODemo(object): # COCO categories for pretty print CATEGORIES = [ @@ -147,11 +178,12 @@ def build_transform(self): normalize_transform = T.Normalize( mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD ) - + min_size = cfg.INPUT.MIN_SIZE_TEST + max_size = cfg.INPUT.MAX_SIZE_TEST transform = T.Compose( [ T.ToPILImage(), - T.Resize(self.min_image_size), + Resize(min_size, max_size), T.ToTensor(), to_bgr_transform, normalize_transform, diff --git a/demo/shapes_dataset_demo.ipynb b/demo/shapes_dataset_demo.ipynb new file mode 100644 index 000000000..977f123c0 --- /dev/null +++ b/demo/shapes_dataset_demo.ipynb @@ -0,0 +1,3085 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "objdet.ipynb", + "version": "0.3.2", + "provenance": [], + "collapsed_sections": [ + "xnr8tbDz7WjS", + "5DC0K7tW7d-M", + "BI2ncK7kATEh", + "hbzY16ocEdrg", + "If8z4OZfDHmC", + "mOo-0LGFEAmc", + "bbCBInqHFUg7", + "tAn3omCjTFGI", + "BTKsrHa-TkGr" + ] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "268x1mG64rCy", + "colab_type": "text" + }, + "source": [ + "# Installation" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "VNvKG2TF3Y0B", + "colab_type": "code", + "outputId": "64393040-91f6-49e8-8656-f8de9480b867", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "%%writefile setup.sh\n", + "\n", + "# maskrcnn_benchmark and coco api dependencies\n", + "pip install ninja yacs cython matplotlib tqdm opencv-python\n", + "\n", + "# follow PyTorch installation in https://pytorch.org/get-started/locally/\n", + "# we give the instructions for CUDA 9.0\n", + "pip install -c pytorch pytorch-nightly torchvision cudatoolkit=9.0\n", + "\n", + "\n", + "git clone https://github.com/cocodataset/cocoapi.git\n", + "cd cocoapi/PythonAPI\n", + "python setup.py build_ext install\n", + "cd ../../\n", + "\n", + "# install apex\n", + "rm -rf apex\n", + "git 
clone https://github.com/NVIDIA/apex.git\n", + "cd apex\n", + "git pull\n", + "python setup.py install --cuda_ext --cpp_ext\n", + "cd ../\n", + "\n", + "# install PyTorch Detection\n", + "git clone https://github.com/facebookresearch/maskrcnn-benchmark.git\n", + "cd maskrcnn-benchmark\n", + "\n", + "# the following will install the lib with\n", + "# symbolic links, so that you can modify\n", + "# the files if you want and won't need to\n", + "# re-build it\n", + "python setup.py build develop\n" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing setup.sh\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NYzsp3Ng3mOy", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 13705 + }, + "outputId": "e1230ab4-a5b6-41b7-fa69-5b175a20b26d" + }, + "source": [ + "!sh setup.sh" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Collecting ninja\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/cc/bf/32e5dd5cce6543374e4050a7292099402ab80787eddf3732810a55b37763/ninja-1.9.0.post1-py3-none-manylinux1_x86_64.whl (98kB)\n", + "\u001b[K |████████████████████████████████| 102kB 32.5MB/s \n", + "\u001b[?25hCollecting yacs\n", + " Downloading https://files.pythonhosted.org/packages/2f/51/9d613d67a8561a0cdf696c3909870f157ed85617fea3cff769bb7de09ef7/yacs-0.1.6-py3-none-any.whl\n", + "Requirement already satisfied: cython in /usr/local/lib/python3.6/dist-packages (0.29.9)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (3.0.3)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (4.28.1)\n", + "Requirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (3.4.5.20)\n", + "Requirement already satisfied: PyYAML in /usr/local/lib/python3.6/dist-packages (from yacs) (3.13)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (0.10.0)\n", + "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.4.0)\n", + "Requirement already satisfied: numpy>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (1.16.4)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (1.1.0)\n", + "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.5.3)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->matplotlib) (1.12.0)\n", + "Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib) (41.0.1)\n", + "Installing collected packages: ninja, yacs\n", + "Successfully installed ninja-1.9.0.post1 yacs-0.1.6\n", + "\u001b[31mERROR: Could not open requirements file: [Errno 2] No such file or directory: 'pytorch'\u001b[0m\n", + "Cloning into 'cocoapi'...\n", + "remote: Enumerating objects: 953, done.\u001b[K\n", + "remote: Total 953 (delta 0), reused 0 (delta 0), pack-reused 953\u001b[K\n", + "Receiving objects: 100% (953/953), 11.70 MiB | 6.53 MiB/s, done.\n", + "Resolving deltas: 100% (565/565), done.\n", + "running build_ext\n", + "cythoning pycocotools/_mask.pyx to pycocotools/_mask.c\n", + "/usr/local/lib/python3.6/dist-packages/Cython/Compiler/Main.py:367: FutureWarning: 
Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /content/cocoapi/PythonAPI/pycocotools/_mask.pyx\n", + " tree = Parsing.p_module(s, pxd, full_module_name)\n", + "building 'pycocotools._mask' extension\n", + "creating build\n", + "creating build/common\n", + "creating build/temp.linux-x86_64-3.6\n", + "creating build/temp.linux-x86_64-3.6/pycocotools\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/local/lib/python3.6/dist-packages/numpy/core/include -I../common -I/usr/include/python3.6m -c ../common/maskApi.c -o build/temp.linux-x86_64-3.6/../common/maskApi.o -Wno-cpp -Wno-unused-function -std=c99\n", + "\u001b[01m\u001b[K../common/maskApi.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[KrleDecode\u001b[m\u001b[K’:\n", + "\u001b[01m\u001b[K../common/maskApi.c:46:7:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kthis ‘\u001b[01m\u001b[Kfor\u001b[m\u001b[K’ clause does not guard... [\u001b[01;35m\u001b[K-Wmisleading-indentation\u001b[m\u001b[K]\n", + " \u001b[01;35m\u001b[Kfor\u001b[m\u001b[K( k=0; k2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x;\n", + " \u001b[01;35m\u001b[K^~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K../common/maskApi.c:228:34:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[K...this statement, but the latter is misleadingly indented as if it were guarded by the ‘\u001b[01m\u001b[Kif\u001b[m\u001b[K’\n", + " if(m>2) x+=(long) cnts[m-2]; \u001b[01;36m\u001b[Kcnts\u001b[m\u001b[K[m++]=(uint) x;\n", + " \u001b[01;36m\u001b[K^~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K../common/maskApi.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[KrleToBbox\u001b[m\u001b[K’:\n", + "\u001b[01m\u001b[K../common/maskApi.c:141:31:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kxp\u001b[m\u001b[K’ may be used uninitialized in this function [\u001b[01;35m\u001b[K-Wmaybe-uninitialized\u001b[m\u001b[K]\n", + " if(j%2==0) xp=x; else if\u001b[01;35m\u001b[K(\u001b[m\u001b[Kxp build/lib.linux-x86_64-3.6/pycocotools\n", + "copying pycocotools/mask.py -> build/lib.linux-x86_64-3.6/pycocotools\n", + "copying pycocotools/coco.py -> build/lib.linux-x86_64-3.6/pycocotools\n", + "copying pycocotools/cocoeval.py -> build/lib.linux-x86_64-3.6/pycocotools\n", + "creating build/bdist.linux-x86_64\n", + "creating build/bdist.linux-x86_64/egg\n", + "creating build/bdist.linux-x86_64/egg/pycocotools\n", + "copying build/lib.linux-x86_64-3.6/pycocotools/__init__.py -> build/bdist.linux-x86_64/egg/pycocotools\n", + "copying build/lib.linux-x86_64-3.6/pycocotools/mask.py -> build/bdist.linux-x86_64/egg/pycocotools\n", + "copying build/lib.linux-x86_64-3.6/pycocotools/_mask.cpython-36m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg/pycocotools\n", + "copying build/lib.linux-x86_64-3.6/pycocotools/coco.py -> build/bdist.linux-x86_64/egg/pycocotools\n", + "copying build/lib.linux-x86_64-3.6/pycocotools/cocoeval.py -> build/bdist.linux-x86_64/egg/pycocotools\n", + "byte-compiling build/bdist.linux-x86_64/egg/pycocotools/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/pycocotools/mask.py to mask.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/pycocotools/coco.py to coco.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/pycocotools/cocoeval.py to cocoeval.cpython-36.pyc\n", + "creating stub loader for 
pycocotools/_mask.cpython-36m-x86_64-linux-gnu.so\n", + "byte-compiling build/bdist.linux-x86_64/egg/pycocotools/_mask.py to _mask.cpython-36.pyc\n", + "creating build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying pycocotools.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying pycocotools.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying pycocotools.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying pycocotools.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying pycocotools.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt\n", + "zip_safe flag not set; analyzing archive contents...\n", + "pycocotools.__pycache__._mask.cpython-36: module references __file__\n", + "creating dist\n", + "creating 'dist/pycocotools-2.0-py3.6-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it\n", + "removing 'build/bdist.linux-x86_64/egg' (and everything under it)\n", + "Processing pycocotools-2.0-py3.6-linux-x86_64.egg\n", + "creating /usr/local/lib/python3.6/dist-packages/pycocotools-2.0-py3.6-linux-x86_64.egg\n", + "Extracting pycocotools-2.0-py3.6-linux-x86_64.egg to /usr/local/lib/python3.6/dist-packages\n", + "Adding pycocotools 2.0 to easy-install.pth file\n", + "\n", + "Installed /usr/local/lib/python3.6/dist-packages/pycocotools-2.0-py3.6-linux-x86_64.egg\n", + "Processing dependencies for pycocotools==2.0\n", + "Searching for matplotlib==3.0.3\n", + "Best match: matplotlib 3.0.3\n", + "Adding matplotlib 3.0.3 to easy-install.pth file\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for Cython==0.29.9\n", + "Best match: Cython 0.29.9\n", + "Adding Cython 0.29.9 to easy-install.pth file\n", + "Installing cygdb script to /usr/local/bin\n", + "Installing cython script to /usr/local/bin\n", + "Installing cythonize script to /usr/local/bin\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for setuptools==41.0.1\n", + "Best match: setuptools 41.0.1\n", + "Adding setuptools 41.0.1 to easy-install.pth file\n", + "Installing easy_install script to /usr/local/bin\n", + "Installing easy_install-3.6 script to /usr/local/bin\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for pyparsing==2.4.0\n", + "Best match: pyparsing 2.4.0\n", + "Adding pyparsing 2.4.0 to easy-install.pth file\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for kiwisolver==1.1.0\n", + "Best match: kiwisolver 1.1.0\n", + "Adding kiwisolver 1.1.0 to easy-install.pth file\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for python-dateutil==2.5.3\n", + "Best match: python-dateutil 2.5.3\n", + "Adding python-dateutil 2.5.3 to easy-install.pth file\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for numpy==1.16.4\n", + "Best match: numpy 1.16.4\n", + "Adding numpy 1.16.4 to easy-install.pth file\n", + "Installing f2py script to /usr/local/bin\n", + "Installing f2py3 script to /usr/local/bin\n", + "Installing f2py3.6 script to /usr/local/bin\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for cycler==0.10.0\n", + "Best match: cycler 0.10.0\n", + "Adding cycler 0.10.0 to easy-install.pth file\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for six==1.12.0\n", + "Best match: six 1.12.0\n", + "Adding six 1.12.0 to 
easy-install.pth file\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Finished processing dependencies for pycocotools==2.0\n", + "Cloning into 'apex'...\n", + "remote: Enumerating objects: 28, done.\u001b[K\n", + "remote: Counting objects: 100% (28/28), done.\u001b[K\n", + "remote: Compressing objects: 100% (25/25), done.\u001b[K\n", + "remote: Total 4606 (delta 12), reused 7 (delta 3), pack-reused 4578\u001b[K\n", + "Receiving objects: 100% (4606/4606), 8.68 MiB | 6.00 MiB/s, done.\n", + "Resolving deltas: 100% (2982/2982), done.\n", + "Already up to date.\n", + "torch.__version__ = 1.1.0\n", + "\n", + "Compiling cuda extensions with\n", + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2018 NVIDIA Corporation\n", + "Built on Sat_Aug_25_21:08:01_CDT_2018\n", + "Cuda compilation tools, release 10.0, V10.0.130\n", + "from /usr/local/cuda/bin\n", + "\n", + "running install\n", + "running bdist_egg\n", + "running egg_info\n", + "creating apex.egg-info\n", + "writing apex.egg-info/PKG-INFO\n", + "writing dependency_links to apex.egg-info/dependency_links.txt\n", + "writing top-level names to apex.egg-info/top_level.txt\n", + "writing manifest file 'apex.egg-info/SOURCES.txt'\n", + "writing manifest file 'apex.egg-info/SOURCES.txt'\n", + "installing library code to build/bdist.linux-x86_64/egg\n", + "running install_lib\n", + "running build_py\n", + "creating build\n", + "creating build/lib.linux-x86_64-3.6\n", + "creating build/lib.linux-x86_64-3.6/apex\n", + "copying apex/__init__.py -> build/lib.linux-x86_64-3.6/apex\n", + "creating build/lib.linux-x86_64-3.6/apex/reparameterization\n", + "copying apex/reparameterization/__init__.py -> build/lib.linux-x86_64-3.6/apex/reparameterization\n", + "copying apex/reparameterization/weight_norm.py -> build/lib.linux-x86_64-3.6/apex/reparameterization\n", + "copying apex/reparameterization/reparameterization.py -> build/lib.linux-x86_64-3.6/apex/reparameterization\n", + "creating build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/sync_batchnorm_kernel.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/__init__.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/sync_batchnorm.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/LARC.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/multiproc.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/distributed.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/optimized_sync_batchnorm.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "copying apex/parallel/optimized_sync_batchnorm_kernel.py -> build/lib.linux-x86_64-3.6/apex/parallel\n", + "creating build/lib.linux-x86_64-3.6/apex/fp16_utils\n", + "copying apex/fp16_utils/__init__.py -> build/lib.linux-x86_64-3.6/apex/fp16_utils\n", + "copying apex/fp16_utils/fp16_optimizer.py -> build/lib.linux-x86_64-3.6/apex/fp16_utils\n", + "copying apex/fp16_utils/fp16util.py -> build/lib.linux-x86_64-3.6/apex/fp16_utils\n", + "copying apex/fp16_utils/loss_scaler.py -> build/lib.linux-x86_64-3.6/apex/fp16_utils\n", + "creating build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/rnn_compat.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/frontend.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/wrap.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/__init__.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + 
"copying apex/amp/amp.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/compat.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/_process_optimizer.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/__version__.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/opt.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/_initialize.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/_amp_state.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/utils.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/handle.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "copying apex/amp/scaler.py -> build/lib.linux-x86_64-3.6/apex/amp\n", + "creating build/lib.linux-x86_64-3.6/apex/normalization\n", + "copying apex/normalization/__init__.py -> build/lib.linux-x86_64-3.6/apex/normalization\n", + "copying apex/normalization/fused_layer_norm.py -> build/lib.linux-x86_64-3.6/apex/normalization\n", + "creating build/lib.linux-x86_64-3.6/apex/multi_tensor_apply\n", + "copying apex/multi_tensor_apply/__init__.py -> build/lib.linux-x86_64-3.6/apex/multi_tensor_apply\n", + "copying apex/multi_tensor_apply/multi_tensor_apply.py -> build/lib.linux-x86_64-3.6/apex/multi_tensor_apply\n", + "creating build/lib.linux-x86_64-3.6/apex/optimizers\n", + "copying apex/optimizers/fused_adam.py -> build/lib.linux-x86_64-3.6/apex/optimizers\n", + "copying apex/optimizers/__init__.py -> build/lib.linux-x86_64-3.6/apex/optimizers\n", + "copying apex/optimizers/fp16_optimizer.py -> build/lib.linux-x86_64-3.6/apex/optimizers\n", + "creating build/lib.linux-x86_64-3.6/apex/RNN\n", + "copying apex/RNN/__init__.py -> build/lib.linux-x86_64-3.6/apex/RNN\n", + "copying apex/RNN/RNNBackend.py -> build/lib.linux-x86_64-3.6/apex/RNN\n", + "copying apex/RNN/models.py -> build/lib.linux-x86_64-3.6/apex/RNN\n", + "copying apex/RNN/cells.py -> build/lib.linux-x86_64-3.6/apex/RNN\n", + "creating build/lib.linux-x86_64-3.6/apex/amp/lists\n", + "copying apex/amp/lists/__init__.py -> build/lib.linux-x86_64-3.6/apex/amp/lists\n", + "copying apex/amp/lists/torch_overrides.py -> build/lib.linux-x86_64-3.6/apex/amp/lists\n", + "copying apex/amp/lists/functional_overrides.py -> build/lib.linux-x86_64-3.6/apex/amp/lists\n", + "copying apex/amp/lists/tensor_overrides.py -> build/lib.linux-x86_64-3.6/apex/amp/lists\n", + "running build_ext\n", + "building 'apex_C' extension\n", + "creating build/temp.linux-x86_64-3.6\n", + "creating build/temp.linux-x86_64-3.6/csrc\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/include/python3.6m -c csrc/flatten_unflatten.cpp -o build/temp.linux-x86_64-3.6/csrc/flatten_unflatten.o -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=apex_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "x86_64-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.6/csrc/flatten_unflatten.o -o 
build/lib.linux-x86_64-3.6/apex_C.cpython-36m-x86_64-linux-gnu.so\n", + "building 'amp_C' extension\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/amp_C_frontend.cpp -o build/temp.linux-x86_64-3.6/csrc/amp_C_frontend.o -O3 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=amp_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/multi_tensor_scale_kernel.cu -o build/temp.linux-x86_64-3.6/csrc/multi_tensor_scale_kernel.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -lineinfo -O3 --use_fast_math -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=amp_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/multi_tensor_axpby_kernel.cu -o build/temp.linux-x86_64-3.6/csrc/multi_tensor_axpby_kernel.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -lineinfo -O3 --use_fast_math -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=amp_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/multi_tensor_l2norm_kernel.cu -o build/temp.linux-x86_64-3.6/csrc/multi_tensor_l2norm_kernel.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -lineinfo -O3 --use_fast_math -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=amp_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/multi_tensor_lamb_stage_1.cu -o build/temp.linux-x86_64-3.6/csrc/multi_tensor_lamb_stage_1.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -lineinfo -O3 --use_fast_math -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=amp_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc 
-I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/multi_tensor_lamb_stage_2.cu -o build/temp.linux-x86_64-3.6/csrc/multi_tensor_lamb_stage_2.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -lineinfo -O3 --use_fast_math -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=amp_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "x86_64-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.6/csrc/amp_C_frontend.o build/temp.linux-x86_64-3.6/csrc/multi_tensor_scale_kernel.o build/temp.linux-x86_64-3.6/csrc/multi_tensor_axpby_kernel.o build/temp.linux-x86_64-3.6/csrc/multi_tensor_l2norm_kernel.o build/temp.linux-x86_64-3.6/csrc/multi_tensor_lamb_stage_1.o build/temp.linux-x86_64-3.6/csrc/multi_tensor_lamb_stage_2.o -L/usr/local/cuda/lib64 -lcudart -o build/lib.linux-x86_64-3.6/amp_C.cpython-36m-x86_64-linux-gnu.so\n", + "building 'fused_adam_cuda' extension\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/fused_adam_cuda.cpp -o build/temp.linux-x86_64-3.6/csrc/fused_adam_cuda.o -O3 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=fused_adam_cuda -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/fused_adam_cuda_kernel.cu -o build/temp.linux-x86_64-3.6/csrc/fused_adam_cuda_kernel.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -O3 --use_fast_math -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=fused_adam_cuda -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "x86_64-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.6/csrc/fused_adam_cuda.o build/temp.linux-x86_64-3.6/csrc/fused_adam_cuda_kernel.o -L/usr/local/cuda/lib64 -lcudart -o build/lib.linux-x86_64-3.6/fused_adam_cuda.cpython-36m-x86_64-linux-gnu.so\n", + "building 'syncbn' extension\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include 
-I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/syncbn.cpp -o build/temp.linux-x86_64-3.6/csrc/syncbn.o -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=syncbn -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/welford.cu -o build/temp.linux-x86_64-3.6/csrc/welford.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=syncbn -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "x86_64-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.6/csrc/syncbn.o build/temp.linux-x86_64-3.6/csrc/welford.o -L/usr/local/cuda/lib64 -lcudart -o build/lib.linux-x86_64-3.6/syncbn.cpython-36m-x86_64-linux-gnu.so\n", + "building 'fused_layer_norm_cuda' extension\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/layer_norm_cuda.cpp -o build/temp.linux-x86_64-3.6/csrc/layer_norm_cuda.o -O3 -DVERSION_GE_1_1 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=fused_layer_norm_cuda -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c csrc/layer_norm_cuda_kernel.cu -o build/temp.linux-x86_64-3.6/csrc/layer_norm_cuda_kernel.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -maxrregcount=50 -O3 --use_fast_math -DVERSION_GE_1_1 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=fused_layer_norm_cuda -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "x86_64-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.6/csrc/layer_norm_cuda.o build/temp.linux-x86_64-3.6/csrc/layer_norm_cuda_kernel.o -L/usr/local/cuda/lib64 -lcudart -o build/lib.linux-x86_64-3.6/fused_layer_norm_cuda.cpython-36m-x86_64-linux-gnu.so\n", + "creating build/bdist.linux-x86_64\n", + "creating build/bdist.linux-x86_64/egg\n", + "copying build/lib.linux-x86_64-3.6/fused_adam_cuda.cpython-36m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg\n", + "copying 
build/lib.linux-x86_64-3.6/apex_C.cpython-36m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg\n", + "copying build/lib.linux-x86_64-3.6/syncbn.cpython-36m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg\n", + "creating build/bdist.linux-x86_64/egg/apex\n", + "creating build/bdist.linux-x86_64/egg/apex/reparameterization\n", + "copying build/lib.linux-x86_64-3.6/apex/reparameterization/__init__.py -> build/bdist.linux-x86_64/egg/apex/reparameterization\n", + "copying build/lib.linux-x86_64-3.6/apex/reparameterization/weight_norm.py -> build/bdist.linux-x86_64/egg/apex/reparameterization\n", + "copying build/lib.linux-x86_64-3.6/apex/reparameterization/reparameterization.py -> build/bdist.linux-x86_64/egg/apex/reparameterization\n", + "copying build/lib.linux-x86_64-3.6/apex/__init__.py -> build/bdist.linux-x86_64/egg/apex\n", + "creating build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/sync_batchnorm_kernel.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/__init__.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/sync_batchnorm.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/LARC.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/multiproc.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/distributed.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/optimized_sync_batchnorm.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "copying build/lib.linux-x86_64-3.6/apex/parallel/optimized_sync_batchnorm_kernel.py -> build/bdist.linux-x86_64/egg/apex/parallel\n", + "creating build/bdist.linux-x86_64/egg/apex/fp16_utils\n", + "copying build/lib.linux-x86_64-3.6/apex/fp16_utils/__init__.py -> build/bdist.linux-x86_64/egg/apex/fp16_utils\n", + "copying build/lib.linux-x86_64-3.6/apex/fp16_utils/fp16_optimizer.py -> build/bdist.linux-x86_64/egg/apex/fp16_utils\n", + "copying build/lib.linux-x86_64-3.6/apex/fp16_utils/fp16util.py -> build/bdist.linux-x86_64/egg/apex/fp16_utils\n", + "copying build/lib.linux-x86_64-3.6/apex/fp16_utils/loss_scaler.py -> build/bdist.linux-x86_64/egg/apex/fp16_utils\n", + "creating build/bdist.linux-x86_64/egg/apex/amp\n", + "creating build/bdist.linux-x86_64/egg/apex/amp/lists\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/lists/__init__.py -> build/bdist.linux-x86_64/egg/apex/amp/lists\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/lists/torch_overrides.py -> build/bdist.linux-x86_64/egg/apex/amp/lists\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/lists/functional_overrides.py -> build/bdist.linux-x86_64/egg/apex/amp/lists\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/lists/tensor_overrides.py -> build/bdist.linux-x86_64/egg/apex/amp/lists\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/rnn_compat.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/frontend.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/wrap.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/__init__.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/amp.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying 
build/lib.linux-x86_64-3.6/apex/amp/compat.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/_process_optimizer.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/__version__.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/opt.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/_initialize.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/_amp_state.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/utils.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/handle.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "copying build/lib.linux-x86_64-3.6/apex/amp/scaler.py -> build/bdist.linux-x86_64/egg/apex/amp\n", + "creating build/bdist.linux-x86_64/egg/apex/normalization\n", + "copying build/lib.linux-x86_64-3.6/apex/normalization/__init__.py -> build/bdist.linux-x86_64/egg/apex/normalization\n", + "copying build/lib.linux-x86_64-3.6/apex/normalization/fused_layer_norm.py -> build/bdist.linux-x86_64/egg/apex/normalization\n", + "creating build/bdist.linux-x86_64/egg/apex/multi_tensor_apply\n", + "copying build/lib.linux-x86_64-3.6/apex/multi_tensor_apply/__init__.py -> build/bdist.linux-x86_64/egg/apex/multi_tensor_apply\n", + "copying build/lib.linux-x86_64-3.6/apex/multi_tensor_apply/multi_tensor_apply.py -> build/bdist.linux-x86_64/egg/apex/multi_tensor_apply\n", + "creating build/bdist.linux-x86_64/egg/apex/optimizers\n", + "copying build/lib.linux-x86_64-3.6/apex/optimizers/fused_adam.py -> build/bdist.linux-x86_64/egg/apex/optimizers\n", + "copying build/lib.linux-x86_64-3.6/apex/optimizers/__init__.py -> build/bdist.linux-x86_64/egg/apex/optimizers\n", + "copying build/lib.linux-x86_64-3.6/apex/optimizers/fp16_optimizer.py -> build/bdist.linux-x86_64/egg/apex/optimizers\n", + "creating build/bdist.linux-x86_64/egg/apex/RNN\n", + "copying build/lib.linux-x86_64-3.6/apex/RNN/__init__.py -> build/bdist.linux-x86_64/egg/apex/RNN\n", + "copying build/lib.linux-x86_64-3.6/apex/RNN/RNNBackend.py -> build/bdist.linux-x86_64/egg/apex/RNN\n", + "copying build/lib.linux-x86_64-3.6/apex/RNN/models.py -> build/bdist.linux-x86_64/egg/apex/RNN\n", + "copying build/lib.linux-x86_64-3.6/apex/RNN/cells.py -> build/bdist.linux-x86_64/egg/apex/RNN\n", + "copying build/lib.linux-x86_64-3.6/fused_layer_norm_cuda.cpython-36m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg\n", + "copying build/lib.linux-x86_64-3.6/amp_C.cpython-36m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/reparameterization/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/reparameterization/weight_norm.py to weight_norm.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/reparameterization/reparameterization.py to reparameterization.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/sync_batchnorm_kernel.py to sync_batchnorm_kernel.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/sync_batchnorm.py to sync_batchnorm.cpython-36.pyc\n", + 
"byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/LARC.py to LARC.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/multiproc.py to multiproc.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/distributed.py to distributed.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/optimized_sync_batchnorm.py to optimized_sync_batchnorm.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/parallel/optimized_sync_batchnorm_kernel.py to optimized_sync_batchnorm_kernel.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/fp16_utils/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/fp16_utils/fp16_optimizer.py to fp16_optimizer.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/fp16_utils/fp16util.py to fp16util.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/fp16_utils/loss_scaler.py to loss_scaler.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/lists/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/lists/torch_overrides.py to torch_overrides.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/lists/functional_overrides.py to functional_overrides.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/lists/tensor_overrides.py to tensor_overrides.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/rnn_compat.py to rnn_compat.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/frontend.py to frontend.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/wrap.py to wrap.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/amp.py to amp.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/compat.py to compat.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/_process_optimizer.py to _process_optimizer.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/__version__.py to __version__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/opt.py to opt.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/_initialize.py to _initialize.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/_amp_state.py to _amp_state.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/utils.py to utils.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/handle.py to handle.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/amp/scaler.py to scaler.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/normalization/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/normalization/fused_layer_norm.py to fused_layer_norm.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/multi_tensor_apply/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/multi_tensor_apply/multi_tensor_apply.py to multi_tensor_apply.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/optimizers/fused_adam.py to fused_adam.cpython-36.pyc\n", + "byte-compiling 
build/bdist.linux-x86_64/egg/apex/optimizers/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/optimizers/fp16_optimizer.py to fp16_optimizer.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/RNN/__init__.py to __init__.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/RNN/RNNBackend.py to RNNBackend.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/RNN/models.py to models.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex/RNN/cells.py to cells.cpython-36.pyc\n", + "creating stub loader for apex_C.cpython-36m-x86_64-linux-gnu.so\n", + "creating stub loader for amp_C.cpython-36m-x86_64-linux-gnu.so\n", + "creating stub loader for fused_adam_cuda.cpython-36m-x86_64-linux-gnu.so\n", + "creating stub loader for syncbn.cpython-36m-x86_64-linux-gnu.so\n", + "creating stub loader for fused_layer_norm_cuda.cpython-36m-x86_64-linux-gnu.so\n", + "byte-compiling build/bdist.linux-x86_64/egg/apex_C.py to apex_C.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/amp_C.py to amp_C.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/fused_adam_cuda.py to fused_adam_cuda.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/syncbn.py to syncbn.cpython-36.pyc\n", + "byte-compiling build/bdist.linux-x86_64/egg/fused_layer_norm_cuda.py to fused_layer_norm_cuda.cpython-36.pyc\n", + "creating build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying apex.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying apex.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying apex.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying apex.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt\n", + "zip_safe flag not set; analyzing archive contents...\n", + "__pycache__.amp_C.cpython-36: module references __file__\n", + "__pycache__.apex_C.cpython-36: module references __file__\n", + "__pycache__.fused_adam_cuda.cpython-36: module references __file__\n", + "__pycache__.fused_layer_norm_cuda.cpython-36: module references __file__\n", + "__pycache__.syncbn.cpython-36: module references __file__\n", + "creating dist\n", + "creating 'dist/apex-0.1-py3.6-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it\n", + "removing 'build/bdist.linux-x86_64/egg' (and everything under it)\n", + "Processing apex-0.1-py3.6-linux-x86_64.egg\n", + "creating /usr/local/lib/python3.6/dist-packages/apex-0.1-py3.6-linux-x86_64.egg\n", + "Extracting apex-0.1-py3.6-linux-x86_64.egg to /usr/local/lib/python3.6/dist-packages\n", + "Adding apex 0.1 to easy-install.pth file\n", + "\n", + "Installed /usr/local/lib/python3.6/dist-packages/apex-0.1-py3.6-linux-x86_64.egg\n", + "Processing dependencies for apex==0.1\n", + "Finished processing dependencies for apex==0.1\n", + "Cloning into 'maskrcnn-benchmark'...\n", + "remote: Enumerating objects: 1, done.\u001b[K\n", + "remote: Counting objects: 100% (1/1), done.\u001b[K\n", + "remote: Total 1524 (delta 0), reused 1 (delta 0), pack-reused 1523\u001b[K\n", + "Receiving objects: 100% (1524/1524), 6.31 MiB | 4.94 MiB/s, done.\n", + "Resolving deltas: 100% (917/917), done.\n", + "running build\n", + "running build_py\n", + "creating build\n", + "creating build/lib.linux-x86_64-3.6\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark\n", + "copying 
maskrcnn_benchmark/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/engine\n", + "copying maskrcnn_benchmark/engine/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/engine\n", + "copying maskrcnn_benchmark/engine/inference.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/engine\n", + "copying maskrcnn_benchmark/engine/bbox_aug.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/engine\n", + "copying maskrcnn_benchmark/engine/trainer.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/engine\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/solver\n", + "copying maskrcnn_benchmark/solver/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/solver\n", + "copying maskrcnn_benchmark/solver/build.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/solver\n", + "copying maskrcnn_benchmark/solver/lr_scheduler.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/solver\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/config\n", + "copying maskrcnn_benchmark/config/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/config\n", + "copying maskrcnn_benchmark/config/defaults.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/config\n", + "copying maskrcnn_benchmark/config/paths_catalog.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/config\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/sigmoid_focal_loss.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/misc.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/smooth_l1_loss.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/roi_align.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/_utils.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/nms.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/batch_norm.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "copying maskrcnn_benchmark/layers/roi_pool.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data\n", + "copying maskrcnn_benchmark/data/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data\n", + "copying maskrcnn_benchmark/data/collate_batch.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data\n", + "copying maskrcnn_benchmark/data/build.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/structures\n", + "copying maskrcnn_benchmark/structures/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/structures\n", + "copying maskrcnn_benchmark/structures/boxlist_ops.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/structures\n", + "copying maskrcnn_benchmark/structures/image_list.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/structures\n", + "copying maskrcnn_benchmark/structures/segmentation_mask.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/structures\n", + "copying maskrcnn_benchmark/structures/keypoint.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/structures\n", + "copying 
maskrcnn_benchmark/structures/bounding_box.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/structures\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/box_coder.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/matcher.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/registry.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/utils.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/poolers.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "copying maskrcnn_benchmark/modeling/make_layers.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/comm.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/checkpoint.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/metric_logger.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/model_serialization.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/miscellaneous.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/env.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/imports.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/timer.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/cv2_util.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/c2_model_loading.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/registry.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/collect_env.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/model_zoo.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "copying maskrcnn_benchmark/utils/logger.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/utils\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers/dcn\n", + "copying maskrcnn_benchmark/layers/dcn/deform_conv_module.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers/dcn\n", + "copying maskrcnn_benchmark/layers/dcn/deform_pool_module.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers/dcn\n", + "copying maskrcnn_benchmark/layers/dcn/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers/dcn\n", + "copying maskrcnn_benchmark/layers/dcn/deform_pool_func.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers/dcn\n", + "copying maskrcnn_benchmark/layers/dcn/deform_conv_func.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/layers/dcn\n", + "creating 
build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/transforms\n", + "copying maskrcnn_benchmark/data/transforms/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/transforms\n", + "copying maskrcnn_benchmark/data/transforms/build.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/transforms\n", + "copying maskrcnn_benchmark/data/transforms/transforms.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/transforms\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/samplers\n", + "copying maskrcnn_benchmark/data/samplers/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/samplers\n", + "copying maskrcnn_benchmark/data/samplers/distributed.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/samplers\n", + "copying maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/samplers\n", + "copying maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/samplers\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets\n", + "copying maskrcnn_benchmark/data/datasets/voc.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets\n", + "copying maskrcnn_benchmark/data/datasets/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets\n", + "copying maskrcnn_benchmark/data/datasets/coco.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets\n", + "copying maskrcnn_benchmark/data/datasets/concat_dataset.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets\n", + "copying maskrcnn_benchmark/data/datasets/list_dataset.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation\n", + "copying maskrcnn_benchmark/data/datasets/evaluation/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation/coco\n", + "copying maskrcnn_benchmark/data/datasets/evaluation/coco/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation/coco\n", + "copying maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation/coco\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation/voc\n", + "copying maskrcnn_benchmark/data/datasets/evaluation/voc/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation/voc\n", + "copying maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/data/datasets/evaluation/voc\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn\n", + "copying maskrcnn_benchmark/modeling/rpn/anchor_generator.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn\n", + "copying maskrcnn_benchmark/modeling/rpn/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn\n", + "copying maskrcnn_benchmark/modeling/rpn/loss.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn\n", + "copying maskrcnn_benchmark/modeling/rpn/inference.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn\n", + "copying maskrcnn_benchmark/modeling/rpn/rpn.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn\n", + "copying maskrcnn_benchmark/modeling/rpn/utils.py -> 
build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/detector\n", + "copying maskrcnn_benchmark/modeling/detector/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/detector\n", + "copying maskrcnn_benchmark/modeling/detector/detectors.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/detector\n", + "copying maskrcnn_benchmark/modeling/detector/generalized_rcnn.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/detector\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads\n", + "copying maskrcnn_benchmark/modeling/roi_heads/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads\n", + "copying maskrcnn_benchmark/modeling/roi_heads/roi_heads.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "copying maskrcnn_benchmark/modeling/backbone/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "copying maskrcnn_benchmark/modeling/backbone/fbnet.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "copying maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "copying maskrcnn_benchmark/modeling/backbone/fpn.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "copying maskrcnn_benchmark/modeling/backbone/fbnet_builder.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "copying maskrcnn_benchmark/modeling/backbone/backbone.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "copying maskrcnn_benchmark/modeling/backbone/resnet.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/backbone\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn/retinanet\n", + "copying maskrcnn_benchmark/modeling/rpn/retinanet/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn/retinanet\n", + "copying maskrcnn_benchmark/modeling/rpn/retinanet/loss.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn/retinanet\n", + "copying maskrcnn_benchmark/modeling/rpn/retinanet/inference.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn/retinanet\n", + "copying maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/rpn/retinanet\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/box_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/box_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/box_head/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/box_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/box_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/box_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/box_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py -> 
build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/box_head\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/mask_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/mask_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/mask_head/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/mask_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/mask_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/mask_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/mask_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/mask_head\n", + "creating build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/keypoint_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/keypoint_head/keypoint_head.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/keypoint_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/keypoint_head/__init__.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/keypoint_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/keypoint_head/loss.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/keypoint_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/keypoint_head/roi_keypoint_predictors.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/keypoint_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/keypoint_head\n", + "copying maskrcnn_benchmark/modeling/roi_heads/keypoint_head/roi_keypoint_feature_extractors.py -> build/lib.linux-x86_64-3.6/maskrcnn_benchmark/modeling/roi_heads/keypoint_head\n", + "running build_ext\n", + "building 'maskrcnn_benchmark._C' extension\n", + "creating build/temp.linux-x86_64-3.6\n", + "creating build/temp.linux-x86_64-3.6/content\n", + "creating build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark\n", + "creating build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark\n", + "creating build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc\n", + "creating build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu\n", + "creating build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/vision.cpp -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/vision.o 
-DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.o -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "In file included from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/ATen.h:9:0\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/types.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader_options.h:4\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/all.h:4\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/extension.h:4\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/vision.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp:2\u001b[m\u001b[K:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:71:52:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " at::ScalarType _st = ::detail::scalar_type(TYPE\u001b[01;35m\u001b[K)\u001b[m\u001b[K; \\\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp:242:3:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kin expansion of macro ‘\u001b[01m\u001b[KAT_DISPATCH_FLOATING_TYPES\u001b[m\u001b[K’\n", + " \u001b[01;36m\u001b[KAT_DISPATCH_FLOATING_TYPES\u001b[m\u001b[K(input.type(), \"ROIAlign_forward\", [&] {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n", + 
"\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:23:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " inline at::ScalarType \u001b[01;36m\u001b[Kscalar_type\u001b[m\u001b[K(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/nms_cpu.o -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "In file included from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/ATen.h:9:0\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/types.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader_options.h:4\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/data.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include/torch/all.h:4\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/torch/extension.h:4\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/vision.h:3\u001b[m\u001b[K,\n", + " from \u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp:2\u001b[m\u001b[K:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:71:52:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " at::ScalarType _st = ::detail::scalar_type(TYPE\u001b[01;35m\u001b[K)\u001b[m\u001b[K; \\\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp:71:3:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kin expansion of macro ‘\u001b[01m\u001b[KAT_DISPATCH_FLOATING_TYPES\u001b[m\u001b[K’\n", + " 
\u001b[01;36m\u001b[KAT_DISPATCH_FLOATING_TYPES\u001b[m\u001b[K(dets.type(), \"nms\", [&] {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:23:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " inline at::ScalarType \u001b[01;36m\u001b[Kscalar_type\u001b[m\u001b[K(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(83): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lowest\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(84): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"max\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(85): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lower_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(86): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"upper_bound\") is not allowed. 
The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:266:124:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:360:126:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:458:126:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:788:124:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " 
\u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:820:126:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu:853:126:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/nms.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/nms.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC 
-I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(83): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lowest\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(84): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"max\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(85): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lower_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(86): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"upper_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu:283:120:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES(input.type(), \"ROIAlign_forward\", [&] {\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu:329:118:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES(grad.type(), \"ROIAlign_backward\", [&] {\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " 
\u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_cuda.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_cuda.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_cuda.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_cuda.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(83): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lowest\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(84): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"max\") is not allowed. 
The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(85): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lower_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(86): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"upper_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu:137:120:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES(input.type(), \"ROIPool_forward\", [&] {\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu:185:118:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES(grad.type(), \"ROIPool_backward\", [&] {\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", 
+ "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(83): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lowest\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(84): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"max\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(85): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lower_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(86): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"upper_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu:292:118:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu:343:126:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES_AND_HALF(\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "/usr/local/cuda/bin/nvcc -DWITH_CUDA -I/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc -I/usr/local/lib/python3.6/dist-packages/torch/include -I/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -I/usr/local/lib/python3.6/dist-packages/torch/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c 
/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu -o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(83): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lowest\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(84): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"max\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(85): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"lower_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "/usr/local/lib/python3.6/dist-packages/torch/include/ATen/cuda/NumericLimits.cuh(86): warning: calling a constexpr __host__ function(\"from_bits\") from a __host__ __device__ function(\"upper_bound\") is not allowed. The experimental flag '--expt-relaxed-constexpr' can be used to allow this.\n", + "\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu:129:122:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES(logits.type(), \"SigmoidFocalLoss_forward\", [&] {\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " \u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu:\u001b[m\u001b[K In lambda function:\n", + "\u001b[01m\u001b[K/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu:173:122:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kc10::ScalarType detail::scalar_type(const at::DeprecatedTypeProperties&)\u001b[m\u001b[K’ is deprecated [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " AT_DISPATCH_FLOATING_TYPES(logits.type(), \"SigmoidFocalLoss_backward\", [&] {\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/torch/include/ATen/Dispatch.h:47:1:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " 
\u001b[01;36m\u001b[Kinline at::\u001b[m\u001b[KScalarType scalar_type(const at::DeprecatedTypeProperties &t) {\n", + " \u001b[01;36m\u001b[K^~~~~~~~~~~\u001b[m\u001b[K\n", + "x86_64-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/vision.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cpu/nms_cpu.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/nms.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_conv_cuda.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_cuda.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.o build/temp.linux-x86_64-3.6/content/maskrcnn-benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.o -L/usr/local/cuda/lib64 -lcudart -o build/lib.linux-x86_64-3.6/maskrcnn_benchmark/_C.cpython-36m-x86_64-linux-gnu.so\n", + "running develop\n", + "running egg_info\n", + "creating maskrcnn_benchmark.egg-info\n", + "writing maskrcnn_benchmark.egg-info/PKG-INFO\n", + "writing dependency_links to maskrcnn_benchmark.egg-info/dependency_links.txt\n", + "writing top-level names to maskrcnn_benchmark.egg-info/top_level.txt\n", + "writing manifest file 'maskrcnn_benchmark.egg-info/SOURCES.txt'\n", + "writing manifest file 'maskrcnn_benchmark.egg-info/SOURCES.txt'\n", + "running build_ext\n", + "copying build/lib.linux-x86_64-3.6/maskrcnn_benchmark/_C.cpython-36m-x86_64-linux-gnu.so -> maskrcnn_benchmark\n", + "Creating /usr/local/lib/python3.6/dist-packages/maskrcnn-benchmark.egg-link (link to .)\n", + "Adding maskrcnn-benchmark 0.1 to easy-install.pth file\n", + "\n", + "Installed /content/maskrcnn-benchmark\n", + "Processing dependencies for maskrcnn-benchmark==0.1\n", + "Finished processing dependencies for maskrcnn-benchmark==0.1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1uoPMGDl49Wk", + "colab_type": "text" + }, + "source": [ + "### Checking our Installation\n", + "\n", + "If a `Module not found` error appears, restart the runtime. 
The libraries should import correctly after restarting." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3q-n76S95KA3", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import maskrcnn_benchmark" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-N9mxq4OX6Yc", + "colab_type": "text" + }, + "source": [ + "# Imports" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kLzesfGNX9O2", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import torch\n", + "\n", + "# Set up custom environment before nearly anything else is imported\n", + "# NOTE: this should be the first import (do not reorder)\n", + "from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n", + "\n", + "from maskrcnn_benchmark.data.build import *\n", + "from maskrcnn_benchmark.structures.bounding_box import BoxList\n", + "from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask\n", + "from maskrcnn_benchmark.modeling.detector import build_detection_model\n", + "from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\n", + "from maskrcnn_benchmark.structures.image_list import to_image_list\n", + "from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\n", + "from maskrcnn_benchmark import layers as L\n", + "from maskrcnn_benchmark.utils import cv2_util\n", + "from maskrcnn_benchmark.utils.miscellaneous import mkdir\n", + "from maskrcnn_benchmark.utils.logger import setup_logger\n", + "from maskrcnn_benchmark.utils.comm import synchronize, get_rank\n", + "from maskrcnn_benchmark.config import cfg\n", + "from maskrcnn_benchmark.data import make_data_loader\n", + "from maskrcnn_benchmark.solver import make_lr_scheduler\n", + "from maskrcnn_benchmark.solver import make_optimizer\n", + "from maskrcnn_benchmark.engine.inference import inference\n", + "from maskrcnn_benchmark.engine.trainer import do_train\n", + "from maskrcnn_benchmark.utils.collect_env import collect_env_info\n", + "from maskrcnn_benchmark.utils.imports import import_file\n", + "from maskrcnn_benchmark.data.datasets.evaluation import evaluate\n", + "from maskrcnn_benchmark.utils.comm import is_main_process, get_world_size\n", + "from maskrcnn_benchmark.utils.comm import all_gather\n", + "from maskrcnn_benchmark.utils.timer import Timer, get_time_str\n", + "from maskrcnn_benchmark.engine.inference import compute_on_dataset, _accumulate_predictions_from_multiple_gpus\n", + "from maskrcnn_benchmark.data.datasets.evaluation.coco import coco_evaluation\n", + "\n", + "from PIL import Image\n", + "import json\n", + "import logging\n", + "import numpy as np\n", + "import skimage.draw as draw\n", + "import tempfile\n", + "from pycocotools.coco import COCO\n", + "import os\n", + "import sys\n", + "import random\n", + "import math\n", + "import re\n", + "import time\n", + "import cv2\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "from tqdm import tqdm\n", + "\n", + "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\n", + "from torchvision import transforms as T\n", + "from torchvision.transforms import functional as F\n", + "from google.colab.patches import cv2_imshow\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DvU-NYKJ3uzb", + "colab_type": "text" + }, + "source": [ + "# Loading Our Dataset\n", + "\n", + "To train a network using the `facebookresearch/maskrcnn-benchmark` repo, we first need to define our dataset. The dataset needs to be a subclass of `object` and should implement 6 things.\n", + "\n", + "1. `__getitem__(self, idx)`: This function should return a PIL Image, a BoxList and the idx. The BoxList is an abstraction for our bounding boxes, segmentation masks, class labels and also person keypoints. Please check [ABSTRACTIONS.md](https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/ABSTRACTIONS.md) for more details on this.\n", + "\n", + "2. `__len__()`: returns the length of the dataset.\n", + "\n", + "3. `get_img_info(self, idx)`: Returns a dict of image info with the fields \"height\" and \"width\" filled in with the height and width of the image at idx.\n", + "\n", + "4. `self.coco`: Should be a variable that holds the COCO object for your annotations so that you can perform evaluations of your dataset.\n", + "\n", + "5. `self.id_to_img_map`: A dictionary that maps the dataset ids to the COCO image ids. In almost all cases, simply map each idx to itself. This is just a requirement of the COCO evaluation.\n", + "\n", + "6. `self.contiguous_category_id_to_json_id`: Another requirement for the COCO evaluation. It maps the contiguous category id to the JSON category id. Again, for almost all purposes the category id and the JSON id should be the same.\n", + "\n", + "Given below is a sample dataset. It is the Shapes dataset taken from the [Matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN) repo. One important detail is that the constructor of the dataset should accept a `transforms` argument, which is set inside the constructor. It should then be used in `__getitem__(self, idx)` as shown below." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xnr8tbDz7WjS", + "colab_type": "text" + }, + "source": [ + "## Helper Functions" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "tb_5MERf7c_1", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Helper Functions for the Shapes Dataset\n", + "\n", + "def non_max_suppression(boxes, scores, threshold):\n", + " \"\"\"Performs non-maximum suppression and returns indices of kept boxes.\n", + " boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lies outside the box.\n", + " scores: 1-D array of box scores.\n", + " threshold: Float. IoU threshold to use for filtering.\n", + " \"\"\"\n", + " assert boxes.shape[0] > 0\n", + " if boxes.dtype.kind != \"f\":\n", + " boxes = boxes.astype(np.float32)\n", + "\n", + " # Compute box areas\n", + " y1 = boxes[:, 0]\n", + " x1 = boxes[:, 1]\n", + " y2 = boxes[:, 2]\n", + " x2 = boxes[:, 3]\n", + " area = (y2 - y1) * (x2 - x1)\n", + "\n", + " # Get indices of boxes sorted by scores (highest first)\n", + " ixs = scores.argsort()[::-1]\n", + "\n", + " pick = []\n", + " while len(ixs) > 0:\n", + " # Pick top box and add its index to the list\n", + " i = ixs[0]\n", + " pick.append(i)\n", + " # Compute IoU of the picked box with the rest\n", + " iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n", + " # Identify boxes with IoU over the threshold. 
This\n", + " # returns indices into ixs[1:], so add 1 to get\n", + " # indices into ixs.\n", + " remove_ixs = np.where(iou > threshold)[0] + 1\n", + " # Remove indices of the picked and overlapped boxes.\n", + " ixs = np.delete(ixs, remove_ixs)\n", + " ixs = np.delete(ixs, 0)\n", + " return np.array(pick, dtype=np.int32)\n", + "\n", + "def compute_iou(box, boxes, box_area, boxes_area):\n", + " \"\"\"Calculates IoU of the given box with the array of the given boxes.\n", + " box: 1D vector [y1, x1, y2, x2]\n", + " boxes: [boxes_count, (y1, x1, y2, x2)]\n", + " box_area: float. the area of 'box'\n", + " boxes_area: array of length boxes_count.\n", + " Note: the areas are passed in rather than calculated here for\n", + " efficiency. Calculate once in the caller to avoid duplicate work.\n", + " \"\"\"\n", + " # Calculate intersection areas\n", + " y1 = np.maximum(box[0], boxes[:, 0])\n", + " y2 = np.minimum(box[2], boxes[:, 2])\n", + " x1 = np.maximum(box[1], boxes[:, 1])\n", + " x2 = np.minimum(box[3], boxes[:, 3])\n", + " intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n", + " union = box_area + boxes_area[:] - intersection[:]\n", + " iou = intersection / union\n", + " return iou" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5DC0K7tW7d-M", + "colab_type": "text" + }, + "source": [ + "## Dataset" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WhG_Tu9ELAsj", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class ShapeDataset(object):\n", + " \n", + " def __init__(self, num_examples, transforms=None):\n", + " \n", + " self.height = 128\n", + " self.width = 128\n", + " \n", + " self.num_examples = num_examples\n", + " self.transforms = transforms # IMPORTANT, DON'T MISS\n", + " self.image_info = []\n", + " self.logger = logging.getLogger(__name__)\n", + " \n", + " # Class Names: Note that the ids start from 1, not 0. This repo uses the index 0 for background\n", + " self.class_names = {\"square\": 1, \"circle\": 2, \"triangle\": 3}\n", + " \n", + " # Add images\n", + " # Generate random specifications of images (i.e. color and\n", + " # list of shapes sizes and locations). This is more compact than\n", + " # actual images. Images are generated on the fly in load_image().\n", + " for i in range(num_examples):\n", + " bg_color, shapes = self.random_image(self.height, self.width)\n", + " self.image_info.append({ \"path\":None,\n", + " \"width\": self.width, \"height\": self.height,\n", + " \"bg_color\": bg_color, \"shapes\": shapes\n", + " })\n", + " \n", + " # Fills in the self.coco varibale for evaluation.\n", + " self.get_gt()\n", + " \n", + " # Variables needed for coco mAP evaluation\n", + " self.id_to_img_map = {}\n", + " for i, _ in enumerate(self.image_info):\n", + " self.id_to_img_map[i] = i\n", + "\n", + " self.contiguous_category_id_to_json_id = { 0:0 ,1:1, 2:2, 3:3 }\n", + " \n", + "\n", + " def random_shape(self, height, width):\n", + " \"\"\"Generates specifications of a random shape that lies within\n", + " the given height and width boundaries.\n", + " Returns a tuple of three values:\n", + " * The shape name (square, circle, ...)\n", + " * Shape color: a tuple of 3 values, RGB.\n", + " * Shape dimensions: A tuple of values that define the shape size\n", + " and location. 
Differs per shape type.\n", + " \"\"\"\n", + " # Shape\n", + " shape = random.choice([\"square\", \"circle\", \"triangle\"])\n", + " # Color\n", + " color = tuple([random.randint(0, 255) for _ in range(3)])\n", + " # Center x, y\n", + " buffer = 20\n", + " y = random.randint(buffer, height - buffer - 1)\n", + " x = random.randint(buffer, width - buffer - 1)\n", + " # Size\n", + " s = random.randint(buffer, height//4)\n", + " return shape, color, (x, y, s)\n", + "\n", + " def random_image(self, height, width):\n", + " \"\"\"Creates random specifications of an image with multiple shapes.\n", + " Returns the background color of the image and a list of shape\n", + " specifications that can be used to draw the image.\n", + " \"\"\"\n", + " # Pick random background color\n", + " bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n", + " # Generate a few random shapes and record their\n", + " # bounding boxes\n", + " shapes = []\n", + " boxes = []\n", + " N = random.randint(1, 4)\n", + " labels = {}\n", + " for _ in range(N):\n", + " shape, color, dims = self.random_shape(height, width)\n", + " shapes.append((shape, color, dims))\n", + " x, y, s = dims\n", + " boxes.append([y-s, x-s, y+s, x+s])\n", + "\n", + " # Apply non-max suppression with 0.3 threshold to avoid\n", + " # shapes covering each other\n", + " keep_ixs = non_max_suppression(np.array(boxes), np.arange(N), 0.3)\n", + " shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n", + " \n", + " return bg_color, shapes\n", + " \n", + " \n", + " def draw_shape(self, image, shape, dims, color):\n", + " \"\"\"Draws a shape from the given specs.\"\"\"\n", + " # Get the center x, y and the size s\n", + " x, y, s = dims\n", + " if shape == 'square':\n", + " cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)\n", + " elif shape == \"circle\":\n", + " cv2.circle(image, (x, y), s, color, -1)\n", + " elif shape == \"triangle\":\n", + " points = np.array([[(x, y-s),\n", + " (x-s/math.sin(math.radians(60)), y+s),\n", + " (x+s/math.sin(math.radians(60)), y+s),\n", + " ]], dtype=np.int32)\n", + " cv2.fillPoly(image, points, color)\n", + " return image, [ x-s, y-s, x+s, y+s]\n", + "\n", + "\n", + " def load_mask(self, image_id):\n", + " \"\"\"\n", + " Generates instance masks for shapes of the given image ID.\n", + " \"\"\"\n", + " info = self.image_info[image_id]\n", + " shapes = info['shapes']\n", + " count = len(shapes)\n", + " mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n", + " \n", + " boxes = []\n", + " \n", + " for i, (shape, _, dims) in enumerate(info['shapes']):\n", + " mask[:, :, i:i+1], box = self.draw_shape(mask[:, :, i:i+1].copy(),\n", + " shape, dims, 1)\n", + " boxes.append(box)\n", + " \n", + " # Handle occlusions\n", + " occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n", + " for i in range(count-2, -1, -1):\n", + " mask[:, :, i] = mask[:, :, i] * occlusion\n", + " occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n", + " # Map class names to class IDs.\n", + " class_ids = np.array([self.class_names[s[0]] for s in shapes])\n", + " return mask.astype(np.uint8), class_ids.astype(np.int32), boxes\n", + " \n", + " def load_image(self, image_id):\n", + " \"\"\"Generate an image from the specs of the given image ID.\n", + " Typically this function loads the image from a file, but\n", + " in this case it generates the image on the fly from the\n", + " specs in image_info.\n", + " \"\"\"\n", + " info = self.image_info[image_id]\n", + " bg_color = 
np.array(info['bg_color']).reshape([1, 1, 3])\n", + " image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n", + " image = image * bg_color.astype(np.uint8)\n", + " for shape, color, dims in info['shapes']:\n", + " image, _ = self.draw_shape(image, shape, dims, color)\n", + " return image\n", + " \n", + " def __getitem__(self, idx):\n", + " \n", + " \"\"\"Generate an image from the specs of the given image ID.\n", + " Typically this function loads the image from a file, but\n", + " in this case it generates the image on the fly from the\n", + " specs in image_info.\n", + " \"\"\"\n", + " image = Image.fromarray(self.load_image(idx))\n", + " masks, labels, boxes = self.load_mask(idx)\n", + " \n", + " # create a BoxList from the boxes\n", + " boxlist = BoxList(boxes, image.size, mode=\"xyxy\")\n", + "\n", + " # add the labels to the boxlist\n", + " boxlist.add_field(\"labels\", torch.tensor(labels))\n", + "\n", + " # Add masks to the boxlist\n", + " masks = np.transpose(masks, (2,0,1))\n", + " masks = SegmentationMask(torch.tensor(masks), image.size, \"mask\")\n", + " boxlist.add_field(\"masks\", masks)\n", + " \n", + " # Important line! don't forget to add this\n", + " if self.transforms:\n", + " image, boxlist = self.transforms(image, boxlist)\n", + "\n", + " # return the image, the boxlist and the idx in your dataset\n", + " return image, boxlist, idx\n", + " \n", + " \n", + " def __len__(self):\n", + " return self.num_examples\n", + " \n", + "\n", + " def get_img_info(self, idx):\n", + " # get img_height and img_width. This is used if\n", + " # we want to split the batches according to the aspect ratio\n", + " # of the image, as it can be more efficient than loading the\n", + " # image from disk\n", + "\n", + " return {\"height\": self.height, \"width\": self.width}\n", + " \n", + " def get_gt(self):\n", + " # Prepares dataset for coco eval\n", + " \n", + " \n", + " images = []\n", + " annotations = []\n", + " results = []\n", + " \n", + " # Define categories\n", + " categories = [ {\"id\": 1, \"name\": \"square\"}, {\"id\": 2, \"name\": \"circle\"}, {\"id\": 3, \"name\": \"triangle\"}]\n", + "\n", + "\n", + " i = 1\n", + " ann_id = 0\n", + "\n", + " for img_id, d in enumerate(self.image_info):\n", + "\n", + " images.append( {\"id\": img_id, 'height': self.height, 'width': self.width } )\n", + "\n", + " for (shape, color, dims) in d['shapes']:\n", + " \n", + " if shape == \"square\":\n", + " category_id = 1\n", + " elif shape == \"circle\":\n", + " category_id = 2\n", + " elif shape == \"triangle\":\n", + " category_id = 3\n", + " \n", + " x, y, s = dims\n", + " bbox = [ x - s, y - s, x+s, y +s ] \n", + " area = (bbox[0] - bbox[2]) * (bbox[1] - bbox[3])\n", + " \n", + " # Format for COCO\n", + " annotations.append( {\n", + " \"id\": int(ann_id),\n", + " \"category_id\": category_id,\n", + " \"image_id\": int(img_id),\n", + " \"area\" : float(area),\n", + " \"bbox\": [ float(bbox[0]), float(bbox[1]), float(bbox[2]) - float(bbox[0]) + 1, float(bbox[3]) - float(bbox[1]) + 1 ], # note that the bboxes are in x, y , width, height format\n", + " \"iscrowd\" : 0\n", + " } )\n", + "\n", + " ann_id += 1\n", + "\n", + " # Save ground truth file\n", + " \n", + " with open(\"tmp_gt.json\", \"w\") as f:\n", + " json.dump({\"images\": images, \"annotations\": annotations, \"categories\": categories }, f)\n", + "\n", + " # Load gt for coco eval\n", + " self.coco = COCO(\"tmp_gt.json\") \n", + " " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + 
"metadata": { + "id": "2hpTvuSp830x", + "colab_type": "text" + }, + "source": [ + "## Visualise Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BI2ncK7kATEh", + "colab_type": "text" + }, + "source": [ + "### Load" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "6nsO_MRUbBpk", + "colab_type": "code", + "outputId": "db8c6102-8663-4bb5-e9e9-c9f305f014f7", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 105 + } + }, + "source": [ + "train_dt = ShapeDataset(100)\n", + "im, boxlist, idx = train_dt[0]" + ], + "execution_count": 5, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F9njOSX0AU5-", + "colab_type": "text" + }, + "source": [ + "### Display some sample Images" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nMXB9sAW994F", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + }, + "outputId": "6de8c71d-c656-42a0-eeb0-24982fcad9de" + }, + "source": [ + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " im, boxlist, idx = train_dt[i]\n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(im)\n", + "plt.show()\n", + " " + ], + "execution_count": 6, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X+wXHV9//Hnm4QEiZLww6YxoRI0\n/kCnrUyG0tFSp/QHUmtwdCiMrVEzZjpfbLW2Y6M4g3/IjNRWqzOtThQktpQfBRky/WorpVqnnS9o\nQORXBGIESb6BqPzyaxxC5P39Y8/VzeXe5N495+z57O7zMXPn7p7dvfvez+ad1/18zrlnIzORJEnl\nOKLrAiRJ0sEMZ0mSCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTCthXNEnBUR90bEjojY1NbzSGqXvSwN\nX7Txd84RsQC4D/gdYBfwDeD8zLyn8SeT1Bp7WepGWzPn04AdmbkzM/cDVwHrWnouSe2xl6UOLGzp\n564EHuq7vgv4tdnuvODIo3Lh4iUtlSKNpv0/fvQHmfn8jsuYVy8DLDl2US57wXNaLUoaJY//35/w\n48f2x3we01Y4H1ZEbAQ2AixYvIQVv3p2V6VIRXrwf/7pwa5rmKv+fl664ij+11Wv6bgiqRz/cN5/\nz/sxbS1r7wZO7Lu+qtr2M5m5OTPXZubaBQsXt1SGpJoO28twcD8vOXbR0IqTxlVb4fwNYE1ErI6I\nRcB5wNaWnktSe+xlqQOtLGtn5oGIeBfw78AC4LLMvLuN55LUHntZ6kZr+5wz84vAF9v6+ZKGw16W\nhs8zhEmSVBjDWZKkwhjOkiQVxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIKYzhLklQYw1mSpMIY\nzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mSCmM4S9IEe+emF/HOTS/qugxNs7DrAiRJzRok\nbOfzmM985Dvz/vmaH2fOkiQVxpmzJI24YS9LT38+Z9LNM5wlaQSVtJ+4vxaDuhkua0uSVJiBZ84R\ncSLweWA5kMDmzPxERBwHXA2cBDwAnJuZj9UvVVJb7OfRUNJseTbOoptRZ+Z8APiLzDwFOB24ICJO\nATYBN2XmGuCm6rqkstnPBRvVP3ca1bpLMHA4Z+aezLytuvwjYDuwElgHbKnutgU4p26RktplP0tl\naWSfc0ScBLwKuAVYnpl7qpseprdMJmlE2M9lGYeZpzPo+at9tHZEPBe4DnhPZj4ZET+7LTMzInKW\nx20ENgIsWLykbhmSGtBEPy9dcdQwSh1b4xxi7o+eu1oz54g4kl4jX5GZX6g2PxIRK6rbVwB7Z3ps\nZm7OzLWZuXbBwsV1ypDUgKb6ecmxi4ZTsDTGBg7n6P1KfSmwPTM/1nfTVmB9dXk9cMPg5UkaBvu5\nDOM8a55ukl7rIOosa78a+GPgzoi4vdr2AeAjwDURsQF4EDi3XomShsB+lgoycDhn5n8DMcvNZw76\ncyUNn/3crUmdRU69bvc/P5tnCJMkqTCGsyRJhTGcJalDk7qk3c8xeDY/lUqSOmAgHcz9zwdz5ixJ\nUmEMZ0kaMmfNs3NsegxnSZIKYzhLklQYDwiTpCFxyXZuPDjMmbMkScUxnCVJKozhLElD4JL2/E3y\nmBnOkiQVxnCWJKkwhrMkSYUxnCVJKox/5yxJLZrkg5qaMKl/8+zMWZKkwhjOkiQVxnCWJKkwhrMk\nSYUxnCVJKkztcI6IBRHxzYj41+r66oi4JSJ2RMTVEbGofpmShsF+lsrQxMz53cD2vuuXAB/PzBcD\njwEbGngOScNhP0sFqBXOEbEK+H3gs9X1AH4LuLa6yxbgnDrPIWk47GepHHVnzn8HvA94prp+PPB4\nZh6oru8CVs70wIjYGBHbImLbTw88VbMMSQ1opJ9//Nj+9iuVxtzA4RwRrwf2Zuatgzw+Mzdn5
trM\nXLtg4eJBy5DUgCb7ecmx7pae4tnBmvPOTS+aqPGsc/rOVwNviIizgaOAY4BPAMsiYmH12/YqYHf9\nMiW1zH6WCjLwzDkz35+ZqzLzJOA84D8z8y3AV4A3V3dbD9xQu0pJrbKf2zFp54Nu02c+8p2JGs82\n/s75r4D3RsQOevusLm3hOSQNh/0sdaCRT6XKzK8CX60u7wROa+LnSho++1nqnmcIkySpMIazJEmF\nMZwlSSqM4SxJUmEMZ0mSCmM4S5JUGMNZqjzz8jU88/I1XZchSYazJEmlaeQkJFLJ5jsbns/9j9h+\n/3zL0YSZOuXkJH1oQ5Mm6ZSd/QxnjZVhL0tPfz7DWlITXNaWJKkwzpw1sko8eGummpxNS5ovZ84a\nKVNHVJcYzLMZxZrVvEndd1rHJI+Z4SxJUmEMZ42McZh5OoOWNBeGsyRJhfGAMBVtXGeZ/a/LA8Ym\nh3/zPDeTvK95ijNnFWmSln8n5XVKmjvDWZKkwhjOKsokzZj7TerrnlQu287Oselxn7OKYTj1xsB9\n0JPB/c8HM5QP5sxZkqTC1Jo5R8Qy4LPAK4EE3gHcC1wNnAQ8AJybmY/VqlJjzRnzwX42Hv8z3Ocd\ntX5eePrNXZfQiM9xM29/7Vu6LqNTzpqfre7M+RPAv2Xmy4BfAbYDm4CbMnMNcFN1XZqRwVwU+1kq\nxMDhHBFLgTOASwEyc39mPg6sA7ZUd9sCnFO3SEntsp+lstRZ1l4NfB/4XET8CnAr8G5geWbuqe7z\nMLB8pgdHxEZgI8CCxUtqlKHTNvSG+OuXPtJxJXPnjLk4jfXz0hVHtV/tmPncV68AmLjlbZezZ1cn\nnBcCpwJ/mpm3RMQnmLbklZkZETnTgzNzM7AZYPFzj5/xPvp58DZ13xIC3GAuUmP9vPIVS+3nAX3u\nq1dMTEAbzIdWZ5/zLmBXZt5SXb+WXnM/EhErAKrve+uVKGkI7GepIAPPnDPz4Yh4KCJempn3AmcC\n91Rf64GPVN9vaKTSCTKf2XKdn13CLFplsJ/LMbXEPWWcZtLOlueu7klI/hS4IiIWATuBt9ObjV8T\nERuAB4Fzaz7HRGgzkOfynMMIapezi2c/F2gclroN5fmrFc6ZeTuwdoabzqzzcyUNn/0slcPTd3ao\ni9nybKbX0vRM2lmzNLhRPZr7/Y/uAuDkjusYRZ6+U5Kkwjhz7kBJM+bZnLZhuQeMSYXpP1is1Fn0\n1GxZ9RjOQzYKwTyliZObuJwttaOkoD5cIO/cuBiAkzc/NYxyxoLL2pIkFcaZ85CM0ox5Ope4pbL1\nz6IP3Hx6658RPejS9c6Ni509z5HhPASjHMxTRvH83dKkms/fFU8F+WyPmVqS1nC5rC1JUmGcOas1\nHgwmlW/YM2YPDpsbZ84tOm3D8rFY0u43bq9H0rO5lN09w1mSpMIYzpKkoXN2fmjuc27JOC//Hu7I\nbfc1S6PJwCyHM2dJkgpjOEuSOrFz42Jn67MwnBs2jkdoz2ZSXqc0CboMSQP62QxnSZIKYzhLklQY\nw1mSpML4p1SSNMFK2d/raT0P5sxZkiZUKcGsZzOcJUkqTK1wjog/j4i7I+KuiLgyIo6KiNURcUtE\n7IiIqyNiUVPFSmqP/awSOJvvGTicI2Il8GfA2sx8JbAAOA+4BPh4Zr4YeAzY0EShKt8zL1/jqTtH\nlP08WTz5R/nqLmsvBJ4TEQuBo4E9wG8B11a3bwHOqfkckobDfpYKMfDR2pm5OyL+Bvge8BPgy8Ct\nwOOZeaC62y5g5UyPj4iNwEaABYuXDFpGMSb1bFn9H4JxxPb7AT/4YhQ12c9LVxzVfsEaax65XW9Z\n+1hgHbAaeAGwBDhrro/PzM2ZuTYz1y5YOPrLK1+/9JFZP6VpnE3q6x43TfbzkmPdLV0yl7NHQ51l\n7d8GvpuZ38/Mp4EvAK8GllXLYgCrgN01a5TUPvtZKkidcP4ecHpEHB0RAZwJ3AN8BXhzdZ/1wA31\nSpQ0BPazijPJs/w6+5xviYhrgduAA8A3gc3A/wauiogPV9subaJQSe2xn8ffqAbdpO5/rnX6zsy8\nCLho2uadwGl1fq6k4bOfpXJ4hjBJGnOjOmueZIazJKl4k3biFMNZkqTC+JGRkjSmJmmmOW6cOUuS\nVBjDuWGTdMas2V7nEdvv/9mpPCWpSZOyGuCytiSNmUkJsHHmzFmSpMIYzi0Z56XtSVq6l0bNJMya\nJ+HPqgxntcb9zpLaNM4BbThLklQYw7lF47j8O26vR5JKZDhLkkbWuO5/NpwlSSqM4TwE47AUPOgS\nvSckkaT5M5yHZJT3P49q3ZImx7gtbRvOkiQVxnAeslGahTY523dpW5LmznDuwCgscbdRn/ufJbVp\nnI7cNpwlSSqMn0rVof7Z6WkblndYyWgtt0vSuHPmLElSYQ4bzhFxWUTsjYi7+rYdFxE3RsT91fdj\nq+0REZ+MiB0RcUdEnNpm8eNkaj/0MGewXTwneHBYl+xnTYJx2O88l5nz5cBZ07ZtAm7KzDXATdV1\ngNcBa6qvjcCnmilzsrQZml0F8nQeHNaZy7GfNQFG/eCww4ZzZn4NeHTa5nXAluryFuCcvu2fz56b\ngWURsaKpYiXVYz9Lo2HQA8KWZ+ae6vLDwNTRTCuBh/rut6vatodpImIjvd/GWbB4yYBljL+5zHCn\nDibrejY8iCO2388zL1/TdRmTrtF+XrriqPYqlSZE7aO1MzMjIgd43GZgM8Di5x4/78fr50YxlPtN\nLW8b0j1dLvc30c8rX7HUflYxppa2T978VMeVzM+gR2s/MrW8VX3fW23fDZzYd79V1TZJ5bKfpcIM\nGs5bgfXV5fXADX3b31od5Xk68ETfcpl0SB4g1tkY2M9SYQ67rB0RVwKvBU6IiF3ARcBHgGsiYgPw\nIHBudfcvAmcDO4B9wNtbqFljbFKXuIcVyuPUzwduPr3rEoZilI84LsnOjYtHamn7sOGcmefPctOZ\nM9w3gQvqFiWpHfazNBo8Q5iKNEl/Bz0pr1PS3HlubRWtP7jGaanbQNbhuJzdvFE6ctuZsyRJhTGc\nNTLGYbY5Scv1UqlGYVXCcJYkqTDuc9ZIGcV90M6UNR+jMKtT+wxnjayZQq/rwDaIpdFQ+sFhLmtL\nklQYZ84aK8OeTTtTVpNc0tYUw1ljb64BOhXiBq6GzVDuTqmn9XRZW5KkwjhzlirOmCWVwpmzJHXI\nJe3u7dy4uLj3wXCWJKkwhrMkSYUxnCWpAyUupU66kt4Pw1mSpMIYzpI0ZCXN0HSwUlY0DGdJkgpj\nOEuSVBjDWZKGpJQlUx1e1++T4SxJUmEOG84RcVlE7I2Iu/q2fTQivh0Rd0TE9RGxrO+290fEjoi4\nNyJ+r63CJc2f/SyNhrnMnC8Hzpq27UbglZn5y8B9wPsBIuIU4DzgFdVj/iEiFjRWraS6Lsd+7kTX\ny6Savy53Qxw2nDPza8Cj07Z9OTMPVFdvBlZVl9cB
V2XmU5n5XWAHcFqD9UqqwX6WRkMT+5zfAXyp\nurwSeKjvtl3VtmeJiI0RsS0itv30QHmfpSlNqNr9/OPH9rdcojT+an1kZERcCBwArpjvYzNzM7AZ\nYPFzj886dUiqr6l+XvmKpfZzH5ezR9/Ue3jy5uFNJAcO54h4G/B64MzMnGrG3cCJfXdbVW2TVDD7\nWSrLQOEcEWcB7wN+MzP39d20FfjniPgY8AJgDfD12lVKao393K5hzrY0Pg4bzhFxJfBa4ISI2AVc\nRO9ozsXAjREBcHNm/klm3h0R1wD30FseuyAzf9pW8ZLmx36WRsNhwzkzz59h86WHuP/FwMV1ipLU\nDvtZGg2eIUySpMIYzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmHi5ycD6rCIiO8DPwZ+0HUtsziB\nMmsrtS4ot7ZS64Jn1/bCzHx+V8UMKiJ+BNzbdR2zGKX3vxSl1gWjU9u8e7mIcAaIiG2ZubbrOmZS\nam2l1gXl1lZqXVB2bfNR8uuwtvkrtS4Y79pc1pYkqTCGsyRJhSkpnDd3XcAhlFpbqXVBubWVWheU\nXdt8lPw6rG3+Sq0Lxri2YvY5S5KknpJmzpIkiQLCOSLOioh7I2JHRGzquJYTI+IrEXFPRNwdEe+u\ntn8oInZHxO3V19kd1fdARNxZ1bCt2nZcRNwYEfdX348dck0v7RuX2yPiyYh4T1djFhGXRcTeiLir\nb9uMYxQ9n6z+7d0REad2UNtHI+Lb1fNfHxHLqu0nRcRP+sbv023W1pRS+tleHrgu+3nwuprt5czs\n7AtYAHwHOBlYBHwLOKXDelYAp1aXnwfcB5wCfAj4yy7HqqrpAeCEadv+GthUXd4EXNLx+/kw8MKu\nxgw4AzgVuOtwYwScDXwJCOB04JYOavtdYGF1+ZK+2k7qv98ofJXUz/ZyY++n/Tz3uhrt5a5nzqcB\nOzJzZ2buB64C1nVVTGbuyczbqss/ArYDK7uqZ47WAVuqy1uAczqs5UzgO5n5YFcFZObXgEenbZ5t\njNYBn8+em4FlEbFimLVl5pcz80B19WZgVVvPPwTF9LO93Aj7eR51Nd3LXYfzSuChvuu7KKSBIuIk\n4FXALdWmd1XLFZd1sdxUSeDLEXFrRGysti3PzD3V5YeB5d2UBsB5wJV910sYM5h9jEr79/cOer/5\nT1kdEd+MiP+KiN/oqqh5KG08AXu5Bvt5cLV7uetwLlJEPBe4DnhPZj4JfAp4EfCrwB7gbzsq7TWZ\neSrwOuCCiDij/8bsraF0cvh9RCwC3gD8S7WplDE7SJdjdCgRcSFwALii2rQH+KXMfBXwXuCfI+KY\nruobVfbyYOznwTXVy12H827gxL7rq6ptnYmII+k18xWZ+QWAzHwkM3+amc8An6G3fDd0mbm7+r4X\nuL6q45GppZvq+94uaqP3n8xtmflIVWMRY1aZbYyK+PcXEW8DXg+8pfrPhsx8KjN/WF2+ld6+3JcM\nu7Z5KmI8p9jLtdjPA2iyl7sO528AayJidfWb2nnA1q6KiYgALgW2Z+bH+rb377d4I3DX9McOobYl\nEfG8qcv0Dj64i954ra/uth64Ydi1Vc6nbwmshDHrM9sYbQXeWh3leTrwRN9y2VBExFnA+4A3ZOa+\nvu3Pj4gF1eWTgTXAzmHWNoBi+tlers1+nqfGe7mto9nm+kXvCLv76P02cWHHtbyG3hLJHcDt1dfZ\nwD8Cd1bbtwIrOqjtZHpHv34LuHtqrIDjgZuA+4H/AI7roLYlwA+BpX3bOhkzev+h7AGeprfPacNs\nY0TvqM6/r/7t3Qms7aC2HfT2k039e/t0dd83Ve/z7cBtwB8M+30d8DUW0c/2cq367OfB6mq0lz1D\nmCRJhel6WVuSJE1jOEuSVBjDWZKkwhjOkiQVxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIKYzhL\nklQYw1mSpMIYzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mSCmM4S5JUGMNZkqTCGM6SJBXG\ncJYkqTCthXNEnBUR90bEjojY1NbzSGqXvSwNX2Rm8z80YgFwH/A7wC7gG8D5mXlP408mqTX2stSN\nhS393NOAHZm5EyAirgLWATM29NIjn5PLj1raUinSaLr//z3yg8x8fsdlzKuXAY5Ztih/4RePHlJ5\n42f3omO7LmHeVu5/rOsSirb34X08+fj+mM9j2grnlcBDfdd3Ab/Wf4eI2AhsBPiFxc/jk2vf0lIp\n0mh63Vc/9mDXNTCHXoaD+/n5y5/D33zmN4dT3Rj64Oo3dl3CvH34u9d3XULR/vKd/zXvx7QVzoeV\nmZuBzQAved4vNr+2Lmlo+vv5xS9bZj/P0ygGcr/++g3qZrR1QNhu4MS+66uqbZJGi73cslEP5uk+\nuPqNY/eautBWOH8DWBMRqyNiEXAesLWl55LUHntZ6kAry9qZeSAi3gX8O7AAuCwz727juSS1x15u\nz7jPLj+4+o0ucdfQ2j7nzPwi8MW2fr6k4bCXmzPugTzd9NdrWM+dZwiTJKkwhrMkDcGkzZpn4hjM\nneEsSVJhDGdJkgrT2UlIJGkSuJR7sKnx8OCwQ3PmLGmsXH3xdVx98XVdlwEYzIfi2Bya4SxJUmFc\n1pY0NvpnzFOX//DCNw29DmeFc+MS9+ycOUuSVBjDWZKkwhjOkkbeoQ4CG+bBYX4i02Acs2cznCVJ\nKozhLElSYQxnSSNtLsvWw/jbZ5dm63GXwMEMZ0mSCmM4S5JUGMNZ0kgaZKm6jaVtl2Ob5Vj2GM6S\nJBXGcJY0UUr6YAxpNoazpJFjuGrcGc6SJBXGcJY0Mppckm7i53jwUjs8yK5GOEfEiRHxlYi4JyLu\njoh3V9uPi4gbI+L+6vuxzZUrqQ32s1SWOjPnA8BfZOYpwOnABRFxCrAJuCkz1wA3Vdcllc1+lgoy\ncDhn5p7MvK26/CNgO7ASWAdsqe62BTinbpGS2jUK/dzGQWAeua1SNbLPOSJOAl4F3AIsz8w91U0P\nA8tneczGiNgWEdueeHpfE2VIakDdfn7y8f1DqVMaZ7XDOSKeC1wHvCczn+y/LTMTyJkel5mbM3Nt\nZq5deuTRdcuQ1IAm+vmYZYuGUKk03mqFc0QcSa+Rr8jML1SbH4mIFdXtK4C99UqUNAyl9vMwlp5d\n2lZp6hytHcClwPbM/FjfTVuB9dXl9cANg5cnaRjsZ6ksC2s89tXAHwN3RsTt1bYPAB8BromIDcCD\nwLn1SpQ0BPazVJCBwzkz/xuIWW4+c9CfK2n4Su3nYS43Tz3XH174pqE9pzQbzxAmSVJhDGdJ6jOX\n2bqnlxyOSR7jOvucJakVpR89/eHvXg9MdngMw9Q4TyJnzpIkFcaZs6RilDJj9uAwdc2ZsyRJhTGc\nJUkqjOEsqQilLGn381Or1BXDWZKkwhjOkiQVxnCW1KlRWDouvT6NH8NZkuZgFH6J0PgwnCVJKozh\nLKkzoz4
TneTTS7bpw9+9fuLH1nCWJKkwhrMkzcOoz/Y1Gjy3tqShM+CkQ3PmLElSYQxnSZqn/j+r\n8uClZjmWPYazpKFySVs6PMNZkqTC1A7niFgQEd+MiH+trq+OiFsiYkdEXB0Ri+qXKWkY2uzncTzD\nVv/rcTm2HncPHKyJmfO7ge191y8BPp6ZLwYeAzY08ByShsN+lgpQK5wjYhXw+8Bnq+sB/BZwbXWX\nLcA5dZ5D0nDYz1I56s6c/w54H/BMdf144PHMPFBd3wWsrPkckoajtX4et+Xsfh65XZ9j9mwDh3NE\nvB7Ym5m3Dvj4jRGxLSK2PfH0vkHLkNSAJvv5ycf3N1zdaBjnX0A0fHXOEPZq4A0RcTZwFHAM8Alg\nWUQsrH7bXgXsnunBmbkZ2Azwkuf9YtaoQ1J9jfXzi1+2zH6Wahp45pyZ78/MVZl5EnAe8J+Z+Rbg\nK8Cbq7utB26oXaWkVrXVz+N4hPZcuLw9sw+ccfA/H8dpdm38nfNfAe+NiB309lld2sJzSBoO+1nq\nQCMffJGZXwW+Wl3eCZzWxM+VNHz28+CmVgn+8MI3Ab2Z4QdXv7HLkorQP2Oeunz0lmdmu7vwDGGS\nWjKJy9kzcelWgzCcJUkqjJ/nLEkNm768PammHwDWb9/6I1zaPgRnzpIa9eieF7mkPQOXtjUfhrMk\nSYUxnCWpJdNXEKYODpuEWfShlrSn7Ft/BPvWG0MzcVQkqUWznYhlUkJagzGcJUkqjOEsSR0at9nz\nB864YU5L2v1c2n42/5RKkobg6ouvm/VPq/oDehTPKDZVvyHbHEdSkqTCOHOWpIJMX+YudSbd9HL8\n1KzbE5P0GM6SNCSDnDmspCXv2QK5yeVszxzW47K2JEmFceYsSSNimEve43YU+ahx5ixJUmGcOUvS\nkB3qz6rmYz6z26lZdlsHcrXxMyd537PhLEkdGPbHSo5CKOvnHF1JkgpjOEuSijTJs/PJfeWSVICZ\nPrGqdJMcmsPiCEuSVJha4RwRyyLi2oj4dkRsj4hfj4jjIuLGiLi/+n5sU8VKao/9rBLtW3/ERM7U\n677iTwD/lpkvA34F2A5sAm7KzDXATdV1SeWznzty9cXXjcTydpdBOWkhPfArjYilwBnApQCZuT8z\nHwfWAVuqu20BzqlbpKR22c9SWer8GrIa+D7wuYj4ZkR8NiKWAMszc091n4eB5XWLlNQ6+1kqSJ1w\nXgicCnwqM18F/JhpS16ZmUDO9OCI2BgR2yJi2xNP76tRhqQGNNbPT+3/QevFjquSl7ZLWVIupY62\n1XmVu4BdmXlLdf1aes39SESsAKi+753pwZm5OTPXZubapUceXaMMSQ1orJ8XLzphKAVL42zgcM7M\nh4GHIuKl1aYzgXuArcD6att64IZaFUpqnf2s2UzagVilqHtu7T8FroiIRcBO4O30Av+aiNgAPAic\nW/M5JA2H/VyAYZ9zexRNwgdj1ArnzLwdWDvDTWfW+bmShs9+lsrhp1JJUoG6nkG7lN0tR1+SpMIY\nzpKkkTTOs/vxfWWSNAa6+NvncQ69UeE7IElSYTwgTJIEjOaMeVz/rGr03glJmjCj8qlVXRrFXywO\nZbxejSRJY8BwliSN3cxz1PluSNKIcGn70MbpPODj8SokSRojHq0tqVHHrfiOH9owQsZlpjlufFck\nSWNlHH7hGP1XIEnSmDGcJWlCjcMMc1y5z7kD/+efTuq6hE79+h890HUJ0kSbhFAe9TOHjf87JEnS\niDGcJUkqjOEsSVJhDGdJmiCTsL+536ieNWz0KpYkaZ5GLaBHq1pJkiZArXCOiD+PiLsj4q6IuDIi\njoqI1RFxS0TsiIirI2JRU8VKao/9PN5GdXl3Ug38TkXESuDPgLWZ+UpgAXAecAnw8cx8MfAYsKGJ\nQiW1x37WJBilX1DqVrkQeE5ELASOBvYAvwVcW92+BTin5nNIGg77WSrEwOGcmbuBvwG+R6+JnwBu\nBR7PzAPV3XYBK2d6fERsjIhtEbHtiaf3DVqGpAY02c9PPr5/GCVrHkZltqifq7OsfSywDlgNvABY\nApw118dn5ubMXJuZa5ceefSgZUhqQJP9fMwyd0uXYpSWcYdpFMakToW/DXw3M7+fmU8DXwBeDSyr\nlsUAVgG7a9YoqX32s1SQOuH8PeD0iDg6IgI4E7gH+Arw5uo+64Eb6pUoaQjsZ6kgdfY530LvQJHb\ngDurn7UZ+CvgvRGxAzgeuLSpnrMjAAAIr0lEQVSBOiW1yH4eLy5nH17pY1TrIyMz8yLgommbdwKn\n1fm5kobPfpbKUe6vDZIktazU2XOZVUmSBlJq2Gh+fBclSSpMrX3OkqQyOGMe3NTYHb3lmY4r+Tnf\nTUmSCmM4S5JUGMNZkkacS9rNKGkcy6lEkiQBhrMkScXxaG1JGlElLcOOi1KO3PadlSSpMIazJEmF\nMZwlaQS5pN2ursfXd1eSpMJ4QJgkjaCuD1hSu5w5S5JUGMNZkqTCGM6SJBXGcJYkqTCGsyRJhTGc\nJUkqjOEsSVJhDhvOEXFZROyNiLv6th0XETdGxP3V92Or7RERn4yIHRFxR0Sc2mbxkubHfpZGw1xm\nzpcDZ03btgm4KTPXADdV1wFeB6ypvjYCn2qmTEkNuRz7WSreYcM5M78GPDpt8zpgS3V5C3BO3/bP\nZ8/NwLKIWNFUsZLqsZ+l0TDoPuflmbmnuvwwsLy6vBJ4qO9+u6ptzxIRGyNiW0Rse+LpfQOWIakB\njfbzk4/vb69SaULUPiAsMxPIAR63OTPXZubapUceXbcMSQ1oop+PWbaohcqkyTJoOD8ytbxVfd9b\nbd8NnNh3v1XVNknlsp+lwgwazluB9dXl9cANfdvfWh3leTrwRN9ymaQy2c9SYQ77kZERcSXwWuCE\niNgFXAR8BLgmIjYADwLnVnf/InA2sAPYB7y9hZolDch+lkbDYcM5M8+f5aYzZ7hvAhfULUpSO+xn\naTR4hjBJkgpjOEuSVBjDWZKkwhjOkiQVxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIKYzhLklQY\nw1mSpMIYzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mSCrOw6wIm0a//0QNdlyBJKpgzZ0mS\nCmM4S5JUGMNZkqTCGM6SJBXmsOEcEZdFxN6IuKtv20cj4tsRcUdEXB8Ry/pue39E7IiIeyPi99oq\nXNL82c/SaJjLzPly4Kxp224EXpmZvwzcB7wfICJOAc4DXlE95h8iYkFj1Uqq63LsZ6l4hw3nzPwa\n8Oi0bV/OzAPV1ZuBVdXldcBVmflUZn4X2AGc1mC9kmqwn6XR0MQ+53cAX6ourwQe6rttV7XtWSJi\nY0Rsi4htTzy9r4EyJDWgdj8/+fj+lkuUxl+tcI6IC4EDwBXzfWxmbs7MtZm5dumRR9cpQ1IDmurn\nY5Ytar44acIMfIawiHgb8HrgzMzMavNu4MS+u62qtkkqmP0slWWg
mXNEnAW8D3hDZvavSW8FzouI\nxRGxGlgDfL1+mZLaYj9L5TnszDkirgReC5wQEbuAi+gdzbkYuDEiAG7OzD/JzLsj4hrgHnrLYxdk\n5k/bKl7S/NjP0mg4bDhn5vkzbL70EPe/GLi4TlGS2mE/S6PBM4RJklQYw1mSpMIYzpIkFcZwliSp\nMIazJEmFMZwlSSpM/PxkQB0WEfF94MfAD7quZRYnUGZtpdYF5dZWal3w7NpemJnP76qYQUXEj4B7\nu65jFqP0/pei1LpgdGqbdy8XEc4AEbEtM9d2XcdMSq2t1Lqg3NpKrQvKrm0+Sn4d1jZ/pdYF412b\ny9qSJBXGcJYkqTAlhfPmrgs4hFJrK7UuKLe2UuuCsmubj5Jfh7XNX6l1wRjXVsw+Z0mS1FPSzFmS\nJGE4S5JUnM7DOSLOioh7I2JHRGzquJYTI+IrEXFPRNwdEe+utn8oInZHxO3V19kd1fdARNxZ1bCt\n2nZcRNwYEfdX348dck0v7RuX2yPiyYh4T1djFhGXRcTeiLirb9uMYxQ9n6z+7d0REad2UNtHI+Lb\n1fNfHxHLqu0nRcRP+sbv023W1pRS+tleHrgu+3nwuprt5czs7AtYAHwHOBlYBHwLOKXDelYAp1aX\nnwfcB5wCfAj4yy7HqqrpAeCEadv+GthUXd4EXNLx+/kw8MKuxgw4AzgVuOtwYwScDXwJCOB04JYO\navtdYGF1+ZK+2k7qv98ofJXUz/ZyY++n/Tz3uhrt5a5nzqcBOzJzZ2buB64C1nVVTGbuyczbqss/\nArYDK7uqZ47WAVuqy1uAczqs5UzgO5n5YFcFZObXgEenbZ5tjNYBn8+em4FlEbFimLVl5pcz80B1\n9WZgVVvPPwTF9LO93Aj7eR51Nd3LXYfzSuChvuu7KKSBIuIk4FXALdWmd1XLFZd1sdxUSeDLEXFr\nRGysti3PzD3V5YeB5d2UBsB5wJV910sYM5h9jEr79/cOer/5T1kdEd+MiP+KiN/oqqh5KG08AXu5\nBvt5cLV7uetwLlJEPBe4DnhPZj4JfAp4EfCrwB7gbzsq7TWZeSrwOuCCiDij/8bsraF08rdxEbEI\neAPwL9WmUsbsIF2O0aFExIXAAeCKatMe4Jcy81XAe4F/johjuqpvVNnLg7GfB9dUL3cdzruBE/uu\nr6q2dSYijqTXzFdk5hcAMvORzPxpZj4DfIbe8t3QZebu6vte4Pqqjkemlm6q73u7qI3efzK3ZeYj\nVY1FjFlltjEq4t9fRLwNeD3wluo/GzLzqcz8YXX5Vnr7cl8y7NrmqYjxnGIv12I/D6DJXu46nL8B\nrImI1dVvaucBW7sqJiICuBTYnpkf69vev9/ijcBd0x87hNqWRMTzpi7TO/jgLnrjtb6623rghmHX\nVjmfviWwEsasz2xjtBV4a3WU5+nAE33LZUMREWcB7wPekJn7+rY/PyIWVJdPBtYAO4dZ2wCK6Wd7\nuTb7eZ4a7+W2jmab6xe9I+zuo/fbxIUd1/IaekskdwC3V19nA/8I3Flt3wqs6KC2k+kd/fot4O6p\nsQKOB24C7gf+Aziug9qWAD8ElvZt62TM6P2Hsgd4mt4+pw2zjRG9ozr/vvq3dyewtoPadtDbTzb1\n7+3T1X3fVL3PtwO3AX8w7Pd1wNdYRD/by7Xqs58Hq6vRXvb0nZIkFabrZW1JkjSN4SxJUmEMZ0mS\nCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTD/HyG3uDbwtsBXAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ORQXaa6k30yD", + "colab_type": "text" + }, + "source": [ + "# Training a Model\n", + "\n", + "Now we move on to training our very own model. Here we will be fine-tuning a Mask RCNN on this dataset. To do this we need\n", + "\n", + "1. A base model that has the same amount of output classes as our dataset. In this case, we have need for only 3 classes instead of COCO's 80. Hence , we first need to do some model trimming. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SVaNqbpiAzwx", + "colab_type": "text" + }, + "source": [ + "## Model Trimming" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hbzY16ocEdrg", + "colab_type": "text" + }, + "source": [ + "### Helper Functions for Visualising Detections" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "yk5a6RpsEdIt", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class Resize(object):\n", + " def __init__(self, min_size, max_size):\n", + " self.min_size = min_size\n", + " self.max_size = max_size\n", + "\n", + " # modified from torchvision to add support for max size\n", + " def get_size(self, image_size):\n", + " w, h = image_size\n", + " size = self.min_size\n", + " max_size = self.max_size\n", + " if max_size is not None:\n", + " min_original_size = float(min((w, h)))\n", + " max_original_size = float(max((w, h)))\n", + " if max_original_size / min_original_size * size > max_size:\n", + " size = int(round(max_size * min_original_size / max_original_size))\n", + "\n", + " if (w <= h and w == size) or (h <= w and h == size):\n", + " return (h, w)\n", + "\n", + " if w < h:\n", + " ow = size\n", + " oh = int(size * h / w)\n", + " else:\n", + " oh = size\n", + " ow = int(size * w / h)\n", + "\n", + " return (oh, ow)\n", + "\n", + " def __call__(self, image):\n", + " size = self.get_size(image.size)\n", + " image = F.resize(image, size)\n", + " return image\n", + " \n", + " \n", + "class COCODemo(object):\n", + " \n", + " def __init__(\n", + " self,\n", + " cfg,\n", + " confidence_threshold=0.7,\n", + " show_mask_heatmaps=False,\n", + " masks_per_dim=2,\n", + " min_image_size=224,\n", + " ):\n", + " self.cfg = cfg.clone()\n", + " self.model = build_detection_model(cfg)\n", + " self.model.eval()\n", + " self.device = torch.device(cfg.MODEL.DEVICE)\n", + " self.model.to(self.device)\n", + " self.min_image_size = min_image_size\n", + "\n", + " save_dir = cfg.OUTPUT_DIR\n", + " checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)\n", + " _ = checkpointer.load(cfg.MODEL.WEIGHT)\n", + "\n", + " self.transforms = self.build_transform()\n", + "\n", + " mask_threshold = -1 if show_mask_heatmaps else 0.5\n", + " self.masker = Masker(threshold=mask_threshold, padding=1)\n", + "\n", + " # used to make colors for each class\n", + " self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n", + "\n", + " self.cpu_device = torch.device(\"cpu\")\n", + " self.confidence_threshold = confidence_threshold\n", + " self.show_mask_heatmaps = show_mask_heatmaps\n", + " self.masks_per_dim = masks_per_dim\n", + "\n", + " def build_transform(self):\n", + " \"\"\"\n", + " Creates a basic transformation that was used to train the models\n", + " \"\"\"\n", + " cfg = self.cfg\n", + "\n", + " # we are loading images with OpenCV, so we don't need to convert them\n", + " # to BGR, they are already! 
So all we need to do is to normalize\n", + " # by 255 if we want to convert to BGR255 format, or flip the channels\n", + " # if we want it to be in RGB in [0-1] range.\n", + " if cfg.INPUT.TO_BGR255:\n", + " to_bgr_transform = T.Lambda(lambda x: x * 255)\n", + " else:\n", + " to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n", + "\n", + " normalize_transform = T.Normalize(\n", + " mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n", + " )\n", + " min_size = cfg.INPUT.MIN_SIZE_TEST\n", + " max_size = cfg.INPUT.MAX_SIZE_TEST\n", + " transform = T.Compose(\n", + " [\n", + " T.ToPILImage(),\n", + " Resize(min_size, max_size),\n", + " T.ToTensor(),\n", + " to_bgr_transform,\n", + " normalize_transform,\n", + " ]\n", + " )\n", + " return transform\n", + "\n", + " def run_on_opencv_image(self, image):\n", + " \"\"\"\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " Returns:\n", + " prediction (BoxList): the detected objects. Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " predictions = self.compute_prediction(image)\n", + " top_predictions = self.select_top_predictions(predictions)\n", + "\n", + " result = image.copy()\n", + " if self.show_mask_heatmaps:\n", + " return self.create_mask_montage(result, top_predictions)\n", + " result = self.overlay_boxes(result, top_predictions)\n", + " if self.cfg.MODEL.MASK_ON:\n", + " result = self.overlay_mask(result, top_predictions)\n", + " if self.cfg.MODEL.KEYPOINT_ON:\n", + " result = self.overlay_keypoints(result, top_predictions)\n", + " result = self.overlay_class_names(result, top_predictions)\n", + "\n", + " return result\n", + "\n", + " def compute_prediction(self, original_image):\n", + " \"\"\"\n", + " Arguments:\n", + " original_image (np.ndarray): an image as returned by OpenCV\n", + " Returns:\n", + " prediction (BoxList): the detected objects. 
Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " # apply pre-processing to image\n", + " image = self.transforms(original_image)\n", + " # convert to an ImageList, padded so that it is divisible by\n", + " # cfg.DATALOADER.SIZE_DIVISIBILITY\n", + " image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)\n", + " image_list = image_list.to(self.device)\n", + " # compute predictions\n", + " with torch.no_grad():\n", + " predictions = self.model(image_list)\n", + " predictions = [o.to(self.cpu_device) for o in predictions]\n", + "\n", + " # always single image is passed at a time\n", + " prediction = predictions[0]\n", + "\n", + " # reshape prediction (a BoxList) into the original image size\n", + " height, width = original_image.shape[:-1]\n", + " prediction = prediction.resize((width, height))\n", + "\n", + " if prediction.has_field(\"mask\"):\n", + " # if we have masks, paste the masks in the right position\n", + " # in the image, as defined by the bounding boxes\n", + " masks = prediction.get_field(\"mask\")\n", + " # always single image is passed at a time\n", + " masks = self.masker([masks], [prediction])[0]\n", + " prediction.add_field(\"mask\", masks)\n", + " return prediction\n", + "\n", + " def select_top_predictions(self, predictions):\n", + " \"\"\"\n", + " Select only predictions which have a `score` > self.confidence_threshold,\n", + " and returns the predictions in descending order of score\n", + " Arguments:\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `scores`.\n", + " Returns:\n", + " prediction (BoxList): the detected objects. Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " scores = predictions.get_field(\"scores\")\n", + " keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n", + " predictions = predictions[keep]\n", + " scores = predictions.get_field(\"scores\")\n", + " _, idx = scores.sort(0, descending=True)\n", + " return predictions[idx]\n", + "\n", + " def compute_colors_for_labels(self, labels):\n", + " \"\"\"\n", + " Simple function that adds fixed colors depending on the class\n", + " \"\"\"\n", + " colors = labels[:, None] * self.palette\n", + " colors = (colors % 255).numpy().astype(\"uint8\")\n", + " return colors\n", + "\n", + " def overlay_boxes(self, image, predictions):\n", + " \"\"\"\n", + " Adds the predicted boxes on top of the image\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `labels`.\n", + " \"\"\"\n", + " labels = predictions.get_field(\"labels\")\n", + " boxes = predictions.bbox\n", + "\n", + " colors = self.compute_colors_for_labels(labels).tolist()\n", + "\n", + " for box, color in zip(boxes, colors):\n", + " box = box.to(torch.int64)\n", + " top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n", + " image = cv2.rectangle(\n", + " image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n", + " )\n", + "\n", + " return image\n", + "\n", + " def overlay_mask(self, image, predictions):\n", + " \"\"\"\n", + " Adds the instances contours for each predicted object.\n", + " Each label has a different color.\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", 
+ " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `mask` and `labels`.\n", + " \"\"\"\n", + " masks = predictions.get_field(\"mask\").numpy()\n", + " labels = predictions.get_field(\"labels\")\n", + "\n", + " colors = self.compute_colors_for_labels(labels).tolist()\n", + "\n", + " for mask, color in zip(masks, colors):\n", + " thresh = mask[0, :, :, None]\n", + " contours, hierarchy = cv2_util.findContours(\n", + " thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n", + " )\n", + " image = cv2.drawContours(image, contours, -1, color, 3)\n", + "\n", + " composite = image\n", + "\n", + " return composite\n", + "\n", + " def overlay_keypoints(self, image, predictions):\n", + " keypoints = predictions.get_field(\"keypoints\")\n", + " kps = keypoints.keypoints\n", + " scores = keypoints.get_field(\"logits\")\n", + " kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()\n", + " for region in kps:\n", + " image = vis_keypoints(image, region.transpose((1, 0)))\n", + " return image\n", + "\n", + " def create_mask_montage(self, image, predictions):\n", + " \"\"\"\n", + " Create a montage showing the probability heatmaps for each one one of the\n", + " detected objects\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `mask`.\n", + " \"\"\"\n", + " masks = predictions.get_field(\"mask\")\n", + " masks_per_dim = self.masks_per_dim\n", + " masks = L.interpolate(\n", + " masks.float(), scale_factor=1 / masks_per_dim\n", + " ).byte()\n", + " height, width = masks.shape[-2:]\n", + " max_masks = masks_per_dim ** 2\n", + " masks = masks[:max_masks]\n", + " # handle case where we have less detections than max_masks\n", + " if len(masks) < max_masks:\n", + " masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)\n", + " masks_padded[: len(masks)] = masks\n", + " masks = masks_padded\n", + " masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)\n", + " result = torch.zeros(\n", + " (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8\n", + " )\n", + " for y in range(masks_per_dim):\n", + " start_y = y * height\n", + " end_y = (y + 1) * height\n", + " for x in range(masks_per_dim):\n", + " start_x = x * width\n", + " end_x = (x + 1) * width\n", + " result[start_y:end_y, start_x:end_x] = masks[y, x]\n", + " return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)\n", + "\n", + " def overlay_class_names(self, image, predictions):\n", + " \"\"\"\n", + " Adds detected class names and scores in the positions defined by the\n", + " top-left corner of the predicted bounding box\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `scores` and `labels`.\n", + " \"\"\"\n", + " scores = predictions.get_field(\"scores\").tolist()\n", + " labels = predictions.get_field(\"labels\").tolist()\n", + " labels = [self.CATEGORIES[i] for i in labels]\n", + " boxes = predictions.bbox\n", + "\n", + " template = \"{}: {:.2f}\"\n", + " for box, score, label in zip(boxes, scores, labels):\n", + " x, y = box[:2]\n", + " s = template.format(label, score)\n", + " cv2.putText(\n", + " image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n", + " )\n", + "\n", + " return image\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot 
as plt\n", + "from maskrcnn_benchmark.structures.keypoint import PersonKeypoints\n", + "\n", + "def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n", + " \"\"\"Visualizes keypoints (adapted from vis_one_image).\n", + " kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n", + " \"\"\"\n", + " dataset_keypoints = PersonKeypoints.NAMES\n", + " kp_lines = PersonKeypoints.CONNECTIONS\n", + "\n", + " # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n", + " cmap = plt.get_cmap('rainbow')\n", + " colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n", + " colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n", + "\n", + " # Perform the drawing on a copy of the image, to allow for blending.\n", + " kp_mask = np.copy(img)\n", + "\n", + " # Draw mid shoulder / mid hip first for better visualization.\n", + " mid_shoulder = (\n", + " kps[:2, dataset_keypoints.index('right_shoulder')] +\n", + " kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n", + " sc_mid_shoulder = np.minimum(\n", + " kps[2, dataset_keypoints.index('right_shoulder')],\n", + " kps[2, dataset_keypoints.index('left_shoulder')])\n", + " mid_hip = (\n", + " kps[:2, dataset_keypoints.index('right_hip')] +\n", + " kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n", + " sc_mid_hip = np.minimum(\n", + " kps[2, dataset_keypoints.index('right_hip')],\n", + " kps[2, dataset_keypoints.index('left_hip')])\n", + " nose_idx = dataset_keypoints.index('nose')\n", + " if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n", + " color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n", + " if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n", + " color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n", + "\n", + " # Draw the keypoints.\n", + " for l in range(len(kp_lines)):\n", + " i1 = kp_lines[l][0]\n", + " i2 = kp_lines[l][1]\n", + " p1 = kps[0, i1], kps[1, i1]\n", + " p2 = kps[0, i2], kps[1, i2]\n", + " if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, p1, p2,\n", + " color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n", + " if kps[2, i1] > kp_thresh:\n", + " cv2.circle(\n", + " kp_mask, p1,\n", + " radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n", + " if kps[2, i2] > kp_thresh:\n", + " cv2.circle(\n", + " kp_mask, p2,\n", + " radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n", + "\n", + " # Blend the keypoints.\n", + " return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "If8z4OZfDHmC", + "colab_type": "text" + }, + "source": [ + "### Base Model Config\n", + "\n", + "This is the base model that we will fine-tune from. First we need to replace the bounding box heads and mask heads to make it compatible with our Shapes Dataset." 
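, + "\n", + "For orientation: the `COCODemo` helper defined above builds the detection model from this config with `build_detection_model(cfg)` and pulls the catalog weights through a `DetectronCheckpointer`. A rough sketch of the equivalent steps (you do not need to run this; `COCODemo` does it for you below) looks like:\n", + "\n", + "```python\n", + "# Minimal sketch only -- assumes base_config.yaml has been written by the next cell.\n", + "from maskrcnn_benchmark.config import cfg\n", + "from maskrcnn_benchmark.modeling.detector import build_detection_model\n", + "from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\n", + "\n", + "cfg.merge_from_file('base_config.yaml')   # overlay our YAML on the library defaults\n", + "model = build_detection_model(cfg)        # R-50-FPN Mask R-CNN, still with COCO-sized heads\n", + "checkpointer = DetectronCheckpointer(cfg, model, save_dir='.')\n", + "_ = checkpointer.load(cfg.MODEL.WEIGHT)   # fetches the COCO-pretrained weights from the catalog\n", + "```"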
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "wM0coO44ClbV", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "outputId": "9a9c1f2d-0f6d-420e-b737-30b295b15935" + }, + "source": [ + "%%writefile base_config.yaml\n", + "MODEL:\n", + " META_ARCHITECTURE: \"GeneralizedRCNN\"\n", + " WEIGHT: \"catalog://Caffe2Detectron/COCO/35858933/e2e_mask_rcnn_R-50-FPN_1x\"\n", + " BACKBONE:\n", + " CONV_BODY: \"R-50-FPN\"\n", + " RESNETS:\n", + " BACKBONE_OUT_CHANNELS: 256\n", + " RPN:\n", + " USE_FPN: True\n", + " ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n", + " PRE_NMS_TOP_N_TRAIN: 2000\n", + " PRE_NMS_TOP_N_TEST: 1000\n", + " POST_NMS_TOP_N_TEST: 1000\n", + " FPN_POST_NMS_TOP_N_TEST: 1000\n", + " ROI_HEADS:\n", + " USE_FPN: True\n", + " ROI_BOX_HEAD:\n", + " POOLER_RESOLUTION: 7\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " POOLER_SAMPLING_RATIO: 2\n", + " FEATURE_EXTRACTOR: \"FPN2MLPFeatureExtractor\"\n", + " PREDICTOR: \"FPNPredictor\"\n", + " ROI_MASK_HEAD:\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " FEATURE_EXTRACTOR: \"MaskRCNNFPNFeatureExtractor\"\n", + " PREDICTOR: \"MaskRCNNC4Predictor\"\n", + " POOLER_RESOLUTION: 14\n", + " POOLER_SAMPLING_RATIO: 2\n", + " RESOLUTION: 28\n", + " SHARE_BOX_FEATURE_EXTRACTOR: False\n", + " MASK_ON: True\n", + "DATALOADER:\n", + " SIZE_DIVISIBILITY: 32" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing base_config.yaml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mOo-0LGFEAmc", + "colab_type": "text" + }, + "source": [ + "### Pre-trained weight removal\n", + "\n", + "Here, the pre-trained weights of the bbox, mask and class predictors are removed. This is done so that we can make the model compatible with the Shapes dataset, i.e. predict our 3 shape classes (plus background) instead of COCO's 81 classes."
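, + "\n", + "These are exactly the parameters whose shapes depend on the number of classes: the box classifier (`cls_score`), the box regressor (`bbox_pred`) and the mask logits (`mask_fcn_logits`). As an optional sanity check (a sketch only, assuming the `base_model` built in the next cell), you can print their shapes and see that they are sized for COCO's 81 classes rather than for our 4:\n", + "\n", + "```python\n", + "# Optional check: the weights removed below are the class-dependent ones,\n", + "# so they cannot be reused for our 4-class (3 shapes + background) model.\n", + "for name, param in base_model.state_dict().items():\n", + "    if 'predictor' in name and any(k in name for k in ('cls_score', 'bbox_pred', 'mask_fcn_logits')):\n", + "        print(name, tuple(param.shape))\n", + "```"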
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ISFsxBxBDZcQ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "\n", + "def removekey(d, listofkeys):\n", + " r = dict(d)\n", + " for key in listofkeys:\n", + " print('key: {} is removed'.format(key))\n", + " r.pop(key)\n", + " return r\n", + " \n", + "logger_dir = 'log'\n", + "\n", + "if logger_dir:\n", + " mkdir(logger_dir)\n", + "\n", + "logger = setup_logger(\"maskrcnn_benchmark\", logger_dir, get_rank())\n", + "logger.info(\"Using {} GPUs\".format(1))\n", + "\n", + "config_file = \"base_config.yaml\"\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "\n", + "# Add these for printing class names over your predictions.\n", + "COCODemo.CATEGORIES = [\n", + " \"__background\",\n", + " \"square\",\n", + " \"circle\",\n", + " \"triangle\"\n", + "]\n", + "\n", + "demo = COCODemo(\n", + " cfg, \n", + " min_image_size=800,\n", + " confidence_threshold=0.7)\n", + "\n", + "base_model = demo.model\n", + "\n", + "# Removes pretrained weights from state dict\n", + "new_state_dict = removekey(base_model.state_dict(), [ \n", + " \"roi_heads.box.predictor.cls_score.weight\", \"roi_heads.box.predictor.cls_score.bias\", \n", + " \"roi_heads.box.predictor.bbox_pred.weight\", \"roi_heads.box.predictor.bbox_pred.bias\",\n", + " \"roi_heads.mask.predictor.mask_fcn_logits.weight\", \"roi_heads.mask.predictor.mask_fcn_logits.bias\"\n", + " ])\n", + "\n", + "# Save new state dict, we will use this as our starting weights for our fine-tuned model\n", + "torch.save(new_state_dict, \"base_model.pth\")\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bbCBInqHFUg7", + "colab_type": "text" + }, + "source": [ + "### Fine Tuned Model Config\n", + "\n", + "Here we define our shape Dataset config. The important fields are \n", + "\n", + "1. WEIGHT: which point to our `base_model.pth` saved in the previous step\n", + "2. NUM_CLASSES: Which define how many classes we will predict . Note that the number includes the background, hence our shapes dataset has 4 classes. 
" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5AhIiTgmFXyi", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "outputId": "94f21f80-5baa-4a67-945a-0bfd726f5d11" + }, + "source": [ + "%%writefile shapes_config.yaml\n", + "MODEL:\n", + " META_ARCHITECTURE: \"GeneralizedRCNN\"\n", + " WEIGHT: \"base_model.pth\"\n", + " BACKBONE:\n", + " CONV_BODY: \"R-50-FPN\"\n", + " RESNETS:\n", + " BACKBONE_OUT_CHANNELS: 256\n", + " RPN:\n", + " USE_FPN: True\n", + " ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n", + " PRE_NMS_TOP_N_TRAIN: 2000\n", + " PRE_NMS_TOP_N_TEST: 1000\n", + " POST_NMS_TOP_N_TEST: 1000\n", + " FPN_POST_NMS_TOP_N_TEST: 1000\n", + " ROI_HEADS:\n", + " USE_FPN: True\n", + " ROI_BOX_HEAD:\n", + " POOLER_RESOLUTION: 7\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " POOLER_SAMPLING_RATIO: 2\n", + " FEATURE_EXTRACTOR: \"FPN2MLPFeatureExtractor\"\n", + " PREDICTOR: \"FPNPredictor\"\n", + " NUM_CLASSES: 4 # background + num_classes : IMPORTANT dont forget to add this\n", + " ROI_MASK_HEAD:\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " FEATURE_EXTRACTOR: \"MaskRCNNFPNFeatureExtractor\"\n", + " PREDICTOR: \"MaskRCNNC4Predictor\"\n", + " POOLER_RESOLUTION: 14\n", + " POOLER_SAMPLING_RATIO: 2\n", + " RESOLUTION: 28\n", + " SHARE_BOX_FEATURE_EXTRACTOR: False\n", + " MASK_ON: True\n", + "DATALOADER:\n", + " SIZE_DIVISIBILITY: 32" + ], + "execution_count": 10, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing shapes_config.yaml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tAn3omCjTFGI", + "colab_type": "text" + }, + "source": [ + "### Data Loader\n", + "\n", + "This function creates a data loader with our shapes dataset. This data loader is used internally in the repo to train the model." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "oODu2UpVTHXz", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def build_data_loader(cfg, dataset, is_train=True, is_distributed=False, start_iter=0):\n", + " num_gpus = get_world_size()\n", + " if is_train:\n", + " images_per_batch = cfg.SOLVER.IMS_PER_BATCH\n", + " assert (\n", + " images_per_batch % num_gpus == 0\n", + " ), \"SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\".format(\n", + " images_per_batch, num_gpus)\n", + " images_per_gpu = images_per_batch // num_gpus\n", + " shuffle = True\n", + " num_iters = cfg.SOLVER.MAX_ITER\n", + " else:\n", + " images_per_batch = cfg.TEST.IMS_PER_BATCH\n", + " assert (\n", + " images_per_batch % num_gpus == 0\n", + " ), \"TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\".format(\n", + " images_per_batch, num_gpus)\n", + " images_per_gpu = images_per_batch // num_gpus\n", + " shuffle = False if not is_distributed else True\n", + " num_iters = None\n", + " start_iter = 0\n", + "\n", + " if images_per_gpu > 1:\n", + " logger = logging.getLogger(__name__)\n", + " logger.warning(\n", + " \"When using more than one image per GPU you may encounter \"\n", + " \"an out-of-memory (OOM) error if your GPU does not have \"\n", + " \"sufficient memory. If this happens, you can reduce \"\n", + " \"SOLVER.IMS_PER_BATCH (for training) or \"\n", + " \"TEST.IMS_PER_BATCH (for inference). For training, you must \"\n", + " \"also adjust the learning rate and schedule length according \"\n", + " \"to the linear scaling rule. 
See for example: \"\n", + " \"https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14\"\n", + " )\n", + "\n", + " # group images which have similar aspect ratio. In this case, we only\n", + " # group in two cases: those with width / height > 1, and the other way around,\n", + " # but the code supports more general grouping strategy\n", + " aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []\n", + "\n", + " paths_catalog = import_file(\n", + " \"maskrcnn_benchmark.config.paths_catalog\", cfg.PATHS_CATALOG, True\n", + " )\n", + " DatasetCatalog = paths_catalog.DatasetCatalog\n", + " dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST\n", + "\n", + " # If bbox aug is enabled in testing, simply set transforms to None and we will apply transforms later\n", + " transforms = None if not is_train and cfg.TEST.BBOX_AUG.ENABLED else build_transforms(cfg, is_train)\n", + " \n", + " dataset.transforms = transforms\n", + " datasets = [ dataset ]\n", + " \n", + " data_loaders = []\n", + " for dataset in datasets:\n", + " sampler = make_data_sampler(dataset, shuffle, is_distributed)\n", + " batch_sampler = make_batch_data_sampler(\n", + " dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter\n", + " )\n", + " collator = BBoxAugCollator() if not is_train and cfg.TEST.BBOX_AUG.ENABLED else \\\n", + " BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)\n", + " num_workers = cfg.DATALOADER.NUM_WORKERS\n", + " data_loader = torch.utils.data.DataLoader(\n", + " dataset,\n", + " num_workers=num_workers,\n", + " batch_sampler=batch_sampler,\n", + " collate_fn=collator,\n", + " )\n", + " data_loaders.append(data_loader)\n", + " if is_train:\n", + " # during training, a single (possibly concatenated) data_loader is returned\n", + " assert len(data_loaders) == 1\n", + " return data_loaders[0]\n", + " return data_loaders" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BTKsrHa-TkGr", + "colab_type": "text" + }, + "source": [ + "### Train Function\n", + "\n", + "The train function is the entry point into the training process. It creates data loaders, optimisers, loads from checkpoint. 
" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "LYTguCvrTnHW", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# See if we can use apex.DistributedDataParallel instead of the torch default,\n", + "# and enable mixed-precision via apex.amp\n", + "try:\n", + " from apex import amp\n", + "except ImportError:\n", + " raise ImportError('Use APEX for multi-precision via apex.amp')\n", + "\n", + "def train(cfg, local_rank, distributed, dataset):\n", + " model = build_detection_model(cfg)\n", + " device = torch.device('cuda')\n", + " model.to(device)\n", + "\n", + " optimizer = make_optimizer(cfg, model)\n", + " scheduler = make_lr_scheduler(cfg, optimizer)\n", + "\n", + " # Initialize mixed-precision training\n", + " use_mixed_precision = cfg.DTYPE == \"float16\"\n", + " amp_opt_level = 'O1' if use_mixed_precision else 'O0'\n", + " model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)\n", + "\n", + " if distributed:\n", + " model = torch.nn.parallel.DistributedDataParallel(\n", + " model, device_ids=[local_rank], output_device=local_rank,\n", + " # this should be removed if we update BatchNorm stats\n", + " broadcast_buffers=False,\n", + " )\n", + "\n", + " arguments = {}\n", + " arguments[\"iteration\"] = 0\n", + "\n", + " output_dir = cfg.OUTPUT_DIR\n", + " save_to_disk = get_rank() == 0\n", + " checkpointer = DetectronCheckpointer(\n", + " cfg, model, optimizer, scheduler, output_dir, save_to_disk\n", + " )\n", + " extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n", + " arguments.update(extra_checkpoint_data)\n", + "\n", + "\n", + " data_loader = build_data_loader(cfg, dataset)\n", + "\n", + " checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n", + "\n", + " do_train(\n", + " model,\n", + " data_loader,\n", + " optimizer,\n", + " scheduler,\n", + " checkpointer,\n", + " device,\n", + " checkpoint_period,\n", + " arguments,\n", + " )\n", + "\n", + " return model" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "r-SfVh-qCmhe", + "colab_type": "text" + }, + "source": [ + "## Set training config and train\n", + "\n", + "here we fire off training by calling the above function. before that we set some important config for our training. We make our dataset and update our config. Then we fire off training !" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ad0VIyZqVDXy", + "colab_type": "code", + "outputId": "3b5a93ff-9203-4530-fc86-de557fe4428c", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 6735 + } + }, + "source": [ + "config_file = \"shapes_config.yaml\"\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "cfg.merge_from_list(['OUTPUT_DIR', 'shapeDir']) # The output folder where all our model checkpoints will be saved during training.\n", + "cfg.merge_from_list(['SOLVER.IMS_PER_BATCH', 4]) # Number of images to take inside a single batch. This number depends on the size of your GPU\n", + "cfg.merge_from_list(['SOLVER.BASE_LR', 0.0050]) # The Learning Rate when training starts. Please check Detectron scaling rules to determine your learning for your GPU setup. \n", + "cfg.merge_from_list(['SOLVER.MAX_ITER', 1000]) # The number of training iterations that will be executed during training. 
One iteration is given as one forward and backward pass of a mini batch of the network\n", + "cfg.merge_from_list(['SOLVER.STEPS', \"(700, 800)\"]) # These two numbers represent after how many iterations is the learning rate divided by 10. \n", + "cfg.merge_from_list(['TEST.IMS_PER_BATCH', 1]) # Batch size during testing/evaluation\n", + "cfg.merge_from_list(['MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN', 4000]) # This determines how many region proposals to take in for processing into the stage after the RPN. The rule is 1000*batch_size = 4*1000 \n", + "cfg.merge_from_list(['SOLVER.CHECKPOINT_PERIOD', 100]) # After how many iterations does one want to save the model.\n", + "\n", + "# Make the Output dir if one doesnt exist.\n", + "output_dir = cfg.OUTPUT_DIR\n", + "if output_dir:\n", + " mkdir(output_dir)\n", + "\n", + "# Start training.\n", + "model = train(cfg, local_rank=1, distributed=False, dataset=ShapeDataset(500))" + ], + "execution_count": 15, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n", + "Selected optimization level O0: Pure FP32 training.\n", + "\n", + "Defaults for this optimization level are:\n", + "enabled : True\n", + "opt_level : O0\n", + "cast_model_type : torch.float32\n", + "patch_torch_functions : False\n", + "keep_batchnorm_fp32 : None\n", + "master_weights : False\n", + "loss_scale : 1.0\n", + "Processing user overrides (additional kwargs that are not None)...\n", + "After processing overrides, optimization options are:\n", + "enabled : True\n", + "opt_level : O0\n", + "cast_model_type : torch.float32\n", + "patch_torch_functions : False\n", + "keep_batchnorm_fp32 : None\n", + "master_weights : False\n", + "loss_scale : 1.0\n", + "2019-06-02 18:25:29,979 maskrcnn_benchmark.utils.checkpoint INFO: Loading checkpoint from base_model.pth\n", + "2019-06-02 18:25:30,069 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn1.bias loaded from backbone.body.layer1.0.bn1.bias of shape (64,)\n", + "2019-06-02 18:25:30,070 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn1.running_mean loaded from backbone.body.layer1.0.bn1.running_mean of shape (64,)\n", + "2019-06-02 18:25:30,074 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn1.running_var loaded from backbone.body.layer1.0.bn1.running_var of shape (64,)\n", + "2019-06-02 18:25:30,076 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn1.weight loaded from backbone.body.layer1.0.bn1.weight of shape (64,)\n", + "2019-06-02 18:25:30,077 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn2.bias loaded from backbone.body.layer1.0.bn2.bias of shape (64,)\n", + "2019-06-02 18:25:30,079 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn2.running_mean loaded from backbone.body.layer1.0.bn2.running_mean of shape (64,)\n", + "2019-06-02 18:25:30,080 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn2.running_var loaded from backbone.body.layer1.0.bn2.running_var of shape (64,)\n", + "2019-06-02 18:25:30,082 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn2.weight loaded from backbone.body.layer1.0.bn2.weight of shape (64,)\n", + "2019-06-02 18:25:30,084 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn3.bias loaded from backbone.body.layer1.0.bn3.bias of shape (256,)\n", + "2019-06-02 
18:25:30,085 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn3.running_mean loaded from backbone.body.layer1.0.bn3.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,087 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn3.running_var loaded from backbone.body.layer1.0.bn3.running_var of shape (256,)\n", + "2019-06-02 18:25:30,088 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.bn3.weight loaded from backbone.body.layer1.0.bn3.weight of shape (256,)\n", + "2019-06-02 18:25:30,089 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.conv1.weight loaded from backbone.body.layer1.0.conv1.weight of shape (64, 64, 1, 1)\n", + "2019-06-02 18:25:30,091 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.conv2.weight loaded from backbone.body.layer1.0.conv2.weight of shape (64, 64, 3, 3)\n", + "2019-06-02 18:25:30,092 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.conv3.weight loaded from backbone.body.layer1.0.conv3.weight of shape (256, 64, 1, 1)\n", + "2019-06-02 18:25:30,094 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.downsample.0.weight loaded from backbone.body.layer1.0.downsample.0.weight of shape (256, 64, 1, 1)\n", + "2019-06-02 18:25:30,095 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.downsample.1.bias loaded from backbone.body.layer1.0.downsample.1.bias of shape (256,)\n", + "2019-06-02 18:25:30,097 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.downsample.1.running_mean loaded from backbone.body.layer1.0.downsample.1.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,098 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.downsample.1.running_var loaded from backbone.body.layer1.0.downsample.1.running_var of shape (256,)\n", + "2019-06-02 18:25:30,100 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.0.downsample.1.weight loaded from backbone.body.layer1.0.downsample.1.weight of shape (256,)\n", + "2019-06-02 18:25:30,101 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn1.bias loaded from backbone.body.layer1.1.bn1.bias of shape (64,)\n", + "2019-06-02 18:25:30,103 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn1.running_mean loaded from backbone.body.layer1.1.bn1.running_mean of shape (64,)\n", + "2019-06-02 18:25:30,105 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn1.running_var loaded from backbone.body.layer1.1.bn1.running_var of shape (64,)\n", + "2019-06-02 18:25:30,107 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn1.weight loaded from backbone.body.layer1.1.bn1.weight of shape (64,)\n", + "2019-06-02 18:25:30,108 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn2.bias loaded from backbone.body.layer1.1.bn2.bias of shape (64,)\n", + "2019-06-02 18:25:30,110 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn2.running_mean loaded from backbone.body.layer1.1.bn2.running_mean of shape (64,)\n", + "2019-06-02 18:25:30,111 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn2.running_var loaded from backbone.body.layer1.1.bn2.running_var of shape (64,)\n", + "2019-06-02 18:25:30,112 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn2.weight loaded from backbone.body.layer1.1.bn2.weight 
of shape (64,)\n", + "2019-06-02 18:25:30,113 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn3.bias loaded from backbone.body.layer1.1.bn3.bias of shape (256,)\n", + "2019-06-02 18:25:30,115 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn3.running_mean loaded from backbone.body.layer1.1.bn3.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,116 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn3.running_var loaded from backbone.body.layer1.1.bn3.running_var of shape (256,)\n", + "2019-06-02 18:25:30,117 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.bn3.weight loaded from backbone.body.layer1.1.bn3.weight of shape (256,)\n", + "2019-06-02 18:25:30,118 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.conv1.weight loaded from backbone.body.layer1.1.conv1.weight of shape (64, 256, 1, 1)\n", + "2019-06-02 18:25:30,119 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.conv2.weight loaded from backbone.body.layer1.1.conv2.weight of shape (64, 64, 3, 3)\n", + "2019-06-02 18:25:30,121 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.1.conv3.weight loaded from backbone.body.layer1.1.conv3.weight of shape (256, 64, 1, 1)\n", + "2019-06-02 18:25:30,122 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn1.bias loaded from backbone.body.layer1.2.bn1.bias of shape (64,)\n", + "2019-06-02 18:25:30,123 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn1.running_mean loaded from backbone.body.layer1.2.bn1.running_mean of shape (64,)\n", + "2019-06-02 18:25:30,124 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn1.running_var loaded from backbone.body.layer1.2.bn1.running_var of shape (64,)\n", + "2019-06-02 18:25:30,126 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn1.weight loaded from backbone.body.layer1.2.bn1.weight of shape (64,)\n", + "2019-06-02 18:25:30,127 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn2.bias loaded from backbone.body.layer1.2.bn2.bias of shape (64,)\n", + "2019-06-02 18:25:30,128 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn2.running_mean loaded from backbone.body.layer1.2.bn2.running_mean of shape (64,)\n", + "2019-06-02 18:25:30,129 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn2.running_var loaded from backbone.body.layer1.2.bn2.running_var of shape (64,)\n", + "2019-06-02 18:25:30,130 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn2.weight loaded from backbone.body.layer1.2.bn2.weight of shape (64,)\n", + "2019-06-02 18:25:30,131 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn3.bias loaded from backbone.body.layer1.2.bn3.bias of shape (256,)\n", + "2019-06-02 18:25:30,133 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn3.running_mean loaded from backbone.body.layer1.2.bn3.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,134 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn3.running_var loaded from backbone.body.layer1.2.bn3.running_var of shape (256,)\n", + "2019-06-02 18:25:30,135 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.bn3.weight loaded from backbone.body.layer1.2.bn3.weight of shape (256,)\n", + "2019-06-02 18:25:30,136 
maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.conv1.weight loaded from backbone.body.layer1.2.conv1.weight of shape (64, 256, 1, 1)\n", + "2019-06-02 18:25:30,137 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.conv2.weight loaded from backbone.body.layer1.2.conv2.weight of shape (64, 64, 3, 3)\n", + "2019-06-02 18:25:30,138 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer1.2.conv3.weight loaded from backbone.body.layer1.2.conv3.weight of shape (256, 64, 1, 1)\n", + "2019-06-02 18:25:30,140 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn1.bias loaded from backbone.body.layer2.0.bn1.bias of shape (128,)\n", + "2019-06-02 18:25:30,141 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn1.running_mean loaded from backbone.body.layer2.0.bn1.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,142 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn1.running_var loaded from backbone.body.layer2.0.bn1.running_var of shape (128,)\n", + "2019-06-02 18:25:30,143 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn1.weight loaded from backbone.body.layer2.0.bn1.weight of shape (128,)\n", + "2019-06-02 18:25:30,144 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn2.bias loaded from backbone.body.layer2.0.bn2.bias of shape (128,)\n", + "2019-06-02 18:25:30,145 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn2.running_mean loaded from backbone.body.layer2.0.bn2.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,146 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn2.running_var loaded from backbone.body.layer2.0.bn2.running_var of shape (128,)\n", + "2019-06-02 18:25:30,147 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn2.weight loaded from backbone.body.layer2.0.bn2.weight of shape (128,)\n", + "2019-06-02 18:25:30,148 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn3.bias loaded from backbone.body.layer2.0.bn3.bias of shape (512,)\n", + "2019-06-02 18:25:30,149 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn3.running_mean loaded from backbone.body.layer2.0.bn3.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,150 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn3.running_var loaded from backbone.body.layer2.0.bn3.running_var of shape (512,)\n", + "2019-06-02 18:25:30,151 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.bn3.weight loaded from backbone.body.layer2.0.bn3.weight of shape (512,)\n", + "2019-06-02 18:25:30,152 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.conv1.weight loaded from backbone.body.layer2.0.conv1.weight of shape (128, 256, 1, 1)\n", + "2019-06-02 18:25:30,154 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.conv2.weight loaded from backbone.body.layer2.0.conv2.weight of shape (128, 128, 3, 3)\n", + "2019-06-02 18:25:30,155 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.conv3.weight loaded from backbone.body.layer2.0.conv3.weight of shape (512, 128, 1, 1)\n", + "2019-06-02 18:25:30,157 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.downsample.0.weight loaded from backbone.body.layer2.0.downsample.0.weight of shape (512, 256, 1, 1)\n", + "2019-06-02 18:25:30,158 
maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.downsample.1.bias loaded from backbone.body.layer2.0.downsample.1.bias of shape (512,)\n", + "2019-06-02 18:25:30,159 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.downsample.1.running_mean loaded from backbone.body.layer2.0.downsample.1.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,160 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.downsample.1.running_var loaded from backbone.body.layer2.0.downsample.1.running_var of shape (512,)\n", + "2019-06-02 18:25:30,161 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.0.downsample.1.weight loaded from backbone.body.layer2.0.downsample.1.weight of shape (512,)\n", + "2019-06-02 18:25:30,162 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn1.bias loaded from backbone.body.layer2.1.bn1.bias of shape (128,)\n", + "2019-06-02 18:25:30,163 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn1.running_mean loaded from backbone.body.layer2.1.bn1.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,164 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn1.running_var loaded from backbone.body.layer2.1.bn1.running_var of shape (128,)\n", + "2019-06-02 18:25:30,166 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn1.weight loaded from backbone.body.layer2.1.bn1.weight of shape (128,)\n", + "2019-06-02 18:25:30,167 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn2.bias loaded from backbone.body.layer2.1.bn2.bias of shape (128,)\n", + "2019-06-02 18:25:30,167 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn2.running_mean loaded from backbone.body.layer2.1.bn2.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,168 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn2.running_var loaded from backbone.body.layer2.1.bn2.running_var of shape (128,)\n", + "2019-06-02 18:25:30,169 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn2.weight loaded from backbone.body.layer2.1.bn2.weight of shape (128,)\n", + "2019-06-02 18:25:30,170 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn3.bias loaded from backbone.body.layer2.1.bn3.bias of shape (512,)\n", + "2019-06-02 18:25:30,171 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn3.running_mean loaded from backbone.body.layer2.1.bn3.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,172 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn3.running_var loaded from backbone.body.layer2.1.bn3.running_var of shape (512,)\n", + "2019-06-02 18:25:30,173 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.bn3.weight loaded from backbone.body.layer2.1.bn3.weight of shape (512,)\n", + "2019-06-02 18:25:30,174 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.conv1.weight loaded from backbone.body.layer2.1.conv1.weight of shape (128, 512, 1, 1)\n", + "2019-06-02 18:25:30,174 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.conv2.weight loaded from backbone.body.layer2.1.conv2.weight of shape (128, 128, 3, 3)\n", + "2019-06-02 18:25:30,175 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.1.conv3.weight loaded from backbone.body.layer2.1.conv3.weight of shape (512, 128, 1, 1)\n", + "2019-06-02 
18:25:30,176 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn1.bias loaded from backbone.body.layer2.2.bn1.bias of shape (128,)\n", + "2019-06-02 18:25:30,177 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn1.running_mean loaded from backbone.body.layer2.2.bn1.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,178 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn1.running_var loaded from backbone.body.layer2.2.bn1.running_var of shape (128,)\n", + "2019-06-02 18:25:30,178 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn1.weight loaded from backbone.body.layer2.2.bn1.weight of shape (128,)\n", + "2019-06-02 18:25:30,179 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn2.bias loaded from backbone.body.layer2.2.bn2.bias of shape (128,)\n", + "2019-06-02 18:25:30,180 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn2.running_mean loaded from backbone.body.layer2.2.bn2.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,181 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn2.running_var loaded from backbone.body.layer2.2.bn2.running_var of shape (128,)\n", + "2019-06-02 18:25:30,182 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn2.weight loaded from backbone.body.layer2.2.bn2.weight of shape (128,)\n", + "2019-06-02 18:25:30,183 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn3.bias loaded from backbone.body.layer2.2.bn3.bias of shape (512,)\n", + "2019-06-02 18:25:30,184 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn3.running_mean loaded from backbone.body.layer2.2.bn3.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,184 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn3.running_var loaded from backbone.body.layer2.2.bn3.running_var of shape (512,)\n", + "2019-06-02 18:25:30,186 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.bn3.weight loaded from backbone.body.layer2.2.bn3.weight of shape (512,)\n", + "2019-06-02 18:25:30,186 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.conv1.weight loaded from backbone.body.layer2.2.conv1.weight of shape (128, 512, 1, 1)\n", + "2019-06-02 18:25:30,187 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.conv2.weight loaded from backbone.body.layer2.2.conv2.weight of shape (128, 128, 3, 3)\n", + "2019-06-02 18:25:30,188 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.2.conv3.weight loaded from backbone.body.layer2.2.conv3.weight of shape (512, 128, 1, 1)\n", + "2019-06-02 18:25:30,189 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn1.bias loaded from backbone.body.layer2.3.bn1.bias of shape (128,)\n", + "2019-06-02 18:25:30,190 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn1.running_mean loaded from backbone.body.layer2.3.bn1.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,190 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn1.running_var loaded from backbone.body.layer2.3.bn1.running_var of shape (128,)\n", + "2019-06-02 18:25:30,191 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn1.weight loaded from backbone.body.layer2.3.bn1.weight of shape (128,)\n", + "2019-06-02 18:25:30,192 maskrcnn_benchmark.utils.model_serialization 
INFO: backbone.body.layer2.3.bn2.bias loaded from backbone.body.layer2.3.bn2.bias of shape (128,)\n", + "2019-06-02 18:25:30,193 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn2.running_mean loaded from backbone.body.layer2.3.bn2.running_mean of shape (128,)\n", + "2019-06-02 18:25:30,194 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn2.running_var loaded from backbone.body.layer2.3.bn2.running_var of shape (128,)\n", + "2019-06-02 18:25:30,195 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn2.weight loaded from backbone.body.layer2.3.bn2.weight of shape (128,)\n", + "2019-06-02 18:25:30,196 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn3.bias loaded from backbone.body.layer2.3.bn3.bias of shape (512,)\n", + "2019-06-02 18:25:30,197 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn3.running_mean loaded from backbone.body.layer2.3.bn3.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,200 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn3.running_var loaded from backbone.body.layer2.3.bn3.running_var of shape (512,)\n", + "2019-06-02 18:25:30,201 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.bn3.weight loaded from backbone.body.layer2.3.bn3.weight of shape (512,)\n", + "2019-06-02 18:25:30,202 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.conv1.weight loaded from backbone.body.layer2.3.conv1.weight of shape (128, 512, 1, 1)\n", + "2019-06-02 18:25:30,203 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.conv2.weight loaded from backbone.body.layer2.3.conv2.weight of shape (128, 128, 3, 3)\n", + "2019-06-02 18:25:30,204 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer2.3.conv3.weight loaded from backbone.body.layer2.3.conv3.weight of shape (512, 128, 1, 1)\n", + "2019-06-02 18:25:30,205 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn1.bias loaded from backbone.body.layer3.0.bn1.bias of shape (256,)\n", + "2019-06-02 18:25:30,206 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn1.running_mean loaded from backbone.body.layer3.0.bn1.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,207 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn1.running_var loaded from backbone.body.layer3.0.bn1.running_var of shape (256,)\n", + "2019-06-02 18:25:30,208 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn1.weight loaded from backbone.body.layer3.0.bn1.weight of shape (256,)\n", + "2019-06-02 18:25:30,211 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn2.bias loaded from backbone.body.layer3.0.bn2.bias of shape (256,)\n", + "2019-06-02 18:25:30,212 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn2.running_mean loaded from backbone.body.layer3.0.bn2.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,214 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn2.running_var loaded from backbone.body.layer3.0.bn2.running_var of shape (256,)\n", + "2019-06-02 18:25:30,216 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn2.weight loaded from backbone.body.layer3.0.bn2.weight of shape (256,)\n", + "2019-06-02 18:25:30,217 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn3.bias loaded from 
backbone.body.layer3.0.bn3.bias of shape (1024,)\n", + "2019-06-02 18:25:30,218 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn3.running_mean loaded from backbone.body.layer3.0.bn3.running_mean of shape (1024,)\n", + "2019-06-02 18:25:30,220 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn3.running_var loaded from backbone.body.layer3.0.bn3.running_var of shape (1024,)\n", + "2019-06-02 18:25:30,221 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.bn3.weight loaded from backbone.body.layer3.0.bn3.weight of shape (1024,)\n", + "2019-06-02 18:25:30,222 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.conv1.weight loaded from backbone.body.layer3.0.conv1.weight of shape (256, 512, 1, 1)\n", + "2019-06-02 18:25:30,224 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.conv2.weight loaded from backbone.body.layer3.0.conv2.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,226 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.conv3.weight loaded from backbone.body.layer3.0.conv3.weight of shape (1024, 256, 1, 1)\n", + "2019-06-02 18:25:30,227 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.downsample.0.weight loaded from backbone.body.layer3.0.downsample.0.weight of shape (1024, 512, 1, 1)\n", + "2019-06-02 18:25:30,228 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.downsample.1.bias loaded from backbone.body.layer3.0.downsample.1.bias of shape (1024,)\n", + "2019-06-02 18:25:30,230 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.downsample.1.running_mean loaded from backbone.body.layer3.0.downsample.1.running_mean of shape (1024,)\n", + "2019-06-02 18:25:30,231 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.downsample.1.running_var loaded from backbone.body.layer3.0.downsample.1.running_var of shape (1024,)\n", + "2019-06-02 18:25:30,232 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.0.downsample.1.weight loaded from backbone.body.layer3.0.downsample.1.weight of shape (1024,)\n", + "2019-06-02 18:25:30,234 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn1.bias loaded from backbone.body.layer3.1.bn1.bias of shape (256,)\n", + "2019-06-02 18:25:30,236 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn1.running_mean loaded from backbone.body.layer3.1.bn1.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,237 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn1.running_var loaded from backbone.body.layer3.1.bn1.running_var of shape (256,)\n", + "2019-06-02 18:25:30,238 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn1.weight loaded from backbone.body.layer3.1.bn1.weight of shape (256,)\n", + "2019-06-02 18:25:30,240 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn2.bias loaded from backbone.body.layer3.1.bn2.bias of shape (256,)\n", + "2019-06-02 18:25:30,241 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn2.running_mean loaded from backbone.body.layer3.1.bn2.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,243 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn2.running_var loaded from backbone.body.layer3.1.bn2.running_var of shape (256,)\n", + "2019-06-02 18:25:30,245 
maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn2.weight loaded from backbone.body.layer3.1.bn2.weight of shape (256,)\n", + "2019-06-02 18:25:30,246 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn3.bias loaded from backbone.body.layer3.1.bn3.bias of shape (1024,)\n", + "2019-06-02 18:25:30,248 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn3.running_mean loaded from backbone.body.layer3.1.bn3.running_mean of shape (1024,)\n", + "2019-06-02 18:25:30,250 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn3.running_var loaded from backbone.body.layer3.1.bn3.running_var of shape (1024,)\n", + "2019-06-02 18:25:30,252 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.bn3.weight loaded from backbone.body.layer3.1.bn3.weight of shape (1024,)\n", + "2019-06-02 18:25:30,253 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.conv1.weight loaded from backbone.body.layer3.1.conv1.weight of shape (256, 1024, 1, 1)\n", + "2019-06-02 18:25:30,255 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.conv2.weight loaded from backbone.body.layer3.1.conv2.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,256 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.1.conv3.weight loaded from backbone.body.layer3.1.conv3.weight of shape (1024, 256, 1, 1)\n", + "2019-06-02 18:25:30,257 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn1.bias loaded from backbone.body.layer3.2.bn1.bias of shape (256,)\n", + "2019-06-02 18:25:30,260 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn1.running_mean loaded from backbone.body.layer3.2.bn1.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,265 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn1.running_var loaded from backbone.body.layer3.2.bn1.running_var of shape (256,)\n", + "2019-06-02 18:25:30,266 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn1.weight loaded from backbone.body.layer3.2.bn1.weight of shape (256,)\n", + "2019-06-02 18:25:30,267 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn2.bias loaded from backbone.body.layer3.2.bn2.bias of shape (256,)\n", + "2019-06-02 18:25:30,268 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn2.running_mean loaded from backbone.body.layer3.2.bn2.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,270 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn2.running_var loaded from backbone.body.layer3.2.bn2.running_var of shape (256,)\n", + "2019-06-02 18:25:30,271 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn2.weight loaded from backbone.body.layer3.2.bn2.weight of shape (256,)\n", + "2019-06-02 18:25:30,272 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn3.bias loaded from backbone.body.layer3.2.bn3.bias of shape (1024,)\n", + "2019-06-02 18:25:30,275 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn3.running_mean loaded from backbone.body.layer3.2.bn3.running_mean of shape (1024,)\n", + "2019-06-02 18:25:30,276 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.bn3.running_var loaded from backbone.body.layer3.2.bn3.running_var of shape (1024,)\n", + "2019-06-02 18:25:30,277 maskrcnn_benchmark.utils.model_serialization 
INFO: backbone.body.layer3.2.bn3.weight loaded from backbone.body.layer3.2.bn3.weight of shape (1024,)\n", + "2019-06-02 18:25:30,278 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.conv1.weight loaded from backbone.body.layer3.2.conv1.weight of shape (256, 1024, 1, 1)\n", + "2019-06-02 18:25:30,281 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.conv2.weight loaded from backbone.body.layer3.2.conv2.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,282 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.2.conv3.weight loaded from backbone.body.layer3.2.conv3.weight of shape (1024, 256, 1, 1)\n", + "2019-06-02 18:25:30,284 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn1.bias loaded from backbone.body.layer3.3.bn1.bias of shape (256,)\n", + "2019-06-02 18:25:30,285 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn1.running_mean loaded from backbone.body.layer3.3.bn1.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,286 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn1.running_var loaded from backbone.body.layer3.3.bn1.running_var of shape (256,)\n", + "2019-06-02 18:25:30,288 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn1.weight loaded from backbone.body.layer3.3.bn1.weight of shape (256,)\n", + "2019-06-02 18:25:30,290 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn2.bias loaded from backbone.body.layer3.3.bn2.bias of shape (256,)\n", + "2019-06-02 18:25:30,291 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn2.running_mean loaded from backbone.body.layer3.3.bn2.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,293 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn2.running_var loaded from backbone.body.layer3.3.bn2.running_var of shape (256,)\n", + "2019-06-02 18:25:30,295 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn2.weight loaded from backbone.body.layer3.3.bn2.weight of shape (256,)\n", + "2019-06-02 18:25:30,296 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn3.bias loaded from backbone.body.layer3.3.bn3.bias of shape (1024,)\n", + "2019-06-02 18:25:30,297 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn3.running_mean loaded from backbone.body.layer3.3.bn3.running_mean of shape (1024,)\n", + "2019-06-02 18:25:30,299 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn3.running_var loaded from backbone.body.layer3.3.bn3.running_var of shape (1024,)\n", + "2019-06-02 18:25:30,300 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.bn3.weight loaded from backbone.body.layer3.3.bn3.weight of shape (1024,)\n", + "2019-06-02 18:25:30,301 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.conv1.weight loaded from backbone.body.layer3.3.conv1.weight of shape (256, 1024, 1, 1)\n", + "2019-06-02 18:25:30,303 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.conv2.weight loaded from backbone.body.layer3.3.conv2.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,304 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.3.conv3.weight loaded from backbone.body.layer3.3.conv3.weight of shape (1024, 256, 1, 1)\n", + "2019-06-02 18:25:30,305 maskrcnn_benchmark.utils.model_serialization INFO: 
backbone.body.layer3.4.bn1.bias loaded from backbone.body.layer3.4.bn1.bias of shape (256,)\n", + "2019-06-02 18:25:30,306 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn1.running_mean loaded from backbone.body.layer3.4.bn1.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,307 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn1.running_var loaded from backbone.body.layer3.4.bn1.running_var of shape (256,)\n", + "2019-06-02 18:25:30,310 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn1.weight loaded from backbone.body.layer3.4.bn1.weight of shape (256,)\n", + "2019-06-02 18:25:30,311 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn2.bias loaded from backbone.body.layer3.4.bn2.bias of shape (256,)\n", + "2019-06-02 18:25:30,313 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn2.running_mean loaded from backbone.body.layer3.4.bn2.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,314 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn2.running_var loaded from backbone.body.layer3.4.bn2.running_var of shape (256,)\n", + "2019-06-02 18:25:30,316 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn2.weight loaded from backbone.body.layer3.4.bn2.weight of shape (256,)\n", + "2019-06-02 18:25:30,317 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn3.bias loaded from backbone.body.layer3.4.bn3.bias of shape (1024,)\n", + "2019-06-02 18:25:30,319 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn3.running_mean loaded from backbone.body.layer3.4.bn3.running_mean of shape (1024,)\n", + "2019-06-02 18:25:30,320 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn3.running_var loaded from backbone.body.layer3.4.bn3.running_var of shape (1024,)\n", + "2019-06-02 18:25:30,322 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.bn3.weight loaded from backbone.body.layer3.4.bn3.weight of shape (1024,)\n", + "2019-06-02 18:25:30,324 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.conv1.weight loaded from backbone.body.layer3.4.conv1.weight of shape (256, 1024, 1, 1)\n", + "2019-06-02 18:25:30,325 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.conv2.weight loaded from backbone.body.layer3.4.conv2.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,326 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.4.conv3.weight loaded from backbone.body.layer3.4.conv3.weight of shape (1024, 256, 1, 1)\n", + "2019-06-02 18:25:30,327 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn1.bias loaded from backbone.body.layer3.5.bn1.bias of shape (256,)\n", + "2019-06-02 18:25:30,329 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn1.running_mean loaded from backbone.body.layer3.5.bn1.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,331 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn1.running_var loaded from backbone.body.layer3.5.bn1.running_var of shape (256,)\n", + "2019-06-02 18:25:30,332 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn1.weight loaded from backbone.body.layer3.5.bn1.weight of shape (256,)\n", + "2019-06-02 18:25:30,334 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn2.bias loaded from 
backbone.body.layer3.5.bn2.bias of shape (256,)\n", + "2019-06-02 18:25:30,335 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn2.running_mean loaded from backbone.body.layer3.5.bn2.running_mean of shape (256,)\n", + "2019-06-02 18:25:30,336 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn2.running_var loaded from backbone.body.layer3.5.bn2.running_var of shape (256,)\n", + "2019-06-02 18:25:30,338 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn2.weight loaded from backbone.body.layer3.5.bn2.weight of shape (256,)\n", + "2019-06-02 18:25:30,339 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn3.bias loaded from backbone.body.layer3.5.bn3.bias of shape (1024,)\n", + "2019-06-02 18:25:30,341 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn3.running_mean loaded from backbone.body.layer3.5.bn3.running_mean of shape (1024,)\n", + "2019-06-02 18:25:30,342 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn3.running_var loaded from backbone.body.layer3.5.bn3.running_var of shape (1024,)\n", + "2019-06-02 18:25:30,343 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.bn3.weight loaded from backbone.body.layer3.5.bn3.weight of shape (1024,)\n", + "2019-06-02 18:25:30,344 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.conv1.weight loaded from backbone.body.layer3.5.conv1.weight of shape (256, 1024, 1, 1)\n", + "2019-06-02 18:25:30,346 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.conv2.weight loaded from backbone.body.layer3.5.conv2.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,348 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer3.5.conv3.weight loaded from backbone.body.layer3.5.conv3.weight of shape (1024, 256, 1, 1)\n", + "2019-06-02 18:25:30,350 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn1.bias loaded from backbone.body.layer4.0.bn1.bias of shape (512,)\n", + "2019-06-02 18:25:30,351 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn1.running_mean loaded from backbone.body.layer4.0.bn1.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,352 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn1.running_var loaded from backbone.body.layer4.0.bn1.running_var of shape (512,)\n", + "2019-06-02 18:25:30,354 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn1.weight loaded from backbone.body.layer4.0.bn1.weight of shape (512,)\n", + "2019-06-02 18:25:30,354 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn2.bias loaded from backbone.body.layer4.0.bn2.bias of shape (512,)\n", + "2019-06-02 18:25:30,356 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn2.running_mean loaded from backbone.body.layer4.0.bn2.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,359 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn2.running_var loaded from backbone.body.layer4.0.bn2.running_var of shape (512,)\n", + "2019-06-02 18:25:30,360 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn2.weight loaded from backbone.body.layer4.0.bn2.weight of shape (512,)\n", + "2019-06-02 18:25:30,362 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn3.bias loaded from backbone.body.layer4.0.bn3.bias of shape (2048,)\n", 
+ "2019-06-02 18:25:30,363 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn3.running_mean loaded from backbone.body.layer4.0.bn3.running_mean of shape (2048,)\n", + "2019-06-02 18:25:30,364 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn3.running_var loaded from backbone.body.layer4.0.bn3.running_var of shape (2048,)\n", + "2019-06-02 18:25:30,366 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.bn3.weight loaded from backbone.body.layer4.0.bn3.weight of shape (2048,)\n", + "2019-06-02 18:25:30,368 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.conv1.weight loaded from backbone.body.layer4.0.conv1.weight of shape (512, 1024, 1, 1)\n", + "2019-06-02 18:25:30,369 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.conv2.weight loaded from backbone.body.layer4.0.conv2.weight of shape (512, 512, 3, 3)\n", + "2019-06-02 18:25:30,371 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.conv3.weight loaded from backbone.body.layer4.0.conv3.weight of shape (2048, 512, 1, 1)\n", + "2019-06-02 18:25:30,372 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.downsample.0.weight loaded from backbone.body.layer4.0.downsample.0.weight of shape (2048, 1024, 1, 1)\n", + "2019-06-02 18:25:30,373 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.downsample.1.bias loaded from backbone.body.layer4.0.downsample.1.bias of shape (2048,)\n", + "2019-06-02 18:25:30,374 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.downsample.1.running_mean loaded from backbone.body.layer4.0.downsample.1.running_mean of shape (2048,)\n", + "2019-06-02 18:25:30,376 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.downsample.1.running_var loaded from backbone.body.layer4.0.downsample.1.running_var of shape (2048,)\n", + "2019-06-02 18:25:30,378 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.0.downsample.1.weight loaded from backbone.body.layer4.0.downsample.1.weight of shape (2048,)\n", + "2019-06-02 18:25:30,380 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn1.bias loaded from backbone.body.layer4.1.bn1.bias of shape (512,)\n", + "2019-06-02 18:25:30,381 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn1.running_mean loaded from backbone.body.layer4.1.bn1.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,383 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn1.running_var loaded from backbone.body.layer4.1.bn1.running_var of shape (512,)\n", + "2019-06-02 18:25:30,384 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn1.weight loaded from backbone.body.layer4.1.bn1.weight of shape (512,)\n", + "2019-06-02 18:25:30,385 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn2.bias loaded from backbone.body.layer4.1.bn2.bias of shape (512,)\n", + "2019-06-02 18:25:30,387 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn2.running_mean loaded from backbone.body.layer4.1.bn2.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,388 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn2.running_var loaded from backbone.body.layer4.1.bn2.running_var of shape (512,)\n", + "2019-06-02 18:25:30,389 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn2.weight loaded 
from backbone.body.layer4.1.bn2.weight of shape (512,)\n", + "2019-06-02 18:25:30,390 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn3.bias loaded from backbone.body.layer4.1.bn3.bias of shape (2048,)\n", + "2019-06-02 18:25:30,392 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn3.running_mean loaded from backbone.body.layer4.1.bn3.running_mean of shape (2048,)\n", + "2019-06-02 18:25:30,393 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn3.running_var loaded from backbone.body.layer4.1.bn3.running_var of shape (2048,)\n", + "2019-06-02 18:25:30,394 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.bn3.weight loaded from backbone.body.layer4.1.bn3.weight of shape (2048,)\n", + "2019-06-02 18:25:30,397 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.conv1.weight loaded from backbone.body.layer4.1.conv1.weight of shape (512, 2048, 1, 1)\n", + "2019-06-02 18:25:30,398 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.conv2.weight loaded from backbone.body.layer4.1.conv2.weight of shape (512, 512, 3, 3)\n", + "2019-06-02 18:25:30,399 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.1.conv3.weight loaded from backbone.body.layer4.1.conv3.weight of shape (2048, 512, 1, 1)\n", + "2019-06-02 18:25:30,401 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn1.bias loaded from backbone.body.layer4.2.bn1.bias of shape (512,)\n", + "2019-06-02 18:25:30,402 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn1.running_mean loaded from backbone.body.layer4.2.bn1.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,403 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn1.running_var loaded from backbone.body.layer4.2.bn1.running_var of shape (512,)\n", + "2019-06-02 18:25:30,405 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn1.weight loaded from backbone.body.layer4.2.bn1.weight of shape (512,)\n", + "2019-06-02 18:25:30,406 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn2.bias loaded from backbone.body.layer4.2.bn2.bias of shape (512,)\n", + "2019-06-02 18:25:30,408 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn2.running_mean loaded from backbone.body.layer4.2.bn2.running_mean of shape (512,)\n", + "2019-06-02 18:25:30,410 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn2.running_var loaded from backbone.body.layer4.2.bn2.running_var of shape (512,)\n", + "2019-06-02 18:25:30,411 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn2.weight loaded from backbone.body.layer4.2.bn2.weight of shape (512,)\n", + "2019-06-02 18:25:30,413 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn3.bias loaded from backbone.body.layer4.2.bn3.bias of shape (2048,)\n", + "2019-06-02 18:25:30,414 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn3.running_mean loaded from backbone.body.layer4.2.bn3.running_mean of shape (2048,)\n", + "2019-06-02 18:25:30,415 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn3.running_var loaded from backbone.body.layer4.2.bn3.running_var of shape (2048,)\n", + "2019-06-02 18:25:30,417 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.bn3.weight loaded from backbone.body.layer4.2.bn3.weight of shape 
(2048,)\n", + "2019-06-02 18:25:30,421 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.conv1.weight loaded from backbone.body.layer4.2.conv1.weight of shape (512, 2048, 1, 1)\n", + "2019-06-02 18:25:30,423 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.conv2.weight loaded from backbone.body.layer4.2.conv2.weight of shape (512, 512, 3, 3)\n", + "2019-06-02 18:25:30,424 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.layer4.2.conv3.weight loaded from backbone.body.layer4.2.conv3.weight of shape (2048, 512, 1, 1)\n", + "2019-06-02 18:25:30,425 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.stem.bn1.bias loaded from backbone.body.stem.bn1.bias of shape (64,)\n", + "2019-06-02 18:25:30,426 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.stem.bn1.running_mean loaded from backbone.body.stem.bn1.running_mean of shape (64,)\n", + "2019-06-02 18:25:30,428 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.stem.bn1.running_var loaded from backbone.body.stem.bn1.running_var of shape (64,)\n", + "2019-06-02 18:25:30,429 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.stem.bn1.weight loaded from backbone.body.stem.bn1.weight of shape (64,)\n", + "2019-06-02 18:25:30,431 maskrcnn_benchmark.utils.model_serialization INFO: backbone.body.stem.conv1.weight loaded from backbone.body.stem.conv1.weight of shape (64, 3, 7, 7)\n", + "2019-06-02 18:25:30,432 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner1.bias loaded from backbone.fpn.fpn_inner1.bias of shape (256,)\n", + "2019-06-02 18:25:30,434 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner1.weight loaded from backbone.fpn.fpn_inner1.weight of shape (256, 256, 1, 1)\n", + "2019-06-02 18:25:30,435 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner2.bias loaded from backbone.fpn.fpn_inner2.bias of shape (256,)\n", + "2019-06-02 18:25:30,437 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner2.weight loaded from backbone.fpn.fpn_inner2.weight of shape (256, 512, 1, 1)\n", + "2019-06-02 18:25:30,438 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner3.bias loaded from backbone.fpn.fpn_inner3.bias of shape (256,)\n", + "2019-06-02 18:25:30,439 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner3.weight loaded from backbone.fpn.fpn_inner3.weight of shape (256, 1024, 1, 1)\n", + "2019-06-02 18:25:30,442 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner4.bias loaded from backbone.fpn.fpn_inner4.bias of shape (256,)\n", + "2019-06-02 18:25:30,443 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_inner4.weight loaded from backbone.fpn.fpn_inner4.weight of shape (256, 2048, 1, 1)\n", + "2019-06-02 18:25:30,445 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer1.bias loaded from backbone.fpn.fpn_layer1.bias of shape (256,)\n", + "2019-06-02 18:25:30,446 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer1.weight loaded from backbone.fpn.fpn_layer1.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,448 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer2.bias loaded from backbone.fpn.fpn_layer2.bias of shape (256,)\n", + "2019-06-02 18:25:30,449 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer2.weight loaded from backbone.fpn.fpn_layer2.weight of shape (256, 
256, 3, 3)\n", + "2019-06-02 18:25:30,451 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer3.bias loaded from backbone.fpn.fpn_layer3.bias of shape (256,)\n", + "2019-06-02 18:25:30,452 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer3.weight loaded from backbone.fpn.fpn_layer3.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,454 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer4.bias loaded from backbone.fpn.fpn_layer4.bias of shape (256,)\n", + "2019-06-02 18:25:30,455 maskrcnn_benchmark.utils.model_serialization INFO: backbone.fpn.fpn_layer4.weight loaded from backbone.fpn.fpn_layer4.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,456 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.box.feature_extractor.fc6.bias loaded from roi_heads.box.feature_extractor.fc6.bias of shape (1024,)\n", + "2019-06-02 18:25:30,458 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.box.feature_extractor.fc6.weight loaded from roi_heads.box.feature_extractor.fc6.weight of shape (1024, 12544)\n", + "2019-06-02 18:25:30,460 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.box.feature_extractor.fc7.bias loaded from roi_heads.box.feature_extractor.fc7.bias of shape (1024,)\n", + "2019-06-02 18:25:30,461 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.box.feature_extractor.fc7.weight loaded from roi_heads.box.feature_extractor.fc7.weight of shape (1024, 1024)\n", + "2019-06-02 18:25:30,462 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn1.bias loaded from roi_heads.mask.feature_extractor.mask_fcn1.bias of shape (256,)\n", + "2019-06-02 18:25:30,463 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn1.weight loaded from roi_heads.mask.feature_extractor.mask_fcn1.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,464 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn2.bias loaded from roi_heads.mask.feature_extractor.mask_fcn2.bias of shape (256,)\n", + "2019-06-02 18:25:30,466 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn2.weight loaded from roi_heads.mask.feature_extractor.mask_fcn2.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,467 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn3.bias loaded from roi_heads.mask.feature_extractor.mask_fcn3.bias of shape (256,)\n", + "2019-06-02 18:25:30,469 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn3.weight loaded from roi_heads.mask.feature_extractor.mask_fcn3.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,470 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn4.bias loaded from roi_heads.mask.feature_extractor.mask_fcn4.bias of shape (256,)\n", + "2019-06-02 18:25:30,471 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.feature_extractor.mask_fcn4.weight loaded from roi_heads.mask.feature_extractor.mask_fcn4.weight of shape (256, 256, 3, 3)\n", + "2019-06-02 18:25:30,473 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.predictor.conv5_mask.bias loaded from roi_heads.mask.predictor.conv5_mask.bias of shape (256,)\n", + "2019-06-02 18:25:30,474 maskrcnn_benchmark.utils.model_serialization INFO: roi_heads.mask.predictor.conv5_mask.weight loaded from 
roi_heads.mask.predictor.conv5_mask.weight of shape (256, 256, 2, 2)\n", + "2019-06-02 18:25:30,475 maskrcnn_benchmark.utils.model_serialization INFO: rpn.anchor_generator.cell_anchors.0 loaded from rpn.anchor_generator.cell_anchors.0 of shape (3, 4)\n", + "2019-06-02 18:25:30,477 maskrcnn_benchmark.utils.model_serialization INFO: rpn.anchor_generator.cell_anchors.1 loaded from rpn.anchor_generator.cell_anchors.1 of shape (3, 4)\n", + "2019-06-02 18:25:30,479 maskrcnn_benchmark.utils.model_serialization INFO: rpn.anchor_generator.cell_anchors.2 loaded from rpn.anchor_generator.cell_anchors.2 of shape (3, 4)\n", + "2019-06-02 18:25:30,480 maskrcnn_benchmark.utils.model_serialization INFO: rpn.anchor_generator.cell_anchors.3 loaded from rpn.anchor_generator.cell_anchors.3 of shape (3, 4)\n", + "2019-06-02 18:25:30,482 maskrcnn_benchmark.utils.model_serialization INFO: rpn.anchor_generator.cell_anchors.4 loaded from rpn.anchor_generator.cell_anchors.4 of shape (3, 4)\n", + "2019-06-02 18:25:30,484 maskrcnn_benchmark.utils.model_serialization INFO: rpn.head.bbox_pred.bias loaded from rpn.head.bbox_pred.bias of shape (12,)\n", + "2019-06-02 18:25:30,485 maskrcnn_benchmark.utils.model_serialization INFO: rpn.head.bbox_pred.weight loaded from rpn.head.bbox_pred.weight of shape (12, 256, 1, 1)\n", + "2019-06-02 18:25:30,486 maskrcnn_benchmark.utils.model_serialization INFO: rpn.head.cls_logits.bias loaded from rpn.head.cls_logits.bias of shape (3,)\n", + "2019-06-02 18:25:30,488 maskrcnn_benchmark.utils.model_serialization INFO: rpn.head.cls_logits.weight loaded from rpn.head.cls_logits.weight of shape (3, 256, 1, 1)\n", + "2019-06-02 18:25:30,489 maskrcnn_benchmark.utils.model_serialization INFO: rpn.head.conv.bias loaded from rpn.head.conv.bias of shape (256,)\n", + "2019-06-02 18:25:30,491 maskrcnn_benchmark.utils.model_serialization INFO: rpn.head.conv.weight loaded from rpn.head.conv.weight of shape (256, 256, 3, 3)\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "When using more than one image per GPU you may encounter an out-of-memory (OOM) error if your GPU does not have sufficient memory. If this happens, you can reduce SOLVER.IMS_PER_BATCH (for training) or TEST.IMS_PER_BATCH (for inference). For training, you must also adjust the learning rate and schedule length according to the linear scaling rule. 
See for example: https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "2019-06-02 18:25:30,618 maskrcnn_benchmark.trainer INFO: Start training\n", + "2019-06-02 18:26:01,757 maskrcnn_benchmark.trainer INFO: eta: 0:25:25 iter: 20 loss: 1.1445 (1.5619) loss_classifier: 0.2888 (0.4150) loss_box_reg: 0.1517 (0.1493) loss_mask: 0.6485 (0.9632) loss_objectness: 0.0139 (0.0225) loss_rpn_box_reg: 0.0110 (0.0118) time: 1.5431 (1.5567) data: 0.0098 (0.0571) lr: 0.001800 max mem: 3682\n", + "2019-06-02 18:26:39,893 maskrcnn_benchmark.trainer INFO: eta: 0:27:42 iter: 40 loss: 1.1412 (1.3516) loss_classifier: 0.3149 (0.3693) loss_box_reg: 0.2243 (0.1846) loss_mask: 0.5719 (0.7712) loss_objectness: 0.0105 (0.0165) loss_rpn_box_reg: 0.0080 (0.0100) time: 1.8800 (1.7318) data: 0.0097 (0.0336) lr: 0.001933 max mem: 3964\n", + "2019-06-02 18:27:17,478 maskrcnn_benchmark.trainer INFO: eta: 0:27:54 iter: 60 loss: 1.0707 (1.2536) loss_classifier: 0.2933 (0.3452) loss_box_reg: 0.2103 (0.1916) loss_mask: 0.5354 (0.6944) loss_objectness: 0.0071 (0.0133) loss_rpn_box_reg: 0.0077 (0.0092) time: 1.8667 (1.7809) data: 0.0097 (0.0257) lr: 0.002067 max mem: 3964\n", + "2019-06-02 18:27:51,484 maskrcnn_benchmark.trainer INFO: eta: 0:26:59 iter: 80 loss: 0.8470 (1.1564) loss_classifier: 0.2146 (0.3149) loss_box_reg: 0.1536 (0.1821) loss_mask: 0.4718 (0.6395) loss_objectness: 0.0047 (0.0113) loss_rpn_box_reg: 0.0064 (0.0087) time: 1.6526 (1.7608) data: 0.0098 (0.0219) lr: 0.002200 max mem: 3964\n", + "2019-06-02 18:28:24,797 maskrcnn_benchmark.trainer INFO: eta: 0:26:07 iter: 100 loss: 0.6682 (1.0617) loss_classifier: 0.1826 (0.2883) loss_box_reg: 0.1375 (0.1720) loss_mask: 0.3474 (0.5834) loss_objectness: 0.0029 (0.0099) loss_rpn_box_reg: 0.0048 (0.0081) time: 1.7250 (1.7418) data: 0.0100 (0.0196) lr: 0.002333 max mem: 3964\n", + "2019-06-02 18:28:24,800 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000100.pth\n", + "2019-06-02 18:29:01,969 maskrcnn_benchmark.trainer INFO: eta: 0:25:49 iter: 120 loss: 0.6775 (1.0049) loss_classifier: 0.1876 (0.2719) loss_box_reg: 0.1401 (0.1673) loss_mask: 0.3231 (0.5485) loss_objectness: 0.0045 (0.0093) loss_rpn_box_reg: 0.0057 (0.0079) time: 1.8055 (1.7612) data: 0.0100 (0.0300) lr: 0.002467 max mem: 3964\n", + "2019-06-02 18:29:38,625 maskrcnn_benchmark.trainer INFO: eta: 0:25:23 iter: 140 loss: 0.5469 (0.9371) loss_classifier: 0.1636 (0.2570) loss_box_reg: 0.0984 (0.1575) loss_mask: 0.2496 (0.5061) loss_objectness: 0.0036 (0.0088) loss_rpn_box_reg: 0.0058 (0.0077) time: 1.8503 (1.7715) data: 0.0099 (0.0271) lr: 0.002600 max mem: 3984\n", + "2019-06-02 18:30:16,395 maskrcnn_benchmark.trainer INFO: eta: 0:25:00 iter: 160 loss: 0.4306 (0.8793) loss_classifier: 0.1549 (0.2447) loss_box_reg: 0.0649 (0.1465) loss_mask: 0.2048 (0.4718) loss_objectness: 0.0040 (0.0086) loss_rpn_box_reg: 0.0057 (0.0077) time: 1.9542 (1.7861) data: 0.0098 (0.0250) lr: 0.002733 max mem: 3984\n", + "2019-06-02 18:30:54,339 maskrcnn_benchmark.trainer INFO: eta: 0:24:34 iter: 180 loss: 0.4060 (0.8277) loss_classifier: 0.1222 (0.2322) loss_box_reg: 0.0425 (0.1354) loss_mask: 0.2194 (0.4441) loss_objectness: 0.0057 (0.0084) loss_rpn_box_reg: 0.0048 (0.0075) time: 1.8666 (1.7984) data: 0.0098 (0.0234) lr: 0.002867 max mem: 3984\n", + "2019-06-02 18:31:31,686 maskrcnn_benchmark.trainer INFO: eta: 0:24:04 iter: 200 loss: 0.3353 
(0.7811) loss_classifier: 0.1224 (0.2217) loss_box_reg: 0.0405 (0.1266) loss_mask: 0.1575 (0.4177) loss_objectness: 0.0031 (0.0079) loss_rpn_box_reg: 0.0040 (0.0072) time: 1.8499 (1.8053) data: 0.0098 (0.0220) lr: 0.003000 max mem: 3984\n", + "2019-06-02 18:31:31,694 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000200.pth\n", + "2019-06-02 18:32:11,665 maskrcnn_benchmark.trainer INFO: eta: 0:23:41 iter: 220 loss: 0.2797 (0.7377) loss_classifier: 0.1134 (0.2118) loss_box_reg: 0.0337 (0.1184) loss_mask: 0.1281 (0.3930) loss_objectness: 0.0016 (0.0075) loss_rpn_box_reg: 0.0047 (0.0070) time: 1.9676 (1.8229) data: 0.0099 (0.0274) lr: 0.003133 max mem: 4045\n", + "2019-06-02 18:32:48,993 maskrcnn_benchmark.trainer INFO: eta: 0:23:08 iter: 240 loss: 0.2811 (0.7005) loss_classifier: 0.0951 (0.2025) loss_box_reg: 0.0390 (0.1120) loss_mask: 0.1331 (0.3720) loss_objectness: 0.0030 (0.0072) loss_rpn_box_reg: 0.0035 (0.0067) time: 1.8686 (1.8265) data: 0.0100 (0.0260) lr: 0.003267 max mem: 4045\n", + "2019-06-02 18:33:26,674 maskrcnn_benchmark.trainer INFO: eta: 0:22:34 iter: 260 loss: 0.2268 (0.6662) loss_classifier: 0.0845 (0.1937) loss_box_reg: 0.0259 (0.1057) loss_mask: 0.0996 (0.3535) loss_objectness: 0.0015 (0.0069) loss_rpn_box_reg: 0.0028 (0.0064) time: 1.7613 (1.8310) data: 0.0101 (0.0248) lr: 0.003400 max mem: 4045\n", + "2019-06-02 18:34:05,409 maskrcnn_benchmark.trainer INFO: eta: 0:22:03 iter: 280 loss: 0.2316 (0.6376) loss_classifier: 0.0758 (0.1859) loss_box_reg: 0.0310 (0.1005) loss_mask: 0.1212 (0.3385) loss_objectness: 0.0011 (0.0065) loss_rpn_box_reg: 0.0032 (0.0063) time: 1.9362 (1.8385) data: 0.0101 (0.0238) lr: 0.003533 max mem: 4045\n", + "2019-06-02 18:34:45,986 maskrcnn_benchmark.trainer INFO: eta: 0:21:35 iter: 300 loss: 0.1890 (0.6097) loss_classifier: 0.0672 (0.1786) loss_box_reg: 0.0266 (0.0958) loss_mask: 0.1022 (0.3229) loss_objectness: 0.0011 (0.0063) loss_rpn_box_reg: 0.0026 (0.0061) time: 1.9844 (1.8512) data: 0.0101 (0.0229) lr: 0.003667 max mem: 4045\n", + "2019-06-02 18:34:45,996 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000300.pth\n", + "2019-06-02 18:35:26,169 maskrcnn_benchmark.trainer INFO: eta: 0:21:05 iter: 320 loss: 0.1838 (0.5837) loss_classifier: 0.0716 (0.1717) loss_box_reg: 0.0230 (0.0915) loss_mask: 0.0802 (0.3086) loss_objectness: 0.0009 (0.0060) loss_rpn_box_reg: 0.0028 (0.0059) time: 2.0302 (1.8611) data: 0.0101 (0.0273) lr: 0.003800 max mem: 4045\n", + "2019-06-02 18:36:03,978 maskrcnn_benchmark.trainer INFO: eta: 0:20:29 iter: 340 loss: 0.1670 (0.5607) loss_classifier: 0.0494 (0.1650) loss_box_reg: 0.0220 (0.0876) loss_mask: 0.0916 (0.2968) loss_objectness: 0.0006 (0.0057) loss_rpn_box_reg: 0.0013 (0.0056) time: 1.7739 (1.8628) data: 0.0102 (0.0263) lr: 0.003933 max mem: 4045\n", + "2019-06-02 18:36:45,869 maskrcnn_benchmark.trainer INFO: eta: 0:20:00 iter: 360 loss: 0.2075 (0.5412) loss_classifier: 0.0643 (0.1597) loss_box_reg: 0.0332 (0.0845) loss_mask: 0.0991 (0.2861) loss_objectness: 0.0008 (0.0055) loss_rpn_box_reg: 0.0019 (0.0055) time: 2.0951 (1.8757) data: 0.0102 (0.0254) lr: 0.004067 max mem: 4069\n", + "2019-06-02 18:37:25,994 maskrcnn_benchmark.trainer INFO: eta: 0:19:27 iter: 380 loss: 0.1814 (0.5235) loss_classifier: 0.0546 (0.1546) loss_box_reg: 0.0247 (0.0816) loss_mask: 0.0975 (0.2768) loss_objectness: 0.0005 (0.0052) loss_rpn_box_reg: 0.0023 (0.0053) time: 2.0547 (1.8826) data: 0.0103 (0.0246) lr: 0.004200 max mem: 4069\n", + "2019-06-02 18:38:05,156 
maskrcnn_benchmark.trainer INFO: eta: 0:18:51 iter: 400 loss: 0.1724 (0.5063) loss_classifier: 0.0503 (0.1496) loss_box_reg: 0.0215 (0.0787) loss_mask: 0.0902 (0.2679) loss_objectness: 0.0002 (0.0050) loss_rpn_box_reg: 0.0015 (0.0051) time: 1.9256 (1.8863) data: 0.0103 (0.0240) lr: 0.004333 max mem: 4069\n", + "2019-06-02 18:38:05,159 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000400.pth\n", + "2019-06-02 18:38:47,340 maskrcnn_benchmark.trainer INFO: eta: 0:18:20 iter: 420 loss: 0.1519 (0.4901) loss_classifier: 0.0508 (0.1448) loss_box_reg: 0.0172 (0.0760) loss_mask: 0.0781 (0.2595) loss_objectness: 0.0001 (0.0048) loss_rpn_box_reg: 0.0017 (0.0050) time: 2.1132 (1.8969) data: 0.0103 (0.0267) lr: 0.004467 max mem: 4069\n", + "2019-06-02 18:39:27,212 maskrcnn_benchmark.trainer INFO: eta: 0:17:44 iter: 440 loss: 0.1524 (0.4753) loss_classifier: 0.0423 (0.1405) loss_box_reg: 0.0227 (0.0737) loss_mask: 0.0650 (0.2516) loss_objectness: 0.0003 (0.0047) loss_rpn_box_reg: 0.0021 (0.0049) time: 1.9556 (1.9013) data: 0.0104 (0.0260) lr: 0.004600 max mem: 4085\n", + "2019-06-02 18:40:07,888 maskrcnn_benchmark.trainer INFO: eta: 0:17:09 iter: 460 loss: 0.1374 (0.4616) loss_classifier: 0.0361 (0.1365) loss_box_reg: 0.0201 (0.0715) loss_mask: 0.0701 (0.2443) loss_objectness: 0.0003 (0.0045) loss_rpn_box_reg: 0.0018 (0.0048) time: 1.9376 (1.9071) data: 0.0103 (0.0253) lr: 0.004733 max mem: 4085\n", + "2019-06-02 18:40:49,738 maskrcnn_benchmark.trainer INFO: eta: 0:16:35 iter: 480 loss: 0.1339 (0.4482) loss_classifier: 0.0375 (0.1325) loss_box_reg: 0.0204 (0.0694) loss_mask: 0.0691 (0.2373) loss_objectness: 0.0001 (0.0043) loss_rpn_box_reg: 0.0012 (0.0046) time: 2.0188 (1.9148) data: 0.0103 (0.0247) lr: 0.004867 max mem: 4085\n", + "2019-06-02 18:41:30,488 maskrcnn_benchmark.trainer INFO: eta: 0:15:59 iter: 500 loss: 0.1137 (0.4359) loss_classifier: 0.0400 (0.1293) loss_box_reg: 0.0183 (0.0675) loss_mask: 0.0510 (0.2304) loss_objectness: 0.0006 (0.0042) loss_rpn_box_reg: 0.0016 (0.0045) time: 2.0013 (1.9197) data: 0.0105 (0.0242) lr: 0.005000 max mem: 4085\n", + "2019-06-02 18:41:30,498 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000500.pth\n", + "2019-06-02 18:42:14,193 maskrcnn_benchmark.trainer INFO: eta: 0:15:26 iter: 520 loss: 0.1359 (0.4249) loss_classifier: 0.0486 (0.1262) loss_box_reg: 0.0196 (0.0658) loss_mask: 0.0663 (0.2243) loss_objectness: 0.0002 (0.0041) loss_rpn_box_reg: 0.0016 (0.0044) time: 2.0601 (1.9299) data: 0.0104 (0.0268) lr: 0.005000 max mem: 4085\n", + "2019-06-02 18:42:55,547 maskrcnn_benchmark.trainer INFO: eta: 0:14:50 iter: 540 loss: 0.1428 (0.4143) loss_classifier: 0.0347 (0.1230) loss_box_reg: 0.0164 (0.0641) loss_mask: 0.0742 (0.2189) loss_objectness: 0.0001 (0.0039) loss_rpn_box_reg: 0.0011 (0.0043) time: 2.0419 (1.9350) data: 0.0104 (0.0262) lr: 0.005000 max mem: 4085\n", + "2019-06-02 18:43:36,910 maskrcnn_benchmark.trainer INFO: eta: 0:14:13 iter: 560 loss: 0.1221 (0.4041) loss_classifier: 0.0331 (0.1200) loss_box_reg: 0.0159 (0.0625) loss_mask: 0.0595 (0.2136) loss_objectness: 0.0003 (0.0038) loss_rpn_box_reg: 0.0011 (0.0042) time: 2.0205 (1.9398) data: 0.0106 (0.0257) lr: 0.005000 max mem: 4085\n", + "2019-06-02 18:44:16,298 maskrcnn_benchmark.trainer INFO: eta: 0:13:35 iter: 580 loss: 0.0916 (0.3943) loss_classifier: 0.0323 (0.1171) loss_box_reg: 0.0120 (0.0609) loss_mask: 0.0499 (0.2084) loss_objectness: 0.0002 (0.0037) loss_rpn_box_reg: 0.0012 (0.0041) time: 1.9197 (1.9408) data: 0.0105 (0.0252) 
lr: 0.005000 max mem: 4093\n", + "2019-06-02 18:44:56,001 maskrcnn_benchmark.trainer INFO: eta: 0:12:56 iter: 600 loss: 0.1242 (0.3854) loss_classifier: 0.0348 (0.1145) loss_box_reg: 0.0146 (0.0594) loss_mask: 0.0651 (0.2037) loss_objectness: 0.0002 (0.0036) loss_rpn_box_reg: 0.0011 (0.0041) time: 1.9603 (1.9423) data: 0.0103 (0.0247) lr: 0.005000 max mem: 4093\n", + "2019-06-02 18:44:56,008 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000600.pth\n", + "2019-06-02 18:45:33,872 maskrcnn_benchmark.trainer INFO: eta: 0:12:17 iter: 620 loss: 0.0824 (0.3761) loss_classifier: 0.0214 (0.1118) loss_box_reg: 0.0079 (0.0579) loss_mask: 0.0436 (0.1989) loss_objectness: 0.0001 (0.0035) loss_rpn_box_reg: 0.0006 (0.0040) time: 1.7794 (1.9407) data: 0.0104 (0.0271) lr: 0.005000 max mem: 4093\n", + "2019-06-02 18:46:15,480 maskrcnn_benchmark.trainer INFO: eta: 0:11:40 iter: 640 loss: 0.1025 (0.3676) loss_classifier: 0.0313 (0.1093) loss_box_reg: 0.0117 (0.0565) loss_mask: 0.0525 (0.1945) loss_objectness: 0.0002 (0.0035) loss_rpn_box_reg: 0.0008 (0.0039) time: 1.9964 (1.9451) data: 0.0107 (0.0266) lr: 0.005000 max mem: 4093\n", + "2019-06-02 18:46:54,442 maskrcnn_benchmark.trainer INFO: eta: 0:11:01 iter: 660 loss: 0.0961 (0.3595) loss_classifier: 0.0282 (0.1068) loss_box_reg: 0.0091 (0.0551) loss_mask: 0.0556 (0.1904) loss_objectness: 0.0001 (0.0034) loss_rpn_box_reg: 0.0007 (0.0038) time: 1.9556 (1.9452) data: 0.0104 (0.0261) lr: 0.005000 max mem: 4093\n", + "2019-06-02 18:47:33,371 maskrcnn_benchmark.trainer INFO: eta: 0:10:22 iter: 680 loss: 0.0902 (0.3517) loss_classifier: 0.0275 (0.1045) loss_box_reg: 0.0073 (0.0538) loss_mask: 0.0446 (0.1863) loss_objectness: 0.0001 (0.0033) loss_rpn_box_reg: 0.0010 (0.0037) time: 1.9129 (1.9452) data: 0.0105 (0.0257) lr: 0.005000 max mem: 4093\n", + "2019-06-02 18:48:12,252 maskrcnn_benchmark.trainer INFO: eta: 0:09:43 iter: 700 loss: 0.0981 (0.3448) loss_classifier: 0.0275 (0.1024) loss_box_reg: 0.0103 (0.0527) loss_mask: 0.0611 (0.1829) loss_objectness: 0.0001 (0.0032) loss_rpn_box_reg: 0.0014 (0.0036) time: 1.9010 (1.9452) data: 0.0105 (0.0253) lr: 0.000500 max mem: 4093\n", + "2019-06-02 18:48:12,255 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000700.pth\n", + "2019-06-02 18:48:52,993 maskrcnn_benchmark.trainer INFO: eta: 0:09:05 iter: 720 loss: 0.0963 (0.3379) loss_classifier: 0.0256 (0.1003) loss_box_reg: 0.0104 (0.0516) loss_mask: 0.0569 (0.1793) loss_objectness: 0.0001 (0.0031) loss_rpn_box_reg: 0.0008 (0.0036) time: 2.0065 (1.9477) data: 0.0104 (0.0275) lr: 0.000500 max mem: 4093\n", + "2019-06-02 18:49:32,413 maskrcnn_benchmark.trainer INFO: eta: 0:08:26 iter: 740 loss: 0.0970 (0.3314) loss_classifier: 0.0244 (0.0984) loss_box_reg: 0.0099 (0.0505) loss_mask: 0.0453 (0.1759) loss_objectness: 0.0001 (0.0031) loss_rpn_box_reg: 0.0008 (0.0035) time: 1.9561 (1.9484) data: 0.0104 (0.0270) lr: 0.000500 max mem: 4093\n", + "2019-06-02 18:50:12,060 maskrcnn_benchmark.trainer INFO: eta: 0:07:47 iter: 760 loss: 0.0891 (0.3255) loss_classifier: 0.0305 (0.0967) loss_box_reg: 0.0089 (0.0496) loss_mask: 0.0521 (0.1728) loss_objectness: 0.0002 (0.0030) loss_rpn_box_reg: 0.0009 (0.0034) time: 1.9450 (1.9493) data: 0.0105 (0.0266) lr: 0.000500 max mem: 4093\n", + "2019-06-02 18:50:51,359 maskrcnn_benchmark.trainer INFO: eta: 0:07:08 iter: 780 loss: 0.0689 (0.3191) loss_classifier: 0.0214 (0.0948) loss_box_reg: 0.0076 (0.0485) loss_mask: 0.0436 (0.1695) loss_objectness: 0.0001 (0.0029) loss_rpn_box_reg: 
0.0005 (0.0034) time: 1.9242 (1.9497) data: 0.0106 (0.0262) lr: 0.000500 max mem: 4093\n", + "2019-06-02 18:51:30,422 maskrcnn_benchmark.trainer INFO: eta: 0:06:29 iter: 800 loss: 0.0606 (0.3130) loss_classifier: 0.0171 (0.0929) loss_box_reg: 0.0068 (0.0475) loss_mask: 0.0367 (0.1664) loss_objectness: 0.0001 (0.0029) loss_rpn_box_reg: 0.0005 (0.0033) time: 1.9414 (1.9497) data: 0.0103 (0.0258) lr: 0.000050 max mem: 4093\n", + "2019-06-02 18:51:30,430 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000800.pth\n", + "2019-06-02 18:52:11,778 maskrcnn_benchmark.trainer INFO: eta: 0:05:51 iter: 820 loss: 0.0856 (0.3077) loss_classifier: 0.0259 (0.0913) loss_box_reg: 0.0090 (0.0466) loss_mask: 0.0485 (0.1637) loss_objectness: 0.0000 (0.0028) loss_rpn_box_reg: 0.0006 (0.0032) time: 2.0000 (1.9526) data: 0.0104 (0.0274) lr: 0.000050 max mem: 4093\n", + "2019-06-02 18:52:50,917 maskrcnn_benchmark.trainer INFO: eta: 0:05:12 iter: 840 loss: 0.0698 (0.3024) loss_classifier: 0.0236 (0.0897) loss_box_reg: 0.0062 (0.0458) loss_mask: 0.0450 (0.1610) loss_objectness: 0.0001 (0.0027) loss_rpn_box_reg: 0.0004 (0.0032) time: 1.9542 (1.9527) data: 0.0104 (0.0270) lr: 0.000050 max mem: 4093\n", + "2019-06-02 18:53:30,312 maskrcnn_benchmark.trainer INFO: eta: 0:04:33 iter: 860 loss: 0.0664 (0.2972) loss_classifier: 0.0175 (0.0882) loss_box_reg: 0.0065 (0.0449) loss_mask: 0.0435 (0.1583) loss_objectness: 0.0000 (0.0027) loss_rpn_box_reg: 0.0003 (0.0031) time: 1.8427 (1.9531) data: 0.0104 (0.0267) lr: 0.000050 max mem: 4093\n", + "2019-06-02 18:54:12,617 maskrcnn_benchmark.trainer INFO: eta: 0:03:54 iter: 880 loss: 0.0959 (0.2928) loss_classifier: 0.0296 (0.0869) loss_box_reg: 0.0087 (0.0442) loss_mask: 0.0573 (0.1560) loss_objectness: 0.0001 (0.0027) loss_rpn_box_reg: 0.0010 (0.0031) time: 2.1273 (1.9568) data: 0.0104 (0.0263) lr: 0.000050 max mem: 4093\n", + "2019-06-02 18:54:52,317 maskrcnn_benchmark.trainer INFO: eta: 0:03:15 iter: 900 loss: 0.0774 (0.2881) loss_classifier: 0.0200 (0.0855) loss_box_reg: 0.0065 (0.0434) loss_mask: 0.0428 (0.1535) loss_objectness: 0.0000 (0.0026) loss_rpn_box_reg: 0.0004 (0.0030) time: 1.9623 (1.9574) data: 0.0104 (0.0260) lr: 0.000050 max mem: 4093\n", + "2019-06-02 18:54:52,320 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0000900.pth\n", + "2019-06-02 18:55:35,081 maskrcnn_benchmark.trainer INFO: eta: 0:02:36 iter: 920 loss: 0.0763 (0.2840) loss_classifier: 0.0269 (0.0843) loss_box_reg: 0.0079 (0.0428) loss_mask: 0.0498 (0.1514) loss_objectness: 0.0001 (0.0026) loss_rpn_box_reg: 0.0005 (0.0030) time: 2.1085 (1.9614) data: 0.0105 (0.0274) lr: 0.000050 max mem: 4093\n", + "2019-06-02 18:56:15,652 maskrcnn_benchmark.trainer INFO: eta: 0:01:57 iter: 940 loss: 0.0823 (0.2798) loss_classifier: 0.0235 (0.0831) loss_box_reg: 0.0078 (0.0421) loss_mask: 0.0511 (0.1493) loss_objectness: 0.0001 (0.0025) loss_rpn_box_reg: 0.0007 (0.0029) time: 2.0190 (1.9628) data: 0.0102 (0.0270) lr: 0.000050 max mem: 4102\n", + "2019-06-02 18:56:55,399 maskrcnn_benchmark.trainer INFO: eta: 0:01:18 iter: 960 loss: 0.0843 (0.2758) loss_classifier: 0.0246 (0.0819) loss_box_reg: 0.0096 (0.0414) loss_mask: 0.0429 (0.1472) loss_objectness: 0.0001 (0.0025) loss_rpn_box_reg: 0.0006 (0.0029) time: 2.0232 (1.9633) data: 0.0102 (0.0267) lr: 0.000050 max mem: 4102\n", + "2019-06-02 18:57:32,252 maskrcnn_benchmark.trainer INFO: eta: 0:00:39 iter: 980 loss: 0.0525 (0.2716) loss_classifier: 0.0154 (0.0806) loss_box_reg: 0.0049 (0.0407) loss_mask: 0.0351 
(0.1450) loss_objectness: 0.0000 (0.0024) loss_rpn_box_reg: 0.0004 (0.0028) time: 1.7520 (1.9608) data: 0.0103 (0.0264) lr: 0.000050 max mem: 4102\n", + "2019-06-02 18:58:10,741 maskrcnn_benchmark.trainer INFO: eta: 0:00:00 iter: 1000 loss: 0.0669 (0.2677) loss_classifier: 0.0201 (0.0795) loss_box_reg: 0.0075 (0.0400) loss_mask: 0.0417 (0.1430) loss_objectness: 0.0000 (0.0024) loss_rpn_box_reg: 0.0003 (0.0028) time: 1.8464 (1.9601) data: 0.0104 (0.0261) lr: 0.000050 max mem: 4102\n", + "2019-06-02 18:58:10,753 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_0001000.pth\n", + "2019-06-02 18:58:12,164 maskrcnn_benchmark.utils.checkpoint INFO: Saving checkpoint to shapeDir/model_final.pth\n", + "2019-06-02 18:58:14,438 maskrcnn_benchmark.trainer INFO: Total training time: 0:32:43.816510 (1.9638 s / it)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ONldqRzHUAm0", + "colab_type": "text" + }, + "source": [ + "# Evaluation\n", + "\n", + "Now after our model is trained, we would like to see how well it predicts objects in our sample images. One way to validate your model is through a standard metric called COCO mAP. This metric is used widely. Hence, we shall do this now. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "s8YFliWAUG-E", + "colab_type": "text" + }, + "source": [ + "### Doing Inference\n", + "\n", + "Helper function to perform inference." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "6VbvZXhWkQ-0", + "colab_type": "code", + "colab": {} + }, + "source": [ + "\n", + "def do_inference(\n", + " model,\n", + " data_loader,\n", + " dataset_name,\n", + " iou_types=(\"bbox\",),\n", + " box_only=False,\n", + " device=\"cuda\",\n", + " expected_results=(),\n", + " expected_results_sigma_tol=4,\n", + " output_folder=None,):\n", + " \n", + " # convert to a torch.device for efficiency\n", + " device = torch.device(device)\n", + " num_devices = get_world_size()\n", + " logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n", + " dataset = data_loader.dataset\n", + " logger.info(\"Start evaluation on {} dataset({} images).\".format(dataset_name, len(dataset)))\n", + " total_timer = Timer()\n", + " inference_timer = Timer()\n", + " total_timer.tic()\n", + " predictions = compute_on_dataset(model, data_loader, device, inference_timer)\n", + " \n", + " # wait for all processes to complete before measuring the time\n", + " synchronize()\n", + " total_time = total_timer.toc()\n", + " total_time_str = get_time_str(total_time)\n", + " logger.info(\n", + " \"Total run time: {} ({} s / img per device, on {} devices)\".format(\n", + " total_time_str, total_time * num_devices / len(dataset), num_devices\n", + " )\n", + " )\n", + " \n", + " total_infer_time = get_time_str(inference_timer.total_time)\n", + " logger.info(\n", + " \"Model inference time: {} ({} s / img per device, on {} devices)\".format(\n", + " total_infer_time,\n", + " inference_timer.total_time * num_devices / len(dataset),\n", + " num_devices,\n", + " )\n", + " )\n", + " \n", + " predictions = _accumulate_predictions_from_multiple_gpus(predictions)\n", + " if not is_main_process():\n", + " return\n", + "\n", + " if output_folder:\n", + " torch.save(predictions, os.path.join(output_folder, \"predictions.pth\"))\n", + "\n", + " extra_args = dict(\n", + " box_only=box_only,\n", + " iou_types=iou_types,\n", + " expected_results=expected_results,\n", + " expected_results_sigma_tol=expected_results_sigma_tol,\n", + " )\n", + 
"\n", + " return coco_evaluation(dataset=dataset,\n", + " predictions=predictions,\n", + " output_folder=output_folder,\n", + " **extra_args)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YcAidp5fUccv", + "colab_type": "text" + }, + "source": [ + "### Testing Function\n", + "\n", + "Driver function to run the model test" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "rIC4k6dUd4UB", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def run_test(cfg, model, distributed, dataset):\n", + " if distributed:\n", + " model = model.module\n", + " torch.cuda.empty_cache() # TODO check if it helps\n", + " iou_types = (\"bbox\",)\n", + " \n", + " data_loaders_val = build_data_loader(cfg, dataset, is_train=False)\n", + " mkdir(\"shapeVal\")\n", + " for data_loader in data_loaders_val:\n", + " do_inference(\n", + " model,\n", + " data_loader, # For test we need this as zero\n", + " dataset_name=\"shape-val\",\n", + " iou_types=iou_types,\n", + " box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n", + " device=cfg.MODEL.DEVICE,\n", + " expected_results=cfg.TEST.EXPECTED_RESULTS,\n", + " expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n", + " output_folder=\"shapeVal\",\n", + " )\n", + " synchronize()\n", + "\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LdVLSfrAd9Mi", + "colab_type": "text" + }, + "source": [ + "### Run Evaluation" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "0GAUbBC-hsUq", + "colab_type": "code", + "outputId": "6a51b2cc-d7c3-40b9-b8e7-15a96f30df8b", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 598 + } + }, + "source": [ + "cfg.merge_from_list(['TEST.IMS_PER_BATCH', 1])\n", + "\n", + "run_test(cfg, model=model, distributed=False, dataset=ShapeDataset(50))" + ], + "execution_count": 18, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n", + "2019-06-02 18:58:27,806 maskrcnn_benchmark.inference INFO: Start evaluation on shape-val dataset(50 images).\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "100%|██████████| 50/50 [00:05<00:00, 10.60it/s]" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "2019-06-02 18:58:33,089 maskrcnn_benchmark.inference INFO: Total run time: 0:00:05.282553 (0.10565105438232422 s / img per device, on 1 devices)\n", + "2019-06-02 18:58:33,090 maskrcnn_benchmark.inference INFO: Model inference time: 0:00:04.614043 (0.09228085994720459 s / img per device, on 1 devices)\n", + "2019-06-02 18:58:33,105 maskrcnn_benchmark.inference INFO: Preparing results for COCO format\n", + "2019-06-02 18:58:33,107 maskrcnn_benchmark.inference INFO: Preparing bbox results\n", + "2019-06-02 18:58:33,117 maskrcnn_benchmark.inference INFO: Evaluating predictions\n", + "Loading and preparing results...\n", + "DONE (t=0.00s)\n", + "creating index...\n", + "index created!\n", + "Running per image evaluation...\n", + "Evaluate annotation type *bbox*\n", + "DONE (t=0.04s).\n", + "Accumulating evaluation results...\n", + "DONE (t=0.02s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.885\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.976\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.957\n", + " Average Precision (AP) @[ 
IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.885\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.714\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.908\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.908\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.908\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000\n", + "2019-06-02 18:58:33,193 maskrcnn_benchmark.inference INFO: OrderedDict([('bbox', OrderedDict([('AP', 0.8847684644516638), ('AP50', 0.9764087519863097), ('AP75', 0.956912357902457), ('APs', -1.0), ('APm', 0.8847684644516638), ('APl', -1.0)]))])\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ccHt8YMdKq6K", + "colab_type": "text" + }, + "source": [ + "# Visualise\n", + "\n", + "Another important part of validating your model is visualising the results. This is done below" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kb9VchvVzRpu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Load Trained Model\n", + "config_file = \"shapes_config.yaml\"\n", + "\n", + "cfg.merge_from_file(config_file)\n", + "# manual override some options\n", + "cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n", + "\n", + "vis_demo = COCODemo(\n", + " cfg, \n", + " min_image_size=800,\n", + " confidence_threshold=0.7)\n", + "\n", + "# Load Dataset\n", + "dataset = ShapeDataset(50)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c8b6wHAXjyE5", + "colab_type": "text" + }, + "source": [ + "## Visualise" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "StOBbFmujxIw", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + }, + "outputId": "c52ae633-20d7-4aa8-ad8a-1e1caeaf3ba5" + }, + "source": [ + "# Visualise Results\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i)\n", + " image = np.array(img)[:, :, [2, 1, 0]]\n", + " result = vis_demo.run_on_opencv_image(image)\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(result)\n", + "plt.show()\n" + ], + "execution_count": 27, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X/QHPV15/vPiX4ktsAghAvLkkDg\nEN8I/8BCYBKz4DLxXlkhiM1SBG8uYCCrdV1IbNjYBrNVkqvWa3vZhXUqvmZlAxZcYiDEKbSE2CbY\nhqISZCSMAcFiBBEgRSDz0xj7WjwP5/4xPVJrND39u/vbM+9XleqZ6emePjOPvs+Zc/rbPebuAgAA\n4fi1tgMAAAB7IzkDABAYkjMAAIEhOQMAEBiSMwAAgSE5AwAQmNqSs5ktN7PHzGyLmV1S134A1Iux\nDDTP6jjP2cxmSPqJpA9L2ibpPkkfdfdHKt8ZgNowloF21FU5Hydpi7s/6e67JN0oaWVN+wJQH8Yy\n0IKZNT3vAknPxO5vk/T+xCD2n+mz5s2uKZRwHXXwkpGPb35+T3Fy1MFL9rk/bL1h6+bZ97DtRu17\n1HZl95n2vHmeL75NlliLyhJ31ufYtGnT8+7+1koCKy7XWJakA+cc7G8/6LBagwK65F9efEovv/a8\n5dmmruScysxWSVolSbPmzdIRq3+zrVBas/HcjSMfP+ra9+y17uD9YesNWzfPvodtN2rfo7Yru8+0\n583zfPFtssRaVJa4sz6HmT1VSVANiI/nt809VNdd9E8tRwSE4+wrfyf3NnUl5+2SFsXuL4yW7ebu\nayWtlaQ3LX7zRF7gO/4HfPO5D+6zLMnmcx8s/Md/2H76y7JuX2Tbottlfb605y27vwmWOpalvcfz\nkkXHTOR4BqpUV3K+T9KRZna4egP5TEn/rqZ9TYw8CTxp+2HbHnXte1KTV9q+05Ll4GP9+3mTZpb3\nYNg6dVbLY46xDLSgluTs7lNmdqGk70iaIekad99cx74A1IexDLSjtmPO7n67pNvrev5JVGUru6p9\nj6qAy7Tf09Cmbg5jGWheaxPCgDJoUwMYZ1y+EwCAwJCcAQAIDMl5Ahx17XsyzciuY7+bz32w8f2O\n2mdIx6qT4uz/vgBMLpIzAACBYUJYCx7benTiY/FKaubqszNvl3UfaZVj0nZZ9p31dZWR5fzoPBcn\naUto8QAISy3fSpXXmxa/2Sfp8p39JPbOxQ+0HsOgpmOq83SrrnvkvIc2ufuytuPIa8miY5zLdwJ7\nnH3l7+iRZzblurY2bW0AAAJDW3vCpLWnQ6jqAWDSkZwnSJ5j1lWLH2NtY+Y4AHQJbW0AAAJD5TwB\nilTMj209utLWdrxajk8C6+2nst0AwFigcgYAIDBUzkhU9eQwTpkCgGyonMfcqJb2intO14p7Tm8w\nGgBAFiRnAAACQ3IeU49tPTq1ah52O+m5AADNITkDABAYkjMAAIEhOQMAEBhOpRpDWY81D1t++wm3\njHxOrrkNAPWjcgYAIDCFk7OZLTKz75vZI2a22cw+ES0/yMzuMLPHo59zqwsXo+SZoT1qnVHrMXN7\nPDGegbCUqZynJP1Hd18i6XhJF5jZEkmXSLrT3Y+UdGd0H0DYGM9AQAonZ3ff4e73R7dflfSopAWS\nVkpaF622TtJpZYMEUC/GMxCWSiaEmdliSe+TtEHSIe6+I3roWUmHVLEPJEtrNRe5ROeKe05nctiE\nYjwD7Ss9IczM9pP0N5I+6e4/iz/m7i7JE7ZbZWYbzWzj9M+nyoYxsepIzPFtOf48WaoYzy+99nwD\nkQLjrVRyNrNZ6g3kG9z9W9Hi58xsfvT4fEk7h23r7mvdfZm7L5uxH2d0AW2rajzPnXNwMwEDY6zM\nbG2TdLWkR939ithD6yWdE90+R9KtxcNDkiwzs6v6xqm06pkKuvsYz0BYypSsH5B0lqSHzKx/8PGz\nkr4o6WYzO1/SU5LOKBcigAYwnoGAFE7O7n6PJEt4+OSizwugeYxnICxcIQwAgMCQnAEACAzJuYPq\nPH1q1HNyWhUANINzmDqkjaQ8bB9cnAQA6kXlDABAYEjOAAAEZuzb2pvPfVCSdNS172k5knLKfhVk\nlfr7o70NAPWgcgYAIDDBVs6bz32wdLXbr5rHwdTnrtPM1We3HUZuVXYuRv0+054/7f9C1zsryG/Z\nxbO18YpdbYcBDBVscq7COPzBDWGG9qh9D2ttT33uukr3kyXBj1onywe9Kj4MonuWXTxbkhpP0l38\nYFB1zP33fpi0/YzaNsv2XUBbGwCAwIx15dx1IU0CSxKfHNavmOPt96qraKAq41BdNSGtSi36fEnv\nf9r+RlXwVcfapqCS8+Bxwfj9eMtxWAszz7pp+40b3C5t30nb5dlv0fbq+jtPHbr81JPXj1w/6fG8\nunhMHMBwg0m0qcS38YpdhQ83xGPt+ocv2toAAAQmqMp5sBrNU30WrTaH7SfrLO+sVfSgfqs3abu8\nk8BGVcBJ1XTS8qJGXdZT6r2mJs57LjOxa1zOiZ9kSdXdsCoqqboatjz+vHlaqmWqtzzPV0elWEfl\nWSbOtibvtYXKGQCAwARVOedVV5V01LXvSa2A0/ad9Pjmcx/cfWz2nYv33mc8rqzW33lq6jHjYZV1\nVceZQ1H0/YtvP07nxU+aUVVVmWOlWaq1YetUvc/+siyToSalshx3nU7OVbayq9p3UpLY+8NAsa9X\nTJqhXXWLuigu64k2ZGmV5k1cWddP2nfRiUlZnm8YEvL4oa0NAEBgOl05j5O0SWCjhNaizlJBV109\n1zExEOOhSFVZVSWet1IfvI3JRXIGgEDQnkYfbW0AAAJTOjmb2Qwz+5GZ3RbdP9zMNpjZFjO7ycyC\n69H0Z+Y2OTv3qGvf09qM4PV3njp00ljS8raMumLaqPeu35IeNUkv6ffdX05Lu6eL4xndsuzi2ZXM\nZh93VbS1PyHpUUlvie5/SdKV7n6jmV0l6XxJXx31BP/frjcNPeYa/2M67NKQeY7TJq2bliyTtsuy\n76R1il5vOn4Mt39c99ST16cm2DwXJ6nSqOt/x9+bPB9Yyn644XSpVKXHc4jyztZ+/0UfHr2C935M\nX3yXZvhJiatN212Z9pl1dneeC6e0ZdR7Hb8056jti8xYT9t315SqnM1soaTfl/T16L5J+pCkfhZZ\nJ+m0MvsA0AzGMxAOc/fiG5vdIukLkvaX9OeSPibpXnf/zejxRZL+3t3fNWTbVZJWSZIOmHPMjE/8\nYevnv6ZVw1XFN2o/U5+7bneXIKnyvP2EW4L5Vqo0oy7p2df27z1Uj5z30CZ3X9bU/qoaz2+be+gx\n/+s/PV5rrIOV7bTdNXL9YdXttA2vepOWD1Nkv2n7zvp8G668Y/ftLJcXLStrdZ41lkn6Puezr/wd\nPfLMJsuzTeG2tpmdImmnu28ysw/m3d7d10paK0n29nnF
PyFUIGt7vKnrQ4+TtNOqEIYqx/OSRcc0\nMp7jyWmjRv8x3qA79lm2Ubuki/ZdN2tizrvu4Hbx+PeKaYQNumNoy72JZJR1H1WvV/W2XVHmmPMH\nJJ1qZisk/YZ6x6i+LOlAM5vp7lOSFkraXj5MADVjPAMBKZyc3f1SSZdKUvRJ+8/d/Y/N7K8lnS7p\nRknnSLq1gjhrU+biH0XFJ4T1W9jxZV1pWecx6luruKxn+8ZlPKdJnejVoH4swypooI7znD8j6WIz\n2yJpnqSra9gHgGYwnoEWVHKFMHf/gaQfRLeflHRcFc8borJV3mNbj96rWu5XzGmTwMZBG5f1RH7j\nOJ5DqpgHvf+iD1M9Yx9cvrNFw87dBlCtvIl509tvq3T/x/zLKanrxGMkUUPi8p0AAARnYivntIlg\nv/3590qSHr3sx4nb04IFwtV2xTz4vFkqaImJYuiZyOScNTG3Iev5wJw3jEmXdKnGPEm5roScZV9p\nyZokPdloawMAEJiJrJzz+O3Pv3dka1vKNnP7Hf/vf+5tQ8WLCVfFlzQkXeYyrWpuslJO04/l6B37\nj7zSWKizuav8kolJupRnViRnAJ0zePnLLiXlIkJpcVf9dY1ZEvyodbJ80OvqN1XR1gYAIDATVTkX\nnQhWduZ2v6Utjb6M5aRbcc/pUnShkif+r//UcjToinGomvPO6G7DYJVadRWNvU1Mch6VmLPOzi56\n/Hkw0bwzQ0yTpP+ePcGpaWNr8A950tcKDmtBpq2rlO/AOnrH/kOXPzD/1aHrxZcP23ZwuzL7HLTp\n7bcNTdC7j7FfLE1rz+2kVm0drdyutYW7jrY2AACBmZjKOURlrs/NBVDQJYMVb9YqrMzM7n61mVYJ\nD5O1iq5qu1Gm7a6hs7mTZqyPa7u5bDegirMEmjT2ybnqC45kOf4s8fWHQBll/hAPS8p9D8x/NTVZ\nHr1j/5Ht56THk5b3l2VN0vFTrCSNPM1q2PvUpQSURf/1FP3QUXb7ttDWBgAgMGNfOQPonjKt7CwT\ntkZJ2j6pAh7Wyq5SUvsa443KGQCAwIx15VzF6VOjtk867tzfN8edgWrl/aapcTDqmHPbVwxrQhUT\nwcps35axTM4hf+sUAABpaGsDABCYsaycm8JpVUBY4qdK1TVBa9g+pfRTsOqQ1LINrZWbFE/8NKci\nX2yx8YpdnXkP8iI5A6jN8a/8yT7LpnTdXueczlx99l6PJW2XxbJVF+x+jrTzipetumCfuIYtHzSl\n64auM6Xrcu9z1PNpde/H9OeSZ2v3j0dv0J5jzyGdz5snlrJxh/S6q0BbGwCAwJh7ylXjR21sdqCk\nr0t6l3qXnz9P0mOSbpK0WNJWSWe4+0sjn+ft83zGv//9Stq/dc7QHmXUzG2p2tY2M8EnwyPnPbTJ\n3Zc1tb+qxvOSRcf4dRf9k6Q9FfC9B3y9khi7/A1Uedve/Wp649qvpH5b1STM2u6ys6/8HT3yzCbL\ns03ZyvnLkr7t7v+HpPdKelTSJZLudPcjJd0Z3a/dY1uPbi0x959/1D7S4gMCEMx4BiZd4eRsZgdI\nOlHS1ZLk7rvc/WVJKyWti1ZbJ+m0skECqBfjGQhLmQlhh0v6qaRrzey9kjZJ+oSkQ9x9R7TOs5IO\nGbaxma2StEqSdMCcEmEAqEBl4/ltcw+tP9pADU4Iy/JFG8AwZZLzTElLJf2pu28wsy9roOXl7m5m\nQw9qu/taSWul3jHnokGEdsGRLKdXcbwYAapsPC9ZdEzxiSwdN3j97TZOr8J4KHPMeZukbe6+Ibp/\ni3qD+zkzmy9J0c+d5UIE0ADGMxCQwsnZ3Z+V9IyZvTNadLKkRyStl3ROtOwcSbeWirCjmByGLmE8\nV+uB+a/u/gcUUfYiJH8q6QYzmy3pSUnnqpfwbzaz8yU9JemMkvtIFFpLG+i4VsczgD1KJWd3f0DS\nsPMwTy7zvACax3gGwtHJy3dSMQMAxhmX7wQAIDAk5xpluWoYAIRs2cWzx+5LJbqA5NwAZm4D6Lo2\nEnQXPxRUFTPJGQCAwHRuQhiTwQBMklHfSNXEt1FtvGJX7fsYB1VX+Z1Jzl1Pylku6ylV+9WSwLjp\nJ6Okr4485l9OCfprIzGell08e/eHGNraAACMqc5UzgCAaiRVd8Na2P11Bx8btjz+vEnt8GH7LtM6\nz/N8Sa+lrDpa/yTnhv3259+b2NqW+NYqoKz+MVra28PFW7CDy8s8p5SepAb3XbYFnPR8Sa8xabsQ\n0dYGACAwnaicR00GC30iGAAU4WueGvl43pnaWarbIm3fLFVo0vPGJ1Hl3Wfa8w0TerUcF3Ry7voM\n7STM3AbKyzJzW+pee3vUqVN1Kpq4qkj2RRLz4O1xQ1sbAIDABF05jzsmhwHVszWHSUpvC4corWre\ncOUdiZ2CSdKl9nRRwSbnSTnOTIsbKCeesPqJOX47nvBCbXG31cpGuGhrAwAQmOAq53GdBAagPvGK\nedhj/RZ3iJf3zFI1N3ENbanYbO2RbXbv/Zi++C5J0gw/aehq03bXXo+Ner0br9iVGmfaudxdaItT\nOQMAEJjgKmcAyOP4V/4kdZ34JLEQTrHKc4y5TNW8V1U7UMUOs7t6vWjPsmndte9zxZbnMW3Dtxms\nqPv7mtZdQyvz/r6TTqWa4Sft9RqGbRd69UxyDkSWiWFMCgP2yJKUB8Xb301NFCs62Wt3rAeUjyGe\n4DcqOSlt0L4fBGZcNLwVndSiLrtu0nZZX4M0/HW8/6IPN5KQq9oHbW0AAAJTqnI2s4sk/Yl6DZOH\nJJ0rab6kGyXNk7RJ0lnunumjxKScPjXKqHOf0ybLAWVUPZ7rNGoC2N9d8YeSpN+/+FupzxGfKNZX\nZRVdpGruV4jHK39noEqhnU8dj6epCXJtKpyczWyBpD+TtMTdf2lmN0s6U9IKSVe6+41mdpWk8yV9\ntUyQk5KYgbY0OZ7LSGtl9xNz/PaoJD0syS9bdcHu201dyCS0ZBNaYh7Ujy+0961KZdvaMyW9ycxm\nSnqzpB2SPiTplujxdZJOK7kPAM1gPAOBKFw5u/t2M/tvkp6W9EtJ31Wv7fWyu09Fq22TtGDY9ma2\nStIqSdIBc4qGAaACVY7nt809tP6Ac/i7K/4wtcWdZFT7XKqmsg6p+stTMc9627W1xfH6s+dmWm+c\nW91l2tpzJa2UdLiklyX9taTlWbd397WS1kqSvX2eD1tnUtvZaTO3gapVOZ6XLDpm6HhuU9bj0HkN\nJu+syTrERBJKYo4/f9YkLfXiD/F9LapMW/v3JP2zu//U3V+X9C1JH5B0YNQWk6SFkraXjBFA/RjP\nQEDKzNZ+WtLxZvZm9dpgJ0vaKOn7kk5Xb4bnOZJuLRskgNoFP55HTQaLTwQbJb5e1VW0NPwbsUKv\n5tIq5rqr5Kz7zlJFj9NEsTLHnDeY2S2S7pc0JelH6rW1/k7SjWb2n6NlV1cR6CTKcloVFyZBFUIe\nz2kztH3NU1q
x5spCz512TLmoeJIuOvO5/xx1zpwe9dxtJuVh8iTqcWhxlzrP2d1XS1o9sPhJSceV\neV4AzWM8A+Hg8p2B47KemGRZLtF5+1uGXES5ImVb37bmMN17wNcLbdu/CEkdFWDIrewsskwY63qL\nm+QMIDhZknLW48xlpO0jS/Luv5aiSbpqXWplp8nS6g79gipJuLY2AACBoXIG0DlNVM1Z1D37u0pd\nb2VPGipnAAACQ3IGgAqkVfNFvn+6KmnHmcehah6H1xBHWxtAMNpMYFVIu0xoaJPDEC4qZwAAAkPl\nPAZ+7d+fI0l642vrWo4EKC6U06cmybi1gtPOf+7Suc9BJ2e+lQlAH4k5O2Zmdx9tbQAAAhN05Yx8\naG+ji9La2V2smLNMDGNSGEahcgaAmoz6YHH8K3/S+dnpqA/JGQCAwJCcO67/rVVx/fY2ELpxbGmj\nfWkT3rrwZRhBHHP+jdm/1BGLH9BjW49uO5TO6H9N5Btf4+sigZBx/BlFUDkDABCYICrnvn41CGC8\n0c4GRqNyBoAGMHMbeZCcAQAITFBtbQDjb1SFOO7tbCaHISsqZwAAApOanM3sGjPbaWYPx5YdZGZ3\nmNnj0c+50XIzs78wsy1m9qCZLa0zeAD5MJ6BbshSOX9D0vKBZZdIutPdj5R0Z3Rfkj4i6cjo3ypJ\nX60mTAAV+YZaGs9pk57GvaUdx+QwpElNzu5+t6QXBxavlNT/doV1kk6LLb/Oe+6VdKCZza8qWADl\nMJ6Bbig6IewQd98R3X5W0iHR7QWSnomtty1atkMDzGyVep/GNWverIJhAKhApeP5bXMP3WcHVMxA\nPqUnhLm7S/IC261192XuvmzGfkwaB0JQxXieO+fgGiIbP393xR/ywQSJiibn5/rtrejnzmj5dkmL\nYustjJYBCBfjGQhM0eS8XlL/q4/OkXRrbPnZ0SzP4yW9EmuXAQhTa+OZyhEYLsupVN+U9E+S3mlm\n28zsfElflPRhM3tc0u9F9yXpdklPStoi6WuS/u9aogZQCOM5PGkfUJi5nd/rz5478vENV97RUCTF\npR7sdfePJjx08pB1XdIFZYMCUA/GM9ANzMQCUAtmaGeX5bKekri05wTh8p0AKvXo9FYSM1ASyRkA\ngMCQnAFgzGy48o6Rk55ef/bc1ElTaBfJGQCAwDAhDAAwFsbhFKo+KmcA6IgXVt9e6fPR2g4XyRkA\ngMCQnAFgzE3C5LBxeA1xHHMG0KikC20gm35re97nVrQcCepE5QwAQGBIzgAwAbKc+9xFaW35tNcd\nKpIzAHRQ0Znb43T8eRyTch/JGQCAwDAhDEClfnvGYl13wNf5HuIK7f42qs81s794RTrrbdc2s9OM\nslT2Xa6Y+0jOAGpxLwm6EnV8TWQ/eb3/og+nrttPhm0l6Txt9nFIyn20tQEACAyVM4Da1FH1oTob\nrrwjU/UstdPqntSqWSI5A8BEiye1IolaqiZZF50lPm5JuY+2NgAAgaFyBgBIyjdRLK6tc6PHtWqW\nqJwBAAhOauVsZtdIOkXSTnd/V7Tsckl/IGmXpCcknevuL0ePXSrpfEnTkv7M3b9TU+wAcmI8I4si\nx6GbMs7VclyWtvY3JP2lpOtiy+6QdKm7T5nZlyRdKukzZrZE0pmSjpL0dkn/YGa/5e7T1YYNoKBv\niPGMHIq2uqve/6RJbWu7+92SXhxY9l13n4ru3itpYXR7paQb3f1X7v7PkrZIOq7CeAGUwHgGuqGK\nCWHnSbopur1AvcHdty1atg8zWyVplSTNmjergjAAVKD0eH7b3EPrjA8FhNaazqPLsZdRKjmb2WWS\npiTdkHdbd18raa0kvWnxm71MHADKq2o8L1l0DOM5EE21hKtIoJPavk5SODmb2cfUm1hysrv3B+N2\nSYtiqy2MlgEIGOMZCEuh5GxmyyV9WtJJ7v6L2EPrJf2VmV2h3gSSIyX9sHSUAGrDeEZZVL3Vy3Iq\n1TclfVDSwWa2TdJq9WZz/rqkO8xMku5194+7+2Yzu1nSI+q1xy5gZicQDsYz0A2pydndPzpk8dUj\n1v+8pM+XCQpAPRjPQDdwhTAAAAJDcgYAIDAkZwAAAkNyBgAgMCRnAAACQ3IGACAwtudiQC0GYfZT\nSa9Jer7tWBIcrDBjCzUuKdzYQo1L2je2w9z9rW0FU5SZvSrpsbbjSNCl338oQo1L6k5sucdyEMlZ\nksxso7svazuOYUKNLdS4pHBjCzUuKezY8gj5dRBbfqHGJY13bLS1AQAIDMkZAIDAhJSc17YdwAih\nxhZqXFK4sYUalxR2bHmE/DqILb9Q45LGOLZgjjkDAICekCpnAACgAJKzmS03s8fMbIuZXdJyLIvM\n7Ptm9oiZbTazT0TL15jZdjN7IPq3oqX4tprZQ1EMG6NlB5nZHWb2ePRzbsMxvTP2vjxgZj8zs0+2\n9Z6Z2TVmttPMHo4tG/oeWc9fRP/3HjSzpS3EdrmZ/e9o/39rZgdGyxeb2S9j799VdcZWlVDGM2O5\ncFyM5+JxVTuW3b21f5JmSHpC0hGSZkv6saQlLcYzX9LS6Pb+kn4iaYmkNZL+vM33Koppq6SDB5b9\nV0mXRLcvkfSlln+fz0o6rK33TNKJkpZKejjtPZK0QtLfSzJJx0va0EJs/1rSzOj2l2KxLY6v14V/\nIY1nxnJlv0/Gc/a4Kh3LbVfOx0na4u5PuvsuSTdKWtlWMO6+w93vj26/KulRSQvaiiejlZLWRbfX\nSTqtxVhOlvSEuz/VVgDufrekFwcWJ71HKyVd5z33SjrQzOY3GZu7f9fdp6K790paWNf+GxDMeGYs\nV4LxnCOuqsdy28l5gaRnYve3KZABZGaLJb1P0oZo0YVRu+KaNtpNEZf0XTPbZGaromWHuPuO6Paz\nkg5pJzRJ0pmSvhm7H8J7JiW/R6H9/ztPvU/+fYeb2Y/M7C4z+1dtBZVDaO+nJMZyCYzn4kqP5baT\nc5DMbD9JfyPpk+7+M0lflfQOSUdL2iHpv7cU2gnuvlTSRyRdYGYnxh/0Xg+llen3ZjZb0qmS/jpa\nFMp7tpc236NRzOwySVOSbogW7ZB0qLu/T9LFkv7KzN7SVnxdxVguhvFcXFVjue3kvF3Sotj9hdGy\n1pjZLPUG8w3u/i1Jcvfn3H3a3d+Q9DX12neNc/ft0c+dkv42iuO5fusm+rmzjdjU+yNzv7s/F8UY\nxHsWSXqPgvj/Z2Yfk3SKpD+O/tjI3X/l7i9Etzepdyz3t5qOLacg3s8+xnIpjOcCqhzLbSfn+yQd\naWaHR5/UzpS0vq1gzMwkXS3pUXe/IrY8ftzi30h6eHDbBmKbY2b792+rN/ngYfXer3Oi1c6RdGvT\nsUU+qlgLLIT3LCbpPVov6exolufxkl6JtcsaYWbLJX1a0qnu/ovY8rea2Yzo9hGSjpT0ZJOxFRDM\neGYsl8Z4zqnysVzXbLas/9SbYfcT9T5NXNZyLCeo1yJ5UNID0b8Vkq6X
9FC0fL2k+S3EdoR6s19/\nLGlz/72SNE/SnZIel/QPkg5qIbY5kl6QdEBsWSvvmXp/UHZIel29Y07nJ71H6s3q/Er0f+8hScta\niG2LesfJ+v/frorW/bfR7/kBSfdL+oOmf68FX2MQ45mxXCo+xnOxuCody1whDACAwLTd1gYAAANI\nzgAABIbkDABAYEjOAAAEhuQMAEBgSM4AAASG5AwAQGBIzgAABIbkDABAYEjOAAAEhuQMAEBgSM4A\nAASG5AwAQGBIzgAABIbkDABAYEjOAAAEhuQMAEBgSM4AAASG5AwAQGBIzgAABIbkDABAYGpLzma2\n3MweM7MtZnZJXfsBUC/GMtA8c/fqn9RshqSfSPqwpG2S7pP0UXd/pPKdAagNYxlox8yanvc4SVvc\n/UlJMrMbJa2UNHRAHzRnP1904EE1hQJ004P/8szz7v7WlsPINZYlaabN9F/XrIbCA8L3K72uKZ+y\nPNvUlZwXSHomdn+bpPfHVzCzVZJWSdKCA+bq9gs+U1MoQDctvOzCp9qOQRnGsrT3eJ6tWVoy44hm\nogM64JHpJ3Nv09qEMHdf6+7L3H3ZvDn7tRUGgArEx/NMzWg7HKDz6krO2yUtit1fGC0D0C2MZaAF\ndSXn+yQdaWaHm9lsSWdKWl/TvgDUh7EMtKCWY87uPmVmF0r6jqQZkq5x98117AtAfboylu+b6oV0\n7MyjWo4EqEZdE8Lk7rdLur33kgEDAAAbUklEQVSu5wfQjC6N5fumNjeeoLv4waDqmPvPlyRtP6O2\n79L7WiWuEAYAQGBqq5wBoCmTWl0VkVblFnm+pPc/bV9pFXzVsXYJyRkAJkA8ETaV9PpJt2gbPR7r\npH0Ao60NAEBgqJwBBCupwhusokZVV4Nt1/hz5mmnlq3cBp8zbd9VV4pVP1/ZONuYvNclJGcAwUn6\nw1+2HZslIQxbp8x+h72W+6Y2Zz7eSgKbTLS1AQAIDJUzgGBkrRbztkSzPO+odYpOTEqKc9SkLCpl\nugYSyRlAhxT9Y11Fsi/yYWDwNvJp470L5YMBbW0AAAJD5QwANWm7+uqqpNn1k4TKGQCAwJCcAQCF\nlT3NbFIr4zS0tQF0TtUXsMgyGzvPRKH4ZSvzPl8oE5L6kuIZvDRnlm2zJuJRF4opclGZYftu8qIy\ny5Yty/38VM4AAASGyhlAq+5//eDdt2f4SZKk6ZQKa4afJMW2G/ZcWZYPk1Td9WMbtt9R+yj6fP3t\ndq8XWTrr+aHr163IOdvxbfvrDltetLWd9Ypvg/tu44pveZGcAQQhnnSO1eg/cEsH7vfXH1wuSTNe\nP2nI0iHrebb18mw36g/1sFilfV97fL08HzJGyZNA8rTxBxU5FJBlu65dVObR6Scz76OPtjYAAIGh\ncgYwNqqqLKsSj6etdnTIuOJbMpIzgM4LLSkPc//rB5OgO67JGfS0tQEACAzJGUCndaFq7rv/9YM7\nFS/aU7itbWaLJF0n6RBJLmmtu3/ZzA6SdJOkxZK2SjrD3V8qHyqAunRxPBdJcidc/6EaItnjnrO+\nl2k9Wtx7K3pRmaT/AzP8pN2n4yXNpp+2u0Y+Hlf2ojJNX4RkStJ/dPclko6XdIGZLZF0iaQ73f1I\nSXdG9wGEjfEMBKRw5ezuOyTtiG6/amaPSlogaaWkD0arrZP0A0mfKRUlgFp1bTznqZrrrpZH7WtU\nJT2uM7lDu6hMv0Ieus8Rqr6oTF6VzNY2s8WS3idpg6RDooEuSc+q1yYbts0qSaskacEBc6sIA0AF\nyo7n2ZpVW2xpf5SbTMRZxOPJkqjHKUlXdVGZwd951ovFhHJRmRmvn6Q39FjuOEpPCDOz/ST9jaRP\nuvvP4o+5u6t3/Gof7r7W3Ze5+7J5c/YrGwaAClQxnmdqRgORAuOtVHI2s1nqDeQb3P1b0eLnzGx+\n9Ph8STvLhQigCaGP51FV8wnXfyi4qnlQlviYyb230N6PJuMpnJzNzCRdLelRd78i9tB6SedEt8+R\ndGvx8AA0gfEMhKXMMecPSDpL0kNm9kC07LOSvijpZjM7X9JTks4oFyKABgQ7nrt2nHmUfqyTdvw5\nj9Cq5UFNTeQrM1v7HkmW8PDJRZ8XQPNCHc9preyuOuH6D6WeEz1pSbpIUl788bsrj2PrVSdmXrfO\n3xFXCAMAIDB88QWA4IxTKztJlhb3pMhTNddRLQ97/rwVdNXVM8kZQDAmISkPSmtxj/OlPrMk5bqT\ncZ79jkrYVbe4aWsDABAYkjMAtCztPO1x/DarkKvmJFniqer3RFsbQBAmsaU9idJ+z6El5EFZjklX\n0eKmcgYAIDAkZwBAI0ZVzYs/fnfwVXNc3bHS1gYQtElqZ2e5OEkXdb2VnSStxV3m+DOVMwAAgaFy\nBgDUYlwr5kGLP353rouWZEHlDAABmcTTqsZB1cfMSc4AAASG5AwAqNyktLTrQnIGACAwJGcAreGq\nYBg3VXUEmK0NAGgM7exsqJwBAAgMyRkAgMCQnAEACAzJGQCAwJROzmY2w8x+ZGa3RfcPN7MNZrbF\nzG4ys9nlwwTQBMYzEIYqKudPSHo0dv9Lkq5099+U9JKk8yvYB4BmMJ6BAJRKzma2UNLvS/p6dN8k\nfUjSLdEq6ySdVmYfAJrBeAbCUbZy/h+SPi3pjej+PEkvu/tUdH+bpAUl9wGgGYxnlMIlO6tTODmb\n2SmSdrr7poLbrzKzjWa28YXXfl40DAAVqHI8T2m64uiAyVPmCmEfkHSqma2Q9BuS3iLpy5IONLOZ\n0afthZK2D9vY3ddKWitJ711wqJeIA0B5lY3nOfYmxjNQUuHK2d0vdfeF7r5Y0pmSvufufyzp+5JO\nj1Y7R9KtpaMEUCvGMxCWOs5z/oyki81si3rHrK6uYR8AmsF4BlpQyRdfuPsPJP0guv2kpOOqeF4A\nzWtyPC+d9fzISUT3nPU9vpkKE4krhAEAEBiSMwAAgSE5AwAQmEqOOQMAqnHPWd8b+fjSWc83FAna\nROUMAEBgqJzRuAWfvWDo8u3/5SuVbttfd9TzZlknz36HPV9826T9DH3+yy4cGdOk6FeSVc3avuvs\n1ZKkk677XCXPhz3SZt9vvepELuGZEckZjUlLhAs+e0FqIk1KenVJS7ZZts+S+Jt+Xegl6aYTdBc/\nGNw3tVmSdOzMoyp5vif+5+gLyL3jP1jh7dO27RLa2gAABIbKGcFLqrj79+uqNJOq3u3/5Sup+yzT\nUs/S3h9HTV2QJOSqNW0yWJP6FXOVpu2uxOo2raLuP150+64hOaNxg4mtn4ySjhs3nayyHoceJc8x\n7MREzzHniRFSUpZ6ibnfxq4jSQ/TT7pP/E8v1J5+x3+w1ATeJbS1AQAIDJUzGpNlpvIktXQn6bVm\n1T+HN6m9nTRzuz/RapjBNnbSpKxhy+PPm9QOH7bvPK3zwap52u7aZ51+FTv4vlQ9WWtwf1Xpv6YZ\nfpIW59x2nKrhPEjOE6yK9m0
... (remainder of base64-encoded PNG figure data omitted)\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-Mc6KoQ2eL6I", + "colab_type": "text" + }, + "source": [ + "# Conclusion\n", + "\n", + "Looks good! Have fun training the models on your own dataset ! There are a lot of features that this demo doesn't use, like:\n", + "\n", + "1. Mixed precision training\n", + "2. Lighter object detection architectures like RetinaNet\n", + "\n", + "and so much more !" + ] + } + ] +} diff --git a/demo/shapes_pruning.ipynb b/demo/shapes_pruning.ipynb new file mode 100644 index 000000000..ce143da15 --- /dev/null +++ b/demo/shapes_pruning.ipynb @@ -0,0 +1,2679 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "shapes-pruning.ipynb", + "version": "0.3.2", + "provenance": [], + "collapsed_sections": [ + "LUQbRTRocPNN", + "aiLvxXRpDbiq", + "xnr8tbDz7WjS", + "5DC0K7tW7d-M", + "BI2ncK7kATEh", + "hbzY16ocEdrg", + "If8z4OZfDHmC", + "mOo-0LGFEAmc", + "bbCBInqHFUg7", + "tAn3omCjTFGI", + "xs_KL1R1aGSA", + "NVjPYFN1Pz6D", + "tHq9j1HENMMw" + ] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "268x1mG64rCy", + "colab_type": "text" + }, + "source": [ + "# Installation" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "VNvKG2TF3Y0B", + "colab_type": "code", + "outputId": "62ad88fa-0b36-4ebe-f6f6-77e4b9c4e56f", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "%%writefile setup.sh\n", + "\n", + "# maskrcnn_benchmark and coco api dependencies\n", + "pip install ninja yacs cython matplotlib tqdm opencv-python\n", + "\n", + "# follow PyTorch installation in https://pytorch.org/get-started/locally/\n", + "# we give the instructions for CUDA 9.0\n", + "pip install -c pytorch pytorch-nightly torchvision cudatoolkit=9.0\n", + "\n", + "\n", + "git clone https://github.com/cocodataset/cocoapi.git\n", + "cd cocoapi/PythonAPI\n", + "python setup.py build_ext install\n", + "cd ../../\n", + "\n", + "# install apex\n", + "rm -rf apex\n", + "git clone https://github.com/NVIDIA/apex.git\n", + "cd apex\n", + "git pull\n", + "python setup.py install --cuda_ext --cpp_ext\n", + "cd ../\n", + "\n", + "# install PyTorch Detection\n", + "git clone https://github.com/facebookresearch/maskrcnn-benchmark.git\n", + "cd maskrcnn-benchmark\n", + "\n", + "# the following will install the lib with\n", + "# symbolic links, so that you can modify\n", + "# the files if you want and won't need to\n", + "# re-build it\n", + "python setup.py build develop\n" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing setup.sh\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NYzsp3Ng3mOy", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!sh setup.sh" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1uoPMGDl49Wk", + "colab_type": "text" + }, + "source": [ + "### Checking our Installation\n", + "\n", + "If a module not found error appears, restart the runtime. 
The libraries should be loaded after restarting" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3q-n76S95KA3", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import maskrcnn_benchmark" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aiLvxXRpDbiq", + "colab_type": "text" + }, + "source": [ + "# Imports" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kLzesfGNX9O2", + "colab_type": "code", + "outputId": "62004ff6-534f-4181-ca2c-d166e34f97a9", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "import torch\n", + "from torch import nn\n", + "import torch.nn.functional as Fx\n", + "import datetime\n", + "\n", + "# Set up custom environment before nearly anything else is imported\n", + "# NOTE: this should be the first import (no not reorder)\n", + "from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n", + "\n", + "from maskrcnn_benchmark.data.build import *\n", + "from maskrcnn_benchmark.structures.bounding_box import BoxList\n", + "from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask\n", + "from maskrcnn_benchmark.modeling.detector import build_detection_model\n", + "from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\n", + "from maskrcnn_benchmark.structures.image_list import to_image_list\n", + "from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\n", + "from maskrcnn_benchmark import layers as L\n", + "from maskrcnn_benchmark.utils import cv2_util\n", + "from maskrcnn_benchmark.utils.miscellaneous import mkdir\n", + "from maskrcnn_benchmark.utils.logger import setup_logger\n", + "from maskrcnn_benchmark.utils.comm import synchronize, get_rank\n", + "from maskrcnn_benchmark.config import cfg\n", + "from maskrcnn_benchmark.config import cfg\n", + "from maskrcnn_benchmark.data import make_data_loader\n", + "from maskrcnn_benchmark.solver import make_lr_scheduler\n", + "from maskrcnn_benchmark.solver import make_optimizer\n", + "from maskrcnn_benchmark.engine.inference import inference\n", + "from maskrcnn_benchmark.engine.trainer import do_train\n", + "from maskrcnn_benchmark.modeling.detector import build_detection_model\n", + "from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\n", + "from maskrcnn_benchmark.utils.collect_env import collect_env_info\n", + "from maskrcnn_benchmark.utils.comm import synchronize, get_rank\n", + "from maskrcnn_benchmark.utils.imports import import_file\n", + "from maskrcnn_benchmark.data.datasets.evaluation import evaluate\n", + "from maskrcnn_benchmark.utils.comm import is_main_process, get_world_size\n", + "from maskrcnn_benchmark.utils.comm import all_gather\n", + "from maskrcnn_benchmark.utils.timer import Timer, get_time_str\n", + "from maskrcnn_benchmark.engine.inference import compute_on_dataset, _accumulate_predictions_from_multiple_gpus\n", + "from maskrcnn_benchmark.data.datasets.evaluation.coco import coco_evaluation\n", + "from maskrcnn_benchmark.modeling.utils import cat\n", + "from maskrcnn_benchmark.structures.image_list import to_image_list\n", + "\n", + "from maskrcnn_benchmark.modeling.backbone import build_backbone\n", + "from maskrcnn_benchmark.modeling.rpn.rpn import build_rpn\n", + "from maskrcnn_benchmark.modeling.roi_heads.roi_heads import build_roi_heads\n", + "from maskrcnn_benchmark.modeling.make_layers import make_conv3x3\n", + "from maskrcnn_benchmark.structures.image_list import 
to_image_list\n", + "from maskrcnn_benchmark.modeling.backbone import build_backbone\n", + "from maskrcnn_benchmark.modeling.rpn.rpn import build_rpn\n", + "from maskrcnn_benchmark.modeling.roi_heads.roi_heads import build_roi_heads\n", + "\n", + "import torch.distributed as dist\n", + "\n", + "from maskrcnn_benchmark.utils.comm import get_world_size\n", + "from maskrcnn_benchmark.utils.metric_logger import MetricLogger\n", + "\n", + "\n", + "from PIL import Image\n", + "import json\n", + "import logging\n", + "import torch\n", + "import numpy as np\n", + "import skimage.draw as draw\n", + "import tempfile\n", + "from pycocotools.coco import COCO\n", + "import os\n", + "import sys\n", + "import random\n", + "import math\n", + "import re\n", + "import time\n", + "import cv2\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "from tqdm import tqdm\n", + "\n", + "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n", + "from torchvision import transforms as T\n", + "from torchvision.transforms import functional as F\n", + "from google.colab.patches import cv2_imshow\n", + "\n", + " \n", + "logger_dir = 'log'\n", + "\n", + "if logger_dir:\n", + " mkdir(logger_dir)\n", + "\n", + "logger = setup_logger(\"maskrcnn_benchmark\", logger_dir, get_rank())\n", + "logger.info(\"Using {} GPUs\".format(1))\n" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2019-07-12 03:39:26,768 maskrcnn_benchmark INFO: Using 1 GPUs\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DvU-NYKJ3uzb", + "colab_type": "text" + }, + "source": [ + "# Loading Our Dataset\n", + "\n", + "To train a network using the MaskRCNN repo, we first need to define our dataset. The dataset needs to be a class of type object and should provide the following methods and attributes. \n", + "\n", + "1. **__getitem__(self, idx)**: This function should return a PIL Image, a BoxList and the idx. The BoxList is an abstraction for our bounding boxes, segmentation masks, class labels and also people keypoints. Please check ABSTRACTIONS.md for more details on this. \n", + "\n", + "2. **__len__()**: returns the length of the dataset. \n", + "\n", + "3. **get_img_info(self, idx)**: Return a dict of img info with the fields \"height\" and \"width\" filled in with the idx's image's height and width.\n", + "\n", + "4. **self.coco**: Should be a variable that holds the COCO object for your annotations so that you can perform evaluations of your dataset. \n", + "\n", + "5. **self.id_to_img_map**: Is a dictionary that maps the ids to coco image ids. In almost all cases, just map the idxs to idxs. This is simply a requirement for the coco evaluation. \n", + "\n", + "6. **self.contiguous_category_id_to_json_id**: Another requirement for coco evaluation. It maps the category to the json category id. Again, for almost all purposes the category id and the json id should be the same. \n", + "\n", + "Given below is a sample of a dataset. It is the Shape Dataset taken from the Matterport Mask RCNN Repo. One important detail is that the constructor of the dataset should have the variable transforms that is set inside the constructor. It should then be used inside **__getitem__(idx)** as shown below."
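, + "\n", + "In outline, the required interface looks like the minimal sketch below (a hypothetical `MinimalDataset` with a single dummy image, shown only to make the contract explicit; the full working example follows):\n", + "\n", + "```python\n",
+ "from PIL import Image\n", + "import numpy as np\n", + "import torch\n", + "from maskrcnn_benchmark.structures.bounding_box import BoxList\n", + "\n",
+ "class MinimalDataset(object):\n", + "    def __init__(self, transforms=None):\n", + "        self.transforms = transforms  # IMPORTANT: applied inside __getitem__\n", + "\n",
+ "    def __getitem__(self, idx):\n", + "        image = Image.fromarray(np.zeros((128, 128, 3), dtype=np.uint8))\n", + "        boxlist = BoxList(torch.tensor([[10., 10., 50., 50.]]), image.size, mode=\"xyxy\")\n", + "        boxlist.add_field(\"labels\", torch.tensor([1]))\n", + "        if self.transforms:\n", + "            image, boxlist = self.transforms(image, boxlist)\n", + "        return image, boxlist, idx\n", + "\n",
+ "    def __len__(self):\n", + "        return 1\n", + "\n",
+ "    def get_img_info(self, idx):\n", + "        return {\"height\": 128, \"width\": 128}\n", + "```\n", + "\n",
+ "(`self.coco`, `self.id_to_img_map` and `self.contiguous_category_id_to_json_id` are left out of this sketch; they are only needed for the COCO-style evaluation, and the ShapeDataset below shows how to fill them in.)"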
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xnr8tbDz7WjS", + "colab_type": "text" + }, + "source": [ + "## Helper Functions" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "tb_5MERf7c_1", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Helper Functions for the Shapes Dataset\n", + "\n", + "def non_max_suppression(boxes, scores, threshold):\n", + " \"\"\"Performs non-maximum suppression and returns indices of kept boxes.\n", + " boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.\n", + " scores: 1-D array of box scores.\n", + " threshold: Float. IoU threshold to use for filtering.\n", + " \"\"\"\n", + " assert boxes.shape[0] > 0\n", + " if boxes.dtype.kind != \"f\":\n", + " boxes = boxes.astype(np.float32)\n", + "\n", + " # Compute box areas\n", + " y1 = boxes[:, 0]\n", + " x1 = boxes[:, 1]\n", + " y2 = boxes[:, 2]\n", + " x2 = boxes[:, 3]\n", + " area = (y2 - y1) * (x2 - x1)\n", + "\n", + " # Get indicies of boxes sorted by scores (highest first)\n", + " ixs = scores.argsort()[::-1]\n", + "\n", + " pick = []\n", + " while len(ixs) > 0:\n", + " # Pick top box and add its index to the list\n", + " i = ixs[0]\n", + " pick.append(i)\n", + " # Compute IoU of the picked box with the rest\n", + " iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n", + " # Identify boxes with IoU over the threshold. This\n", + " # returns indices into ixs[1:], so add 1 to get\n", + " # indices into ixs.\n", + " remove_ixs = np.where(iou > threshold)[0] + 1\n", + " # Remove indices of the picked and overlapped boxes.\n", + " ixs = np.delete(ixs, remove_ixs)\n", + " ixs = np.delete(ixs, 0)\n", + " return np.array(pick, dtype=np.int32)\n", + "\n", + "def compute_iou(box, boxes, box_area, boxes_area):\n", + " \"\"\"Calculates IoU of the given box with the array of the given boxes.\n", + " box: 1D vector [y1, x1, y2, x2]\n", + " boxes: [boxes_count, (y1, x1, y2, x2)]\n", + " box_area: float. the area of 'box'\n", + " boxes_area: array of length boxes_count.\n", + " Note: the areas are passed in rather than calculated here for\n", + " efficiency. Calculate once in the caller to avoid duplicate work.\n", + " \"\"\"\n", + " # Calculate intersection areas\n", + " y1 = np.maximum(box[0], boxes[:, 0])\n", + " y2 = np.minimum(box[2], boxes[:, 2])\n", + " x1 = np.maximum(box[1], boxes[:, 1])\n", + " x2 = np.minimum(box[3], boxes[:, 3])\n", + " intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n", + " union = box_area + boxes_area[:] - intersection[:]\n", + " iou = intersection / union\n", + " return iou" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5DC0K7tW7d-M", + "colab_type": "text" + }, + "source": [ + "## Dataset" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WhG_Tu9ELAsj", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class ShapeDataset(object):\n", + " \n", + " def __init__(self, num_examples, transforms=None):\n", + " \n", + " self.height = 128\n", + " self.width = 128\n", + " \n", + " self.num_examples = num_examples\n", + " self.transforms = transforms # IMPORTANT, DON'T MISS\n", + " self.image_info = []\n", + " self.logger = logging.getLogger(__name__)\n", + " \n", + " # Class Names: Note that the ids start fromm 1 not 0. This repo uses the 0 index for background\n", + " self.class_names = {\"square\": 1, \"circle\": 2, \"triangle\": 3}\n", + " \n", + " # Add images\n", + " # Generate random specifications of images (i.e. 
color and\n", + " # list of shapes sizes and locations). This is more compact than\n", + " # actual images. Images are generated on the fly in load_image().\n", + " for i in range(num_examples):\n", + " bg_color, shapes = self.random_image(self.height, self.width)\n", + " self.image_info.append({ \"path\":None,\n", + " \"width\": self.width, \"height\": self.height,\n", + " \"bg_color\": bg_color, \"shapes\": shapes\n", + " })\n", + " \n", + " # Fills in the self.coco varibale for evaluation.\n", + " self.get_gt()\n", + " \n", + " # Variables needed for coco mAP evaluation\n", + " self.id_to_img_map = {}\n", + " for i, _ in enumerate(self.image_info):\n", + " self.id_to_img_map[i] = i\n", + "\n", + " self.contiguous_category_id_to_json_id = { 0:0 ,1:1, 2:2, 3:3 }\n", + " \n", + "\n", + " def random_shape(self, height, width):\n", + " \"\"\"Generates specifications of a random shape that lies within\n", + " the given height and width boundaries.\n", + " Returns a tuple of three valus:\n", + " * The shape name (square, circle, ...)\n", + " * Shape color: a tuple of 3 values, RGB.\n", + " * Shape dimensions: A tuple of values that define the shape size\n", + " and location. Differs per shape type.\n", + " \"\"\"\n", + " # Shape\n", + " shape = random.choice([\"square\", \"circle\", \"triangle\"])\n", + " # Color\n", + " color = tuple([random.randint(0, 255) for _ in range(3)])\n", + " # Center x, y\n", + " buffer = 20\n", + " y = random.randint(buffer, height - buffer - 1)\n", + " x = random.randint(buffer, width - buffer - 1)\n", + " # Size\n", + " s = random.randint(buffer, height//4)\n", + " return shape, color, (x, y, s)\n", + "\n", + " def random_image(self, height, width):\n", + " \"\"\"Creates random specifications of an image with multiple shapes.\n", + " Returns the background color of the image and a list of shape\n", + " specifications that can be used to draw the image.\n", + " \"\"\"\n", + " # Pick random background color\n", + " bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n", + " # Generate a few random shapes and record their\n", + " # bounding boxes\n", + " shapes = []\n", + " boxes = []\n", + " N = random.randint(1, 4)\n", + " labels = {}\n", + " for _ in range(N):\n", + " shape, color, dims = self.random_shape(height, width)\n", + " shapes.append((shape, color, dims))\n", + " x, y, s = dims\n", + " boxes.append([y-s, x-s, y+s, x+s])\n", + "\n", + " # Apply non-max suppression wit 0.3 threshold to avoid\n", + " # shapes covering each other\n", + " keep_ixs = non_max_suppression(np.array(boxes), np.arange(N), 0.3)\n", + " shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n", + " \n", + " return bg_color, shapes\n", + " \n", + " \n", + " def draw_shape(self, image, shape, dims, color):\n", + " \"\"\"Draws a shape from the given specs.\"\"\"\n", + " # Get the center x, y and the size s\n", + " x, y, s = dims\n", + " if shape == 'square':\n", + " cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)\n", + " elif shape == \"circle\":\n", + " cv2.circle(image, (x, y), s, color, -1)\n", + " elif shape == \"triangle\":\n", + " points = np.array([[(x, y-s),\n", + " (x-s/math.sin(math.radians(60)), y+s),\n", + " (x+s/math.sin(math.radians(60)), y+s),\n", + " ]], dtype=np.int32)\n", + " cv2.fillPoly(image, points, color)\n", + " return image, [ x-s, y-s, x+s, y+s]\n", + "\n", + "\n", + " def load_mask(self, image_id):\n", + " \"\"\"\n", + " Generates instance masks for shapes of the given image ID.\n", + " \"\"\"\n", + " info = 
self.image_info[image_id]\n", + " shapes = info['shapes']\n", + " count = len(shapes)\n", + " mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n", + " boxes = []\n", + " \n", + " for i, (shape, _, dims) in enumerate(info['shapes']):\n", + " mask[:, :, i:i+1], box = self.draw_shape( mask[:, :, i:i+1].copy(),\n", + " shape, dims, 1)\n", + " boxes.append(box)\n", + " \n", + " \n", + " # Handle occlusions\n", + " occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n", + " for i in range(count-2, -1, -1):\n", + " mask[:, :, i] = mask[:, :, i] * occlusion\n", + " occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n", + " \n", + " segmentation_mask = mask.copy()\n", + " segmentation_mask = np.expand_dims(np.sum(segmentation_mask, axis=2), axis=2)\n", + " \n", + " # Map class names to class IDs.\n", + " class_ids = np.array([self.class_names[s[0]] for s in shapes])\n", + " return segmentation_mask.astype(np.uint8), mask.astype(np.uint8), class_ids.astype(np.int32), boxes\n", + " \n", + " def load_image(self, image_id):\n", + " \"\"\"Generate an image from the specs of the given image ID.\n", + " Typically this function loads the image from a file, but\n", + " in this case it generates the image on the fly from the\n", + " specs in image_info.\n", + " \"\"\"\n", + " info = self.image_info[image_id]\n", + " bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n", + " image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n", + " image = image * bg_color.astype(np.uint8)\n", + " for shape, color, dims in info['shapes']:\n", + " image, _ = self.draw_shape(image, shape, dims, color)\n", + " return image\n", + " \n", + " def __getitem__(self, idx):\n", + " \n", + " \"\"\"Generate an image from the specs of the given image ID.\n", + " Typically this function loads the image from a file, but\n", + " in this case it generates the image on the fly from the\n", + " specs in image_info.\n", + " \"\"\"\n", + " image = Image.fromarray(self.load_image(idx))\n", + " segmentation_mask, masks, labels, boxes = self.load_mask(idx)\n", + " \n", + " # create a BoxList from the boxes\n", + " boxlist = BoxList(boxes, image.size, mode=\"xyxy\")\n", + "\n", + " # add the labels to the boxlist\n", + " boxlist.add_field(\"labels\", torch.tensor(labels))\n", + "\n", + " # Add masks to the boxlist\n", + " masks = np.transpose(masks, (2,0,1))\n", + " masks = SegmentationMask(torch.tensor(masks), image.size, \"mask\")\n", + " boxlist.add_field(\"masks\", masks)\n", + " \n", + " # Add semantic segmentation masks to the boxlist for panoptic segmentation\n", + " segmentation_mask = np.transpose(segmentation_mask, (2,0,1))\n", + " seg_masks = SegmentationMask(torch.tensor(segmentation_mask), image.size, \"mask\")\n", + " boxlist.add_field(\"seg_masks\", seg_masks)\n", + " \n", + " # Important line! dont forget to add this\n", + " if self.transforms:\n", + " image, boxlist = self.transforms(image, boxlist)\n", + "\n", + " # return the image, the boxlist and the idx in your dataset\n", + " return image, boxlist, idx\n", + " \n", + " \n", + " def __len__(self):\n", + " return self.num_examples\n", + " \n", + "\n", + " def get_img_info(self, idx):\n", + " # get img_height and img_width. 
This is used if\n", + " # we want to split the batches according to the aspect ratio\n", + " # of the image, as it can be more efficient than loading the\n", + " # image from disk\n", + "\n", + " return {\"height\": self.height, \"width\": self.width}\n", + " \n", + " def get_gt(self):\n", + " # Prepares dataset for coco eval\n", + " \n", + " \n", + " images = []\n", + " annotations = []\n", + " results = []\n", + " \n", + " # Define categories\n", + " categories = [ {\"id\": 1, \"name\": \"square\"}, {\"id\": 2, \"name\": \"circle\"}, {\"id\": 3, \"name\": \"triangle\"}]\n", + "\n", + "\n", + " i = 1\n", + " ann_id = 0\n", + "\n", + " for img_id, d in enumerate(self.image_info):\n", + "\n", + " images.append( {\"id\": img_id, 'height': self.height, 'width': self.width } )\n", + "\n", + " for (shape, color, dims) in d['shapes']:\n", + " \n", + " if shape == \"square\":\n", + " category_id = 1\n", + " elif shape == \"circle\":\n", + " category_id = 2\n", + " elif shape == \"triangle\":\n", + " category_id = 3\n", + " \n", + " x, y, s = dims\n", + " bbox = [ x - s, y - s, x+s, y +s ] \n", + " area = (bbox[0] - bbox[2]) * (bbox[1] - bbox[3])\n", + " \n", + " # Format for COCOC\n", + " annotations.append( {\n", + " \"id\": int(ann_id),\n", + " \"category_id\": category_id,\n", + " \"image_id\": int(img_id),\n", + " \"area\" : float(area),\n", + " \"bbox\": [ float(bbox[0]), float(bbox[1]), float(bbox[2]) - float(bbox[0]) + 1, float(bbox[3]) - float(bbox[1]) + 1 ], # note that the bboxes are in x, y , width, height format\n", + " \"iscrowd\" : 0\n", + " } )\n", + "\n", + " ann_id += 1\n", + "\n", + " # Save ground truth file\n", + " \n", + " with open(\"tmp_gt.json\", \"w\") as f:\n", + " json.dump({\"images\": images, \"annotations\": annotations, \"categories\": categories }, f)\n", + "\n", + " # Load gt for coco eval\n", + " self.coco = COCO(\"tmp_gt.json\") \n", + " " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2hpTvuSp830x", + "colab_type": "text" + }, + "source": [ + "## Visualise Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BI2ncK7kATEh", + "colab_type": "text" + }, + "source": [ + "### Load" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "6nsO_MRUbBpk", + "colab_type": "code", + "outputId": "1515ea47-d4bb-4014-e1d3-46b80d221f18", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 87 + } + }, + "source": [ + "train_dt = ShapeDataset(100)\n", + "im, boxlist, idx = train_dt[0]" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F9njOSX0AU5-", + "colab_type": "text" + }, + "source": [ + "### Display some sample Images" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nMXB9sAW994F", + "colab_type": "code", + "outputId": "9687b5a9-e67f-48c9-b265-a5597c85358d", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 485 + } + }, + "source": [ + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " im, boxlist, idx = train_dt[i]\n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(im)\n", + "plt.show()\n", + " " + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X+w3HV97/HnOz8gNQaSEJsbklSw\nTe1Qpy3MmYiDtR3T2ojU4NTxwjgSNXcynWKrxY7GMndw5o4zUhWrMy1OKkjoIEhRhsytbaEp1mlH\n0IARAhGIKJIYElGUXCyQkPf9Y7/HLiGHnLPf7+73s7vPx8yZ893Pfvfse7/nvM/rfD773T2RmUiS\npHLMarsASZL0fIazJEmFMZwlSSqM4SxJUmEMZ0mSCmM4S5JUmL6Fc0SsjYgHImJ3RGzq1/1I6i97\nWRq86MfrnCNiNvAg8PvAHuAbwIWZeX/jdyapb+xlqR39mjmvBnZn5sOZ+SxwA7CuT/clqX/sZakF\nc/r0dZcDj3Zd3gO8eqqdZ50wP2fPW9inUlTXacsO93S77+3r14/XeDh88AePZ+bLWi5jRr0MMG/u\ngnzpiaf0tShpmPy/Z37E04cOxkxu09pvz4jYCGwEmDXvZBaffXFbpeg4PrXpQE+3e9dHf7HhSsbL\ngdsufaTtGqaru5/nn7CYda/63y1XJJXjlp3/Z8a36dey9l5gZdflFdXYz2Xm5sycyMyJWXPn96kM\n1fG5TQf4XI/BPHl7Db3j9jI8v5/nzV0wsOKkUdWvcP4GsCoiTo+IE4ALgK19ui9J/WMvSy3oy7J2\nZh6OiPcA/wLMBq7OzPv6cV+S+sdeltrRt+ecM/PLwJf79fXVX00tSU9+HZ9/Hl72sjR4vkOYJEmF\nMZwlSSqM4aznqXuG9ot9XUnS9PguEQIMT0kqiTNnSZIKYzhrYPq1ZC5Jo8Zw1sAD04CWpBdnOEuS\nVBjDWZKkwni29hhrc3nZdw6TpKk5c5YkqTCG85jypCxJKpfhLElSYQxntcoZvCS9kCeEjRnDUJLK\n58xZkqTCGM5qnW/rKUnPZziPkdIDsPT6JGlQDGdJkgrjCWFjwBmpJA0XZ84qis8/S5LhLElScXoO\n54hYGRG3R8T9EXFfRLy3Gl8cEbdFxEPV50XNlauZchaq6bCfpbLUmTkfBt6fmWcAZwMXR8QZwCZg\nW2auArZVl6UZcXl74OxnqSA9h3Nm7svMu6vtg8AuYDmwDthS7bYFOL9ukZL6y36WytLI2doRcRpw\nJnAnsDQz91VXPQYsbeI+NDPOOtUr+1lqX+0TwiLipcAXgfdl5pPd12VmAjnF7TZGxPaI2H7k0FN1\ny1CXUQrmUXosw6CJfn760MEBVCqNtlrhHBFz6TTydZn5pWp4f0Qsq65fBhzzt2tmbs7MicycmDV3\nfp0yJDWgqX6eN3fBYAqWRlids7UDuArYlZlXdF21FVhfba8Hbum9PMmTwwbBfpbKUuc553OAdwD3\nRsSOauwvgY8CN0bEBuAR4G31SpQ0APazVJCewzkz/wOIKa5e0+vXVe+cXapX9rNUFt8hTEPDPz4k\njQvDWZKkwvhfqUaAM0pJGi3OnDVUPHNb0jgwnCVJKozhPOTGdRY5ro9b0njwOechZThJ0uhy5ixJ\nUmEMZw0tTw6TNKoM5yFkIEnSaDOcJUkqjOGsoedKgqRR49naQ8QQkqTx4MxZkqTCGM5Dwlnzi/PM\nbUmjxHCWJKkwhrNGirNnSaPAE8IKZ9hI0vhx5ixJUmEMZ40cTw6TNOwM54IZMJI0ngxnSZIKU/uE\nsIiYDWwH9mbmeRFxOnADcApwF/COzHy27v2ME2fMzZg8ju/66C+2XMnwsJ+lMjQxc34vsKvr8uXA\nJzPzV4AngA0N3IekwbCfpQLUCueIWAG8CfhsdTmA1wM3VbtsAc6vcx+SBsN+lspRd+b818AHgCPV\n5VOAn2Tm4eryHmD5sW4YERsjYntEbD9y6KmaZYwOl7Sb5zGdtkb6+elDB/tfqTTieg7niDgPOJCZ\nd/Vy+8zcnJkTmTkxa+78XsuQpsWXV724Jvt53twFDVcnjZ86J4SdA7w5Is4F5gEnAZ8CFkbEnOqv\n7RXA3vplSuoz+1kqSM8z58z8UGauyMzTgAuAf8vMtwO3A2+tdlsP3FK7yjHgzE5tsp+lsvTjdc4f\nBC6JiN10nrO6qg/3IWkw7GepBY3844vM/ArwlWr7YWB1E193HDhbHqzPbTrg656Pw36W2uc7hEmS\nVBjDWZKkwhjOLXJJux2efCepdIazJEmFMZwlSSqM4dwCl1XL4PdAUqkaeSmVZsaX8kiSXowzZ0mS\nCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTCGsyRJhTGcJUkqjK9zltSam+/8QdslFO8trz617RLUAmfO\nkiQVxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIKYzhLklSYWuEcEQsj4qaI+HZE7IqI10TE4oi4\nLSIeqj4vaqrYNly0+mttlyANxDj0szQs6s6cPwX8c2b+GvCbwC5gE7AtM1cB26rLQ+ei1V/7eTB3\nb0sjbGT7WRo2PYdzRJwMvA64CiAzn83MnwDrgC3VbluA8+sWKam/7GepLHVmzqcDPwQ+FxHfjIjP\nRsR8YGlm7qv2eQxYeqwbR8TGiNgeEduPHHqqRhmSGtBYPz996OCASpZGV51wngOcBVyZmWcCT3HU\nkldmJpDHunFmbs7MicycmDV3fo0ymjfVErZL2xphjfXzvLkL+l6sNOrqhPMeYE9m3lldvolOc++P\niGUA1ecD9UqUNAD2s1SQnsM5Mx8DHo2IV1ZDa4D7ga3A+mpsPXBLrQol9Z39LJWl7r+M/FPguog4\nAXgYeBedwL8xIjYAjwBvq3kfAzOdZevJfa79+mv6XY40aCPVz9IwqxXOmbkDmDjGVWvqfF1Jg2c/\nS+XwHcIkSSqM4SxJUmEMZ3p7BzBfViVJ6pexD+c6IevbekqS+mHsw1mSpNIYzpIkFWZsw7nJJWmX\ntiVJTRrbcJYkqVSGsyRJhRnLcO7HMrRnbkuSmjKW4SxJUskMZ0mSClP3v1INlUEsO/tfqyRJdTlz\nliSpMGMTzoM+WcuTwyRJvRqbcJYkaVgYzpIkFWbkTwhrc3nZk8MkSb1w5ixJUmEMZ0mSCjPS4VzK\nGdOl1CFJGg4jHc6SJA2jWuEcEX8eEfdFxM6IuD4i5kXE6RFxZ0TsjogvRMQJTRUrqX/sZ6kcPYdz\nRCwH/gyYyMxXAbOBC4DLgU9m5q8ATwAbmih0Jkr8D1El1iRNKrmfpXFUd1l7DvALETEHeAmwD3g9\ncFN1/Rbg/Jr3IWkw7GepED2Hc2buBT4OfJ9OE/8UuAv4SWYernbbAyw/1u0jYmNEbI+I7UcOPdVr\nGS9Q+uy09Po0nprs56cPHRxEydJIq7OsvQhYB5wOnArM
B9ZO9/aZuTkzJzJzYtbc+b2WIakBTfbz\nvLkL+lSlND7qLGv/HvDdzPxhZh4CvgScAyyslsUAVgB7a9Yoqf/sZ6kgdcL5+8DZEfGSiAhgDXA/\ncDvw1mqf9cAt9UqcnmE64WqYatXYKKqfpXFX5znnO+mcKHI3cG/1tTYDHwQuiYjdwCnAVQ3UKamP\n7GepLLX+8UVmXgZcdtTww8DqOl9X0uDZz1I5hv4dwoZ5iXhY627aFecs5YpzlrZdhiQVY+T/ZaTa\nNZPQnc6+l/zn/jrlSNJQGPqZsyRJo2aoZ86jsCw8+Riu/fprWq6kOf1cou7+2s6iJY0qZ86SJBVm\nqGfOKkcbJ3Q5i5Y0qoYynEdhOftow7i8XdIZ1ga1pFHisrYkSYUZunAexVlzt2F4fKW/Lrn0+iTp\neIYunNWuYQo9Q1rSsDKcJUkqzNCcEDYMy71NKfHksGGegV5xzlJPEpM0VJw567iGOZgnucQtaZgY\nzpIkFWYownmclrS7tf24R3G2OWqPR9JoGopwliRpnBjOkiQVpuiztdte1i1BW2duj/Ly7+Rj8wxu\nSaUqduZsMD+fx0OSxkex4SxJ0rgynPU8o3iG9lTG5XFKGj6GsyRJhTluOEfE1RFxICJ2do0tjojb\nIuKh6vOiajwi4tMRsTsi7omIs2Za0EWrv+bzq1Pw2KiuQfezpN5MZ+Z8DbD2qLFNwLbMXAVsqy4D\nvBFYVX1sBK5spkz12zgtZ3cbw8d9DfazVLzjhnNmfhX48VHD64At1fYW4Pyu8Wuz4w5gYUQsa6pY\nSfXYz9Jw6PV1zkszc1+1/RgwOfVYDjzatd+eamwfR4mIjXT+GmfWvJMBXy40XRet/lpR/7FKQ6/R\nfp5/wuJp3/FbXn1qD+VKo6/2CWGZmUD2cLvNmTmRmROz5s6vW4akBjTRz/PmLuhDZdJ46TWc908u\nb1WfD1Tje4GVXfutqMYklct+lgrTazhvBdZX2+uBW7rGL6rO8jwb+GnXctmUTpn/lEvaM+SZ22pQ\no/0sqb7pvJTqeuBrwCsjYk9EbAA+Cvx+RDwE/F51GeDLwMPAbuDvgD/pS9X6OQNaM2E/S8PhuCeE\nZeaFU1y15hj7JnBx3aIk9Yf9LA0H3yFMkqTCGM4joO7zz2P2JhzH5DGQVBLDWZKkwhjO4pL/3N92\nCa3zGEgqieEsSVJhDOcR4mufJWk0GM6SJBXGcJYkqTCG8whyaVuShpvhPKJ8/lmShpfhLElSYQxn\nSZIKYziPuOkubV/yn/vH8o04xvVxSyqb4SxJUmEMZ0mSCmM4j4GZnLk9Tsu84/I4JQ0fw1mSpMIY\nzpIkFcZwHiMzeVOSUV7yHaele0nDyXCWJKkwc9ouAOBHT83n2q+/pu0yJEkqgjNnSZIKc9xwjoir\nI+JAROzsGvtYRHw7Iu6JiJsjYmHXdR+KiN0R8UBE/EG/Clf/jeJzs6P2eGbKfpaGw3RmztcAa48a\nuw14VWb+BvAg8CGAiDgDuAD49eo2fxsRsxurVq0YhUAbxT80enQN9rNUvOOGc2Z+FfjxUWO3Zubh\n6uIdwIpqex1wQ2Y+k5nfBXYDqxusV1IN9rM0HJp4zvndwD9V28uBR7uu21ONvUBEbIyI7RGx/cih\npxooQ/00zDPPYa27JbX7+elDB/tcojT6aoVzRFwKHAaum+ltM3NzZk5k5sSsufPrlKEBGqagG+Y/\nKNrQVD/Pm7ug+eKkMdPzS6ki4p3AecCazMxqeC+wsmu3FdWYpILZz1JZegrniFgLfAD4ncz8WddV\nW4HPR8QVwKnAKuDrtatUUSZno1ecs7TlSo7N2fLM2M9SeY4bzhFxPfC7wJKI2ANcRudszhOB2yIC\n4I7M/OPMvC8ibgTup7M8dnFmPtev4tWu7hBsO6gN5Omxn6XhcNxwzswLjzF81Yvs/xHgI3WKktQf\n9rM0HIp4+04NvzZm0c6WJY0qw1mN62dQG8iSxoHvrS1JUmGcOauvpjPTnZxdOyuWpA7DWa0zlCXp\n+VzWliSpMPHfbwbUYhERPwSeAh5vu5YpLKHM2kqtC8qtrdS64IW1vTwzX9ZWMb2KiIPAA23XMYVh\n+v6XotS6YHhqm3EvFxHOABGxPTMn2q7jWEqtrdS6oNzaSq0Lyq5tJkp+HNY2c6XWBaNdm8vakiQV\nxnCWJKkwJYXz5rYLeBGl1lZqXVBubaXWBWXXNhMlPw5rm7lS64IRrq2Y55wlSVJHSTNnSZJEAeEc\nEWsj4oGI2B0Rm1quZWVE3B4R90fEfRHx3mr8wxGxNyJ2VB/ntlTf9yLi3qqG7dXY4oi4LSIeqj4v\nGnBNr+w6Ljsi4smIeF9bxywiro6IAxGxs2vsmMcoOj5d/ezdExFntVDbxyLi29X93xwRC6vx0yLi\nv7qO32f6WVtTSulne7nnuuzn3utqtpczs7UPYDbwHeAVwAnAt4AzWqxnGXBWtb0AeBA4A/gw8Bdt\nHquqpu8BS44a+ytgU7W9Cbi85e/nY8DL2zpmwOuAs4CdxztGwLnAPwEBnA3c2UJtbwDmVNuXd9V2\nWvd+w/BRUj/by419P+3n6dfVaC+3PXNeDezOzIcz81ngBmBdW8Vk5r7MvLvaPgjsApa3Vc80rQO2\nVNtbgPNbrGUN8J3MfKStAjLzq8CPjxqe6hitA67NjjuAhRGxbJC1ZeatmXm4ungHsKJf9z8AxfSz\nvdwI+3kGdTXdy22H83Lg0a7LeyikgSLiNOBM4M5q6D3VcsXVbSw3VRK4NSLuioiN1djSzNxXbT8G\nDOafKR/bBcD1XZdLOGYw9TEq7efv3XT+8p90ekR8MyL+PSJ+u62iZqC04wnYyzXYz72r3ctth3OR\nIuKlwBeB92Xmk8CVwC8DvwXsAz7RUmmvzcyzgDcCF0fE67qvzM4aSiun30fECcCbgX+ohko5Zs/T\n5jF6MRFxKXAYuK4a2gf8UmaeCVwCfD4iTmqrvmFlL/fGfu5dU73cdjjvBVZ2XV5RjbUmIubSaebr\nMvNLAJm5PzOfy8wjwN/RWb4buMzcW30+ANxc1bF/cumm+nygjdro/JK5OzP3VzUWccwqUx2jIn7+\nIuKdwHnA26tfNmTmM5n5o2r7LjrP5f7qoGuboSKO5yR7uRb7uQdN9nLb4fwNYFVEnF79pXYBsLWt\nYiIigKuAXZl5Rdd49/MWbwF2Hn3bAdQ2PyIWTG7TOflgJ53jtb7abT1wy6Brq1xI1xJYCcesy1TH\naCtwUXWW59nAT7uWywYiItYCHwDenJk/6xp/WUTMrrZfAawCHh5kbT0opp/t5drs5xlqvJf7dTbb\ndD/onGH3IJ2/Ji5tuZbX0lkiuQfYUX2cC/w9cG81vhVY1kJtr6Bz9uu3gPsmjxVwCrANeAj4V2Bx\nC7XNB34EnNw11soxo/MLZR9wiM5zThumOkZ0zur8m+pn715gooXadtN5nmzy5+0z1b5/VH2fdwB3\nA3846O9rj4+
xiH62l2vVZz/3Vlejvew7hEmSVJi2l7UlSdJRDGdJkgpjOEuSVBjDWZKkwhjOkiQV\nxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIKYzhLklQYw1mSpMIYzpIkFcZwliSpMIazJEmFMZwl\nSSqM4SxJUmEMZ0mSCmM4S5JUGMNZkqTCGM6SJBWmb+EcEWsj4oGI2B0Rm/p1P5L6y16WBi8ys/kv\nGjEbeBD4fWAP8A3gwsy8v/E7k9Q39rLUjjl9+rqrgd2Z+TBARNwArAOO2dCnzJqdL5/dr1Kk4fTN\nw88+npkva7mMGfUywLxYlPNnnTqg8qTyPXXkBzydT8RMbtOvRFwOPNp1eQ/w6u4dImIjsBFg5azZ\n/Pvi/9GnUqThdNKB7z/Sdg1Mo5fh+f08P5bxpnlfGEx10hD4x6f/54xv09oJYZm5OTMnMnNiyazZ\nbZUhqQHd/XxiLGq7HGno9Suc9wIruy6vqMYkDRd7WWpBv8L5G8CqiDg9Ik4ALgC29um+JPWPvSy1\noC/POWfm4Yh4D/AvwGzg6sy8rx/3Jal/7GWpHX07RTozvwx8uV9fX9Jg2MvS4PkOYZIkFcZwliSp\nMIazJEmFMZwlSSqM4SxJUmEMZ0mSCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTCGsyRJhTGcJUkqjOEs\nSVJhDGdJkgpjOEuSVBjDWZKkwhjOkiQVxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIK03M4R8TK\niLg9Iu6PiPsi4r3V+OKIuC0iHqo+L2quXEn9YD9LZakzcz4MvD8zzwDOBi6OiDOATcC2zFwFbKsu\nSyqb/SwVpOdwzsx9mXl3tX0Q2AUsB9YBW6rdtgDn1y1SUn/Zz1JZGnnOOSJOA84E7gSWZua+6qrH\ngKVT3GZjRGyPiO2PH3muiTIkNaBuPz+TTwykTmmU1Q7niHgp8EXgfZn5ZPd1mZlAHut2mbk5Mycy\nc2LJrNl1y5DUgCb6+USflpZqqxXOETGXTiNfl5lfqob3R8Sy6vplwIF6JUoaBPtZKkeds7UDuArY\nlZlXdF21FVhfba8Hbum9PEmDYD9LZZlT47bnAO8A7o2IHdXYXwIfBW6MiA3AI8Db6pUoaQDsZ6kg\nPYdzZv4HEFNcvabXrytp8OxnqSy+Q5gkSYUxnCVJKozhLElSYQxnSZIKYzhLklQYw1mSpMIYzpIk\nFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mSCmM4S5JUmDr/MlKSNIRuffIv2i6hSG846eNtl/Bz\nzpwlSSqM4SxJUmEMZ0mSCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTC1wzkiZkfENyPi/1aXT4+IOyNi\nd0R8ISJOqF+mpEGwn6UyNDFzfi+wq+vy5cAnM/NXgCeADQ3ch6TBsJ+lAtQK54hYAbwJ+Gx1OYDX\nAzdVu2wBzq9zH5IGw36WylF35vzXwAeAI9XlU4CfZObh6vIeYHnN+5A0GPazVIiewzkizgMOZOZd\nPd5+Y0Rsj4jtjx95rtcyJDWgyX5+Jp9ouDpp/NT5xxfnAG+OiHOBecBJwKeAhRExp/prewWw91g3\nzszNwGaAs+aemDXqkFRfY/18yuxft5+lmnqeOWfmhzJzRWaeBlwA/Ftmvh24HXhrtdt64JbaVUrq\nK/tZKks/Xuf8QeCSiNhN5zmrq/pwH5IGw36WWtDI/3POzK8AX6m2HwZWN/F1JQ2e/Sy1z3cIkySp\nMIazJEmFMZwlSSqM4SxJUmEMZ0mSCtPI2dqD9Ft/9IO2S1CXHV88te0SJGnkOHOWJKkwhrMkSYUx\nnCVJKozhLElSYQxnSZIKYzhLklQYw1mSpMIYzpIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mS\nCmM4S5JUGMNZkqTCGM6SJBWmVjhHxMKIuCkivh0RuyLiNRGxOCJui4iHqs+LmipWUv/Yz1I56s6c\nPwX8c2b+GvCbwC5gE7AtM1cB26rLkspnP0uF6DmcI+Jk4HXAVQCZ+Wxm/gRYB2ypdtsCnF+3SEn9\nZT9LZakzcz4d+CHwuYj4ZkR8NiLmA0szc1+1z2PA0rpFSuo7+1kqSJ1wngOcBVyZmWcCT3HUkldm\nJpDHunFEbIyI7RGx/fEjz9UoQ1IDGuvnZ/KJvhcrjbo64bwH2JOZd1aXb6LT3PsjYhlA9fnAsW6c\nmZszcyIzJ5bMml2jDEkNaKyfT/ScMam2nsM5Mx8DHo2IV1ZDa4D7ga3A+mpsPXBLrQol9Z39LJVl\nTs3b/ylwXUScADwMvItO4N8YERuAR4C31bwPSYNhP0uFqBXOmbkDmDjGVWvqfF1Jg2c/S+XwHcIk\nSSqM4SxJUmEMZ0mSCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTCGsyRJhTGcJUkqjOEsSVJhDGdJkgpj\nOEuSVBjDWZKkwhjOkiQVxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIKYzhLklQYw1mSpMIYzpIk\nFWZOnRtHxJ8D/wtI4F7gXcAy4AbgFOAu4B2Z+WzNOiX1mf08Pt5w0sfbLkHH0XM4R8Ry4M+AMzLz\nvyLiRuAC4Fzgk5l5Q0R8BtgAXNlItZL6Yhz7ecMf/G3bJYy0q/7lT9ouYajVXdaeA/xCRMwBXgLs\nA14P3FRdvwU4v+Z9SBoM+1kqRM/hnJl7gY8D36fTxD+ls+z1k8w8XO22B1h+rNtHxMaI2B4R2x8/\n8lyvZUhqQJP9/Ew+MYiSpZHWczhHxCJgHXA6cCowH1g73dtn5ubMnMjMiSWzZvdahqQGNNnPJ8ai\nPlUpjY86y9q/B3w3M3+YmYeALwHnAAurZTGAFcDemjVK6j/7WSpInXD+PnB2RLwkIgJYA9wP3A68\ntdpnPXBLvRIlDYD9LBWkznPOd9I5UeRuOi+7mAVsBj4IXBIRu+m8/OKqBuqU1Ef2s1SWWq9zzszL\ngMuOGn4YWF3n60oaPPtZKofvECZJUmEMZ0mSCmM4S5JUGMNZkqTCGM6SJBXGcJYkqTCGsyRJhTGc\nJUkqjOEsSVJhDGdJkgpjOEuSVBjDWZKkwhjOkiQVxnCWJKkwhrMkSYUxnCVJKozhLElSYQxnSZIK\nYzhLklQYw1mSpMIYzpIkFcZwliSpMMcN54i4OiIORMTOrrHFEXFbRDxUfV5UjUdEfDoidkfEPRFx\nVj+LlzQz9rM0HKYzc74GWHvU2CZgW2auArZVlwHeCKyqPjYCVzZTpqSGXIP9LBXvuOGcmV8FfnzU\n8DpgS7W9BTi/a/za7LgDWBgRy5oqVlI99rM0HHp9znlpZu6rth8Dllbby4FHu/bbU429QERsjIjt\nEbH98SPP9ViGpAY02s/P5BP9q1QaE7VPCMvMBLKH223OzInMnFgya3bdMiQ1oIl+PrHzlLWkGnoN\n5/2Ty1vV5wPV+F5gZdd+K6oxSeWyn6XC9BrOW4H11fZ64Jau8YuqszzP
Bn7atVwmqUz2s1SYOcfb\nISKuB34XWBIRe4DLgI8CN0bEBuAR4G3V7l8GzgV2Az8D3tWHmiX1yH6WhsNxwzkzL5ziqjXH2DeB\ni+sWJak/7GdpOPgOYZIkFcZwliSpMIazJEmFMZwlSSqM4SxJUmEMZ0mSCmM4S5JUGMNZkqTCGM6S\nJBXGcJYkqTCGsyRJhTGcJUkqzHH/8UVpdnzx1J9vL/jEm1qsZPAOvv8f2y5BkjQAzpwlSSrM0M2c\nYfxmzJMmH7czaEkabc6cJUkqjOEsSVJhhi6cx3VJu5vHQJJG29A852wgPZ/PP0vS6Bq6mbMkSaPO\ncJYkqTCGsyRJhTluOEfE1RFxICJ2do19LCK+HRH3RMTNEbGw67oPRcTuiHggIv6giSJ9vnlqHhvN\nRAn9LOn4pjNzvgZYe9TYbcCrMvM3gAeBDwFExBnABcCvV7f524iY3Vi1kuq6BvtZKt5xwzkzvwr8\n+KixWzPzcHXxDmBFtb0OuCEzn8nM7wK7gdUN1iupBvtZGg5NvJTq3cAXqu3ldJp70p5q7AUiYiOw\nEWDlrGP/Me6S7fT4sio1qHY/z49l/axPGgu1TgiLiEuBw8B1M71tZm7OzInMnFhyjHBe8Ik3cfD9\n/2jgSAPSVD+fGIuaL04aMz3PnCPincB5wJrMzGp4L7Cya7cV1ZikgtnPUll6CueIWAt8APidzPxZ\n11Vbgc9HxBXAqcAq4Ou9Fucz8WNDAAAFGklEQVSy9sy4vK1eDKqfJU3fccM5Iq4HfhdYEhF7gMvo\nnM15InBbRADckZl/nJn3RcSNwP10lscuzsznZlKQgVzf5FMC0tEG3c+SenPccM7MC48xfNWL7P8R\n4CN1ipLUH/azNBx8hzBJkgpTVDi7pN2cBZ94k8dTkoZUUeEsSZIMZ0mSilNEOM9aebJLsH3icZWk\n4VNEOEuSpP8W//1mQC0WEfFD4Cng8bZrmcISyqyt1Lqg3NpKrQteWNvLM/NlbRXTq4g4CDzQdh1T\nGKbvfylKrQuGp7YZ93IR4QwQEdszc6LtOo6l1NpKrQvKra3UuqDs2mai5MdhbTNXal0w2rW5rC1J\nUmEMZ0mSClNSOG9uu4AXUWptpdYF5dZWal1Qdm0zUfLjsLaZK7UuGOHainnOWZIkdZQ0c5YkSRjO\nkiQVp/Vwjoi1EfFAROyOiE0t17IyIm6PiPsj4r6IeG81/uGI2BsRO6qPc1uq73sRcW9Vw/ZqbHFE\n3BYRD1WfFw24pld2HZcdEfFkRLyvrWMWEVdHxIGI2Nk1dsxjFB2frn727omIs1qo7WMR8e3q/m+O\niIXV+GkR8V9dx+8z/aytKaX0s73cc132c+91NdvLmdnaBzAb+A7wCuAE4FvAGS3Wsww4q9peADwI\nnAF8GPiLNo9VVdP3gCVHjf0VsKna3gRc3vL38zHg5W0dM+B1wFnAzuMdI+Bc4J+AAM4G7myhtjcA\nc6rty7tqO617v2H4KKmf7eXGvp/28/TrarSX2545rwZ2Z+bDmfkscAOwrq1iMnNfZt5dbR8EdgHL\n26pnmtYBW6rtLcD5LdayBvhOZj7SVgGZ+VXgx0cNT3WM1gHXZscdwMKIWDbI2jLz1sw8XF28A1jR\nr/sfgGL62V5uhP08g7qa7uW2w3k58GjX5T0U0kARcRpwJnBnNfSearni6jaWmyoJ3BoRd0XExmps\naWbuq7YfA5a2UxoAFwDXd10u4ZjB1MeotJ+/d9P5y3/S6RHxzYj494j47baKmoHSjidgL9dgP/eu\ndi+3Hc5FioiXAl8E3peZTwJXAr8M/BawD/hES6W9NjPPAt4IXBwRr+u+MjtrKK28Ni4iTgDeDPxD\nNVTKMXueNo/Ri4mIS4HDwHXV0D7glzLzTOAS4PMRcVJb9Q0re7k39nPvmurltsN5L7Cy6/KKaqw1\nETGXTjNfl5lfAsjM/Zn5XGYeAf6OzvLdwGXm3urzAeDmqo79k0s31ecDbdRG55fM3Zm5v6qxiGNW\nmeoYFfHzFxHvBM4D3l79siEzn8nMH1Xbd9F5LvdXB13bDBVxPCfZy7XYzz1ospfbDudvAKsi4vTq\nL7ULgK1tFRMRAVwF7MrMK7rGu5+3eAuw8+jbDqC2+RGxYHKbzskHO+kcr/XVbuuBWwZdW+VCupbA\nSjhmXaY6RluBi6qzPM8Gftq1XDYQEbEW+ADw5sz8Wdf4yyJidrX9CmAV8PAga+tBMf1sL9dmP89Q\n473cr7PZpvtB5wy7B+n8NXFpy7W8ls4SyT3AjurjXODvgXur8a3AshZqewWds1+/Bdw3eayAU4Bt\nwEPAvwKLW6htPvAj4OSusVaOGZ1fKPuAQ3Sec9ow1TGic1bn31Q/e/cCEy3UtpvO82STP2+fqfb9\no+r7vAO4G/jDQX9fe3yMRfSzvVyrPvu5t7oa7WXfvlOSpMK0vawtSZKOYjhLklQYw1mSpMIYzpIk\nFcZwliSpMIazJEmFMZwlSSrM/wfaVvuK+BCoMwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ORQXaa6k30yD", + "colab_type": "text" + }, + "source": [ + "# Training a Model\n", + "\n", + "Now we move on to training our very own model. Here we will be finetuning the base of a Mask RCNN, modifying it to support Semantic Segmentation and change the number of classes to support this dataset. To do this we need\n", + "\n", + "1. A base model that has the same amount of output classes as our dataset. In this case, we have need for only 3 classes instead of COCO's 80. Hence , we first need to do some model trimming. \n", + "\n", + "2. Second, we need to build a Panoptic FPN model. That means attaching the semantic segmentation branch to the FPN.\n", + "\n", + "3. FInally, we write a loss function to train the semantic segmentation head.\n", + "\n", + "4. Lastly, set to train !" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SVaNqbpiAzwx", + "colab_type": "text" + }, + "source": [ + "## Model Trimming" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hbzY16ocEdrg", + "colab_type": "text" + }, + "source": [ + "### Helper Functions for Visualising Detections" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "yk5a6RpsEdIt", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class Resize(object):\n", + " def __init__(self, min_size, max_size):\n", + " self.min_size = min_size\n", + " self.max_size = max_size\n", + "\n", + " # modified from torchvision to add support for max size\n", + " def get_size(self, image_size):\n", + " w, h = image_size\n", + " size = self.min_size\n", + " max_size = self.max_size\n", + " if max_size is not None:\n", + " min_original_size = float(min((w, h)))\n", + " max_original_size = float(max((w, h)))\n", + " if max_original_size / min_original_size * size > max_size:\n", + " size = int(round(max_size * min_original_size / max_original_size))\n", + "\n", + " if (w <= h and w == size) or (h <= w and h == size):\n", + " return (h, w)\n", + "\n", + " if w < h:\n", + " ow = size\n", + " oh = int(size * h / w)\n", + " else:\n", + " oh = size\n", + " ow = int(size * w / h)\n", + "\n", + " return (oh, ow)\n", + "\n", + " def __call__(self, image):\n", + " size = self.get_size(image.size)\n", + " image = F.resize(image, size)\n", + " return image\n", + " \n", + " \n", + "class COCODemo(object):\n", + " \n", + " def __init__(\n", + " self,\n", + " cfg,\n", + " confidence_threshold=0.7,\n", + " show_mask_heatmaps=False,\n", + " masks_per_dim=2,\n", + " min_image_size=224,\n", + " convert_model=False\n", + " ):\n", + " self.cfg = cfg.clone()\n", + " \n", + " self.model = build_detection_network(cfg)\n", + " self.training = False\n", + "\n", + " self.model.eval()\n", + " self.device = torch.device(cfg.MODEL.DEVICE)\n", + " self.model.to(self.device)\n", + " self.min_image_size = min_image_size\n", + "\n", + " save_dir = cfg.OUTPUT_DIR\n", + " checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)\n", + " _ = checkpointer.load(cfg.MODEL.WEIGHT)\n", + "\n", + " self.transforms = self.build_transform()\n", + "\n", + " mask_threshold = -1 if show_mask_heatmaps else 0.5\n", + " self.masker = Masker(threshold=mask_threshold, padding=1)\n", + "\n", + " # used to make colors for each class\n", + " self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n", + "\n", + " self.cpu_device = torch.device(\"cpu\")\n", + " self.confidence_threshold = confidence_threshold\n", + " 
self.show_mask_heatmaps = show_mask_heatmaps\n", + " self.masks_per_dim = masks_per_dim\n", + "\n", + " def build_transform(self):\n", + " \"\"\"\n", + " Creates a basic transformation that was used to train the models\n", + " \"\"\"\n", + " cfg = self.cfg\n", + "\n", + " # we are loading images with OpenCV, so we don't need to convert them\n", + " # to BGR, they are already! So all we need to do is to normalize\n", + " # by 255 if we want to convert to BGR255 format, or flip the channels\n", + " # if we want it to be in RGB in [0-1] range.\n", + " if cfg.INPUT.TO_BGR255:\n", + " to_bgr_transform = T.Lambda(lambda x: x * 255)\n", + " else:\n", + " to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n", + "\n", + " normalize_transform = T.Normalize(\n", + " mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n", + " )\n", + " min_size = cfg.INPUT.MIN_SIZE_TEST\n", + " max_size = cfg.INPUT.MAX_SIZE_TEST\n", + " transform = T.Compose(\n", + " [\n", + " T.ToPILImage(),\n", + " Resize(min_size, max_size),\n", + " T.ToTensor(),\n", + " to_bgr_transform,\n", + " normalize_transform,\n", + " ]\n", + " )\n", + " return transform\n", + "\n", + " def run_on_opencv_image(self, image, panoptic=False, objDet=False, semantic=False):\n", + " \"\"\"\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " Returns:\n", + " prediction (BoxList): the detected objects. Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " predictions = self.compute_prediction(image)\n", + " top_predictions = self.select_top_predictions(predictions)\n", + " \n", + " \n", + " result = image.copy()\n", + " \n", + " if self.show_mask_heatmaps:\n", + " return self.create_mask_montage(result, top_predictions)\n", + " result = self.overlay_boxes(result, top_predictions)\n", + " if self.cfg.MODEL.MASK_ON:\n", + " result = self.overlay_mask(result, top_predictions)\n", + " if self.cfg.MODEL.KEYPOINT_ON:\n", + " result = self.overlay_keypoints(result, top_predictions)\n", + " result = self.overlay_class_names(result, top_predictions)\n", + "\n", + " return result\n", + "\n", + " def compute_prediction(self, original_image):\n", + " \"\"\"\n", + " Arguments:\n", + " original_image (np.ndarray): an image as returned by OpenCV\n", + " Returns:\n", + " prediction (BoxList): the detected objects. 
Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " # apply pre-processing to image\n", + " image = self.transforms(original_image)\n", + " # convert to an ImageList, padded so that it is divisible by\n", + " # cfg.DATALOADER.SIZE_DIVISIBILITY\n", + " image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)\n", + " image_list = image_list.to(self.device)\n", + " # compute predictions\n", + " with torch.no_grad():\n", + " predictions = self.model(image_list)\n", + " predictions = [o.to(self.cpu_device) for o in predictions]\n", + "\n", + " # always single image is passed at a time\n", + " prediction = predictions[0]\n", + "\n", + " # reshape prediction (a BoxList) into the original image size\n", + " height, width = original_image.shape[:-1]\n", + " prediction = prediction.resize((width, height))\n", + "\n", + " if prediction.has_field(\"mask\"):\n", + " # if we have masks, paste the masks in the right position\n", + " # in the image, as defined by the bounding boxes\n", + " masks = prediction.get_field(\"mask\")\n", + " # always single image is passed at a time\n", + " masks = self.masker([masks], [prediction])[0]\n", + " prediction.add_field(\"mask\", masks)\n", + " return prediction\n", + "\n", + " def select_top_predictions(self, predictions):\n", + " \"\"\"\n", + " Select only predictions which have a `score` > self.confidence_threshold,\n", + " and returns the predictions in descending order of score\n", + " Arguments:\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `scores`.\n", + " Returns:\n", + " prediction (BoxList): the detected objects. Additional information\n", + " of the detection properties can be found in the fields of\n", + " the BoxList via `prediction.fields()`\n", + " \"\"\"\n", + " scores = predictions.get_field(\"scores\")\n", + " keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n", + " predictions = predictions[keep]\n", + " scores = predictions.get_field(\"scores\")\n", + " _, idx = scores.sort(0, descending=True)\n", + " return predictions[idx]\n", + "\n", + " def compute_colors_for_labels(self, labels):\n", + " \"\"\"\n", + " Simple function that adds fixed colors depending on the class\n", + " \"\"\"\n", + " colors = labels[:, None] * self.palette\n", + " colors = (colors % 255).numpy().astype(\"uint8\")\n", + " return colors\n", + "\n", + " def overlay_boxes(self, image, predictions):\n", + " \"\"\"\n", + " Adds the predicted boxes on top of the image\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `labels`.\n", + " \"\"\"\n", + " labels = predictions.get_field(\"labels\")\n", + " boxes = predictions.bbox\n", + "\n", + " colors = self.compute_colors_for_labels(labels).tolist()\n", + "\n", + " for box, color in zip(boxes, colors):\n", + " box = box.to(torch.int64)\n", + " top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n", + " image = cv2.rectangle(\n", + " image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n", + " )\n", + "\n", + " return image\n", + "\n", + " def overlay_mask(self, image, predictions):\n", + " \"\"\"\n", + " Adds the instances contours for each predicted object.\n", + " Each label has a different color.\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", 
+ " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `mask` and `labels`.\n", + " \"\"\"\n", + " masks = predictions.get_field(\"mask\").numpy()\n", + " labels = predictions.get_field(\"labels\")\n", + "\n", + " colors = self.compute_colors_for_labels(labels).tolist()\n", + "\n", + " for mask, color in zip(masks, colors):\n", + " thresh = mask[0, :, :, None]\n", + " contours, hierarchy = cv2_util.findContours(\n", + " thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n", + " )\n", + " image = cv2.drawContours(image, contours, -1, color, 3)\n", + "\n", + " composite = image\n", + "\n", + " return composite\n", + "\n", + " def overlay_keypoints(self, image, predictions):\n", + " keypoints = predictions.get_field(\"keypoints\")\n", + " kps = keypoints.keypoints\n", + " scores = keypoints.get_field(\"logits\")\n", + " kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()\n", + " for region in kps:\n", + " image = vis_keypoints(image, region.transpose((1, 0)))\n", + " return image\n", + "\n", + " def create_mask_montage(self, image, predictions):\n", + " \"\"\"\n", + " Create a montage showing the probability heatmaps for each one one of the\n", + " detected objects\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `mask`.\n", + " \"\"\"\n", + " masks = predictions.get_field(\"mask\")\n", + " masks_per_dim = self.masks_per_dim\n", + " masks = L.interpolate(\n", + " masks.float(), scale_factor=1 / masks_per_dim\n", + " ).byte()\n", + " height, width = masks.shape[-2:]\n", + " max_masks = masks_per_dim ** 2\n", + " masks = masks[:max_masks]\n", + " # handle case where we have less detections than max_masks\n", + " if len(masks) < max_masks:\n", + " masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)\n", + " masks_padded[: len(masks)] = masks\n", + " masks = masks_padded\n", + " masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)\n", + " result = torch.zeros(\n", + " (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8\n", + " )\n", + " for y in range(masks_per_dim):\n", + " start_y = y * height\n", + " end_y = (y + 1) * height\n", + " for x in range(masks_per_dim):\n", + " start_x = x * width\n", + " end_x = (x + 1) * width\n", + " result[start_y:end_y, start_x:end_x] = masks[y, x]\n", + " return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)\n", + "\n", + " def overlay_class_names(self, image, predictions):\n", + " \"\"\"\n", + " Adds detected class names and scores in the positions defined by the\n", + " top-left corner of the predicted bounding box\n", + " Arguments:\n", + " image (np.ndarray): an image as returned by OpenCV\n", + " predictions (BoxList): the result of the computation by the model.\n", + " It should contain the field `scores` and `labels`.\n", + " \"\"\"\n", + " scores = predictions.get_field(\"scores\").tolist()\n", + " labels = predictions.get_field(\"labels\").tolist()\n", + " labels = [self.CATEGORIES[i] for i in labels]\n", + " boxes = predictions.bbox\n", + "\n", + " template = \"{}: {:.2f}\"\n", + " for box, score, label in zip(boxes, scores, labels):\n", + " x, y = box[:2]\n", + " s = template.format(label, score)\n", + " cv2.putText(\n", + " image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n", + " )\n", + "\n", + " return image\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot 
as plt\n", + "from maskrcnn_benchmark.structures.keypoint import PersonKeypoints\n", + "\n", + "def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n", + " \"\"\"Visualizes keypoints (adapted from vis_one_image).\n", + " kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n", + " \"\"\"\n", + " dataset_keypoints = PersonKeypoints.NAMES\n", + " kp_lines = PersonKeypoints.CONNECTIONS\n", + "\n", + " # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n", + " cmap = plt.get_cmap('rainbow')\n", + " colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n", + " colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n", + "\n", + " # Perform the drawing on a copy of the image, to allow for blending.\n", + " kp_mask = np.copy(img)\n", + "\n", + " # Draw mid shoulder / mid hip first for better visualization.\n", + " mid_shoulder = (\n", + " kps[:2, dataset_keypoints.index('right_shoulder')] +\n", + " kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n", + " sc_mid_shoulder = np.minimum(\n", + " kps[2, dataset_keypoints.index('right_shoulder')],\n", + " kps[2, dataset_keypoints.index('left_shoulder')])\n", + " mid_hip = (\n", + " kps[:2, dataset_keypoints.index('right_hip')] +\n", + " kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n", + " sc_mid_hip = np.minimum(\n", + " kps[2, dataset_keypoints.index('right_hip')],\n", + " kps[2, dataset_keypoints.index('left_hip')])\n", + " nose_idx = dataset_keypoints.index('nose')\n", + " if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n", + " color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n", + " if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n", + " color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n", + "\n", + " # Draw the keypoints.\n", + " for l in range(len(kp_lines)):\n", + " i1 = kp_lines[l][0]\n", + " i2 = kp_lines[l][1]\n", + " p1 = kps[0, i1], kps[1, i1]\n", + " p2 = kps[0, i2], kps[1, i2]\n", + " if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n", + " cv2.line(\n", + " kp_mask, p1, p2,\n", + " color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n", + " if kps[2, i1] > kp_thresh:\n", + " cv2.circle(\n", + " kp_mask, p1,\n", + " radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n", + " if kps[2, i2] > kp_thresh:\n", + " cv2.circle(\n", + " kp_mask, p2,\n", + " radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n", + "\n", + " # Blend the keypoints.\n", + " return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "If8z4OZfDHmC", + "colab_type": "text" + }, + "source": [ + "### Base Model Config\n", + "\n", + "This is the base model that we will finetune from. First we need to replace the bounding box heads and mask heads to make it compatible with our Shapes Dataset." 
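, + "\n", + "The checkpoint entries that actually depend on the number of classes are easy to spot from their names. The helper below is only a sketch (a hypothetical `class_dependent_keys`, not part of the original workflow) that lists them from any state dict; these are exactly the keys that get stripped in the cells that follow:\n", + "\n", + "```python\n",
+ "def class_dependent_keys(state_dict):\n", + "    # Tensors whose leading dimension equals the number of classes.\n", + "    suffixes = (\"cls_score.weight\", \"cls_score.bias\",\n", + "                \"bbox_pred.weight\", \"bbox_pred.bias\",\n", + "                \"mask_fcn_logits.weight\", \"mask_fcn_logits.bias\")\n", + "    return [k for k in state_dict if k.endswith(suffixes)]\n", + "```"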
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "wM0coO44ClbV", + "colab_type": "code", + "outputId": "f1085b00-8428-40e1-c040-e9fa451afaa2", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 54 + } + }, + "source": [ + "%%writefile base_config.yaml\n", + "MODEL:\n", + " META_ARCHITECTURE: \"GeneralizedRCNN\"\n", + " WEIGHT: \"catalog://Caffe2Detectron/COCO/35858933/e2e_mask_rcnn_R-50-FPN_1x\"\n", + " BACKBONE:\n", + " CONV_BODY: \"R-50-FPN\"\n", + " RESNETS:\n", + " BACKBONE_OUT_CHANNELS: 256\n", + " RPN:\n", + " USE_FPN: True\n", + " ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n", + " PRE_NMS_TOP_N_TRAIN: 2000\n", + " PRE_NMS_TOP_N_TEST: 1000\n", + " POST_NMS_TOP_N_TEST: 1000\n", + " FPN_POST_NMS_TOP_N_TEST: 1000\n", + " ROI_HEADS:\n", + " USE_FPN: True\n", + " ROI_BOX_HEAD:\n", + " POOLER_RESOLUTION: 7\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " POOLER_SAMPLING_RATIO: 2\n", + " FEATURE_EXTRACTOR: \"FPN2MLPFeatureExtractor\"\n", + " PREDICTOR: \"FPNPredictor\"\n", + " ROI_MASK_HEAD:\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " FEATURE_EXTRACTOR: \"MaskRCNNFPNFeatureExtractor\"\n", + " PREDICTOR: \"MaskRCNNC4Predictor\"\n", + " POOLER_RESOLUTION: 14\n", + " POOLER_SAMPLING_RATIO: 2\n", + " RESOLUTION: 28\n", + " SHARE_BOX_FEATURE_EXTRACTOR: False\n", + " MASK_ON: True\n", + "DATALOADER:\n", + " SIZE_DIVISIBILITY: 32" + ], + "execution_count": 5, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Overwriting base_config.yaml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mOo-0LGFEAmc", + "colab_type": "text" + }, + "source": [ + "### Pretrained weight removal\n", + "\n", + "Here, the pretrained weights of bbox, mask and class predictions are removed. This is done so that we can make the model shapes dataset compatible i.e predict 3 classes instead of Coco's 81 classes." 
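, + "\n", + "Once the trimmed checkpoint has been saved by the next cell, a quick sanity check (just a sketch, reusing the `base_model` object created there) is to confirm that only the predictor tensors were dropped:\n", + "\n", + "```python\n",
+ "trimmed = torch.load(\"base_model.pth\")\n", + "dropped = set(base_model.state_dict().keys()) - set(trimmed.keys())\n", + "print(sorted(dropped))  # expect only the roi_heads box/mask predictor weights and biases\n", + "```"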
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ISFsxBxBDZcQ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def removekey(d, listofkeys):\n", + " r = dict(d)\n", + " for key in listofkeys:\n", + " print('key: {} is removed'.format(key))\n", + " r.pop(key)\n", + " return r\n", + " \n", + "\n", + "config_file = \"base_config.yaml\"\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "\n", + "# Add these for printing class names over your predictions.\n", + "COCODemo.CATEGORIES = [\n", + " \"__background\",\n", + " \"square\",\n", + " \"circle\",\n", + " \"triangle\"\n", + "]\n", + "\n", + "demo = COCODemo(\n", + " cfg, \n", + " min_image_size=800,\n", + " confidence_threshold=0.7,\n", + " convert_model=True)\n", + "\n", + "base_model = demo.model\n", + "\n", + "# Removes pretrained weights from state dict\n", + "new_state_dict = removekey(base_model.state_dict(), [ \n", + " \"roi_heads.box.predictor.cls_score.weight\", \"roi_heads.box.predictor.cls_score.bias\", \n", + " \"roi_heads.box.predictor.bbox_pred.weight\", \"roi_heads.box.predictor.bbox_pred.bias\",\n", + " \"roi_heads.mask.predictor.mask_fcn_logits.weight\", \"roi_heads.mask.predictor.mask_fcn_logits.bias\"\n", + " ])\n", + "\n", + "# Save new state dict, we will use this as our starting weights for our fine-tuned model\n", + "torch.save(new_state_dict, \"base_model.pth\")" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bbCBInqHFUg7", + "colab_type": "text" + }, + "source": [ + "### Fine Tuned Model Config\n", + "\n", + "Here we define our shape Dataset config. The important fields are \n", + "\n", + "1. WEIGHT: which point to our base_model.pth saved in the previous step\n", + "2. NUM_CLASSES: Which define how many classes we will predict . note that the number includes the background, hence our shapes dataset has 4 classes. \n", + "3. PANOPTIC.CHANNEL_SIZE: To set the channel size of the segmentation head of the FPN.\n", + "4. PANOPTIC.NUM_CLASSES: Number of classes of semantic segmentation head." 
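The fields listed above can also be set from Python instead of YAML, using the same `cfg.merge_from_list` mechanism the training-driver cells below rely on. A minimal sketch with the values this notebook uses (the full config path of the NUM_CLASSES field above is `MODEL.ROI_BOX_HEAD.NUM_CLASSES`; the PANOPTIC fields are specific to this fork and are not set in the YAML below, so they are omitted here as well):

```python
# Minimal sketch: the same overrides applied programmatically instead of via YAML.
# Assumes `cfg` is the global maskrcnn_benchmark config already used in earlier cells.
cfg.merge_from_file("shapes_config.yaml")
cfg.merge_from_list([
    "MODEL.WEIGHT", "base_model.pth",        # start from the trimmed checkpoint
    "MODEL.ROI_BOX_HEAD.NUM_CLASSES", 4,     # background + square, circle, triangle
])
```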
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5AhIiTgmFXyi", + "colab_type": "code", + "outputId": "6e38f4a7-2d5c-4162-a824-0261d357d3d1", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 54 + } + }, + "source": [ + "%%writefile shapes_config.yaml\n", + "MODEL:\n", + " META_ARCHITECTURE: \"GeneralizedRCNN\"\n", + " WEIGHT: \"base_model.pth\"\n", + " BACKBONE:\n", + " CONV_BODY: \"R-50-FPN\"\n", + " RESNETS:\n", + " BACKBONE_OUT_CHANNELS: 256\n", + " RPN:\n", + " USE_FPN: True\n", + " ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n", + " PRE_NMS_TOP_N_TRAIN: 2000\n", + " PRE_NMS_TOP_N_TEST: 1000\n", + " POST_NMS_TOP_N_TEST: 1000\n", + " FPN_POST_NMS_TOP_N_TEST: 1000\n", + " ROI_HEADS:\n", + " USE_FPN: True\n", + " ROI_BOX_HEAD:\n", + " POOLER_RESOLUTION: 7\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " POOLER_SAMPLING_RATIO: 2\n", + " FEATURE_EXTRACTOR: \"FPN2MLPFeatureExtractor\"\n", + " PREDICTOR: \"FPNPredictor\"\n", + " NUM_CLASSES: 4 # background + num_classes : IMPORTANT dont forget to add this\n", + " ROI_MASK_HEAD:\n", + " POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n", + " FEATURE_EXTRACTOR: \"MaskRCNNFPNFeatureExtractor\"\n", + " PREDICTOR: \"MaskRCNNC4Predictor\"\n", + " POOLER_RESOLUTION: 14\n", + " POOLER_SAMPLING_RATIO: 2\n", + " RESOLUTION: 28\n", + " SHARE_BOX_FEATURE_EXTRACTOR: False\n", + " MASK_ON: True\n", + "DATALOADER:\n", + " SIZE_DIVISIBILITY: 32" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing shapes_config.yaml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tAn3omCjTFGI", + "colab_type": "text" + }, + "source": [ + "### Data Loader\n", + "\n", + "This function creates a data loader with our shapes dataset. This data loader is used internally in the repo to train the model." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "oODu2UpVTHXz", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def build_data_loader(cfg, dataset, is_train=True, is_distributed=False, start_iter=0):\n", + " num_gpus = get_world_size()\n", + " if is_train:\n", + " images_per_batch = cfg.SOLVER.IMS_PER_BATCH\n", + " assert (\n", + " images_per_batch % num_gpus == 0\n", + " ), \"SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\".format(\n", + " images_per_batch, num_gpus)\n", + " images_per_gpu = images_per_batch // num_gpus\n", + " shuffle = True\n", + " num_iters = cfg.SOLVER.MAX_ITER\n", + " else:\n", + " images_per_batch = cfg.TEST.IMS_PER_BATCH\n", + " assert (\n", + " images_per_batch % num_gpus == 0\n", + " ), \"TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\".format(\n", + " images_per_batch, num_gpus)\n", + " images_per_gpu = images_per_batch // num_gpus\n", + " shuffle = False if not is_distributed else True\n", + " num_iters = None\n", + " start_iter = 0\n", + "\n", + " if images_per_gpu > 1:\n", + " logger = logging.getLogger(__name__)\n", + " logger.warning(\n", + " \"When using more than one image per GPU you may encounter \"\n", + " \"an out-of-memory (OOM) error if your GPU does not have \"\n", + " \"sufficient memory. If this happens, you can reduce \"\n", + " \"SOLVER.IMS_PER_BATCH (for training) or \"\n", + " \"TEST.IMS_PER_BATCH (for inference). For training, you must \"\n", + " \"also adjust the learning rate and schedule length according \"\n", + " \"to the linear scaling rule. 
See for example: \"\n", + " \"https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14\"\n", + " )\n", + "\n", + " # group images which have similar aspect ratio. In this case, we only\n", + " # group in two cases: those with width / height > 1, and the other way around,\n", + " # but the code supports more general grouping strategy\n", + " aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []\n", + "\n", + " paths_catalog = import_file(\n", + " \"maskrcnn_benchmark.config.paths_catalog\", cfg.PATHS_CATALOG, True\n", + " )\n", + " DatasetCatalog = paths_catalog.DatasetCatalog\n", + " dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST\n", + "\n", + " # If bbox aug is enabled in testing, simply set transforms to None and we will apply transforms later\n", + " transforms = None if not is_train and cfg.TEST.BBOX_AUG.ENABLED else build_transforms(cfg, is_train)\n", + " \n", + " dataset.transforms = transforms\n", + " datasets = [ dataset ]\n", + " \n", + " data_loaders = []\n", + " for dataset in datasets:\n", + " sampler = make_data_sampler(dataset, shuffle, is_distributed)\n", + " batch_sampler = make_batch_data_sampler(\n", + " dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter\n", + " )\n", + " collator = BBoxAugCollator() if not is_train and cfg.TEST.BBOX_AUG.ENABLED else \\\n", + " BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)\n", + " num_workers = cfg.DATALOADER.NUM_WORKERS\n", + " data_loader = torch.utils.data.DataLoader(\n", + " dataset,\n", + " num_workers=num_workers,\n", + " batch_sampler=batch_sampler,\n", + " collate_fn=collator,\n", + " )\n", + " data_loaders.append(data_loader)\n", + " if is_train:\n", + " # during training, a single (possibly concatenated) data_loader is returned\n", + " assert len(data_loaders) == 1\n", + " return data_loaders[0]\n", + " return data_loaders" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zBrlwqT7RsdJ", + "colab_type": "text" + }, + "source": [ + "### Detection Model\n", + "\n", + "The model is the Mask RCNN as per [this](https://arxiv.org/abs/1901.02446) paper augmented with puruning functions" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "dJMk5lxwRvTh", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class DetectionModel(nn.Module):\n", + " \"\"\"\n", + " Main class for Panoptic R-CNN. 
Currently supports boxes and masks.\n", + " It consists of three main parts:\n", + " - backbone\n", + " - rpn\n", + " - panoptic: ouputs semantic segmentation mask\n", + " - heads: takes the features + the proposals from the RPN and computes\n", + " detections / masks from it.\n", + " \"\"\"\n", + " def __init__(self, cfg, to_prune=False):\n", + " super(DetectionModel, self).__init__()\n", + "\n", + " self.backbone = build_backbone(cfg)\n", + " self.training = True\n", + " self.evaluate = False\n", + " self.rpn = build_rpn(cfg, self.backbone.out_channels)\n", + " self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)\n", + " \n", + " # Pruning Masks\n", + " self.binary_masks = None\n", + " self.to_prune = to_prune \n", + " \n", + " \n", + " ###################################### \n", + " ########Pruning Functions#############\n", + " \n", + " def prune(self, sparsity_rate):\n", + " \n", + " \n", + " self.sparsity = sparsity_rate \n", + " self.binary_masks = {}\n", + "\n", + "\n", + " for k, m in enumerate(self.modules()):\n", + "\n", + " if isinstance(m, nn.Conv2d):\n", + "\n", + " weight = m.weight.data.view(-1).clone().abs()\n", + " y, i = torch.sort(weight)\n", + "\n", + " spars_index = int(weight.shape[0]*self.sparsity/100)\n", + " threshold = y[spars_index]\n", + "\n", + " mask = weight.gt(threshold).float().cuda()\n", + " mask = mask.view(m.weight.data.shape)\n", + "\n", + " self.binary_masks[k] = mask\n", + "\n", + " m.weight.data.mul_(mask)\n", + " \n", + " def applyMasks(self):\n", + " \n", + " if not self.binary_masks:\n", + " return\n", + " \n", + " for k, m in enumerate(self.modules()):\n", + " \n", + " if isinstance(m, nn.Conv2d):\n", + " \n", + " mask = self.binary_masks[k]\n", + " m.weight.data.mul_(mask)\n", + " \n", + " \n", + " def pruningStats(self):\n", + " \n", + " total = 0\n", + " total_zero_weights = 0\n", + " for k, m in enumerate(self.modules()):\n", + "\n", + " if isinstance(m, nn.Conv2d):\n", + "\n", + " weight = m.weight.data.view(-1).clone().abs()\n", + " non_zero_weights = weight.gt(0).float().cuda().sum()\n", + " total_weights = weight.shape[0]\n", + " zero_weights = 100 - int(non_zero_weights*100/total_weights)\n", + " total += 1\n", + " total_zero_weights += zero_weights\n", + "\n", + " return total_zero_weights/total\n", + " \n", + " \n", + " ######################################\n", + " ######################################\n", + " \n", + "\n", + " def forward(self, images, targets=None):\n", + " \"\"\"\n", + " Arguments:\n", + " images (list[Tensor] or ImageList): images to be processed\n", + " targets (list[BoxList]): ground-truth boxes present in the image (optional)\n", + " Returns:\n", + " result (list[BoxList] or dict[Tensor]): the output from the model.\n", + " During training, it returns a dict[Tensor] which contains the losses.\n", + " During testing, it returns list[BoxList] contains additional fields\n", + " like `scores`, `labels` and `mask` (for Mask R-CNN models).\n", + " \"\"\"\n", + " \n", + " if self.to_prune:\n", + " self.applyMasks()\n", + "\n", + " images = to_image_list(images)\n", + " features = self.backbone(images.tensors) \n", + " proposals, proposal_losses = self.rpn(images, features, targets)\n", + " \n", + " \n", + " if self.roi_heads:\n", + " x, result, detector_losses = self.roi_heads(features, proposals, targets)\n", + " else:\n", + " # RPN-only models don't have roi_heads\n", + " x = features\n", + " result = proposals\n", + " detector_losses = {}\n", + "\n", + " if self.training: \n", + " losses = {}\n", + " 
losses.update(detector_losses)\n", + " losses.update(proposal_losses)\n", + " \n", + " return losses\n", + " \n", + " return result\n", + " " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NVjPYFN1Pz6D", + "colab_type": "text" + }, + "source": [ + "### Build Detection Network" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WE6K5qZ7Pt5T", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def build_detection_network(cfg, to_prune=False):\n", + " return DetectionModel(cfg, to_prune)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DkbBHhFtciGa", + "colab_type": "text" + }, + "source": [ + "### Compute Target Sparsity\n", + "\n", + "According to the \"To Prune or not to Prune\" [paper](https://arxiv.org/abs/1710.01878)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WF5oDgUDcqAc", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def compute_target_sparsity(starting_step, current_step, ending_step, final_sparsity, initial_sparsity, span=100):\n", + " return final_sparsity + (initial_sparsity - final_sparsity) * ( (1 - ( (current_step - starting_step - span)/(ending_step - starting_step) ) )**3 )\n", + " " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5cQ6oh2-Fa6G", + "colab_type": "text" + }, + "source": [ + "### Prune Model" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "APsQaRD-FdKR", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def prune(model, meta):\n", + " \n", + " starting_step = meta['starting_step']\n", + " current_step = meta['current_step']\n", + " ending_step = meta['ending_step']\n", + " final_sparsity = meta['final_sparsity']\n", + " initial_sparsity = meta['initial_sparsity']\n", + " span = meta['span']\n", + " \n", + " sparsity = compute_target_sparsity(starting_step, current_step, ending_step, final_sparsity, initial_sparsity, span=span)\n", + " model.prune(sparsity)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kkLKDmRC0-CE", + "colab_type": "text" + }, + "source": [ + "### Train Pruned Model\n", + "\n", + "The train function is the entry point into the training process. It creates data loaders, optimisers, loads from checkpoint. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4e2-533F1Qmu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# See if we can use apex.DistributedDataParallel instead of the torch default,\n", + "# and enable mixed-precision via apex.amp\n", + "try:\n", + " from apex import amp\n", + "except ImportError:\n", + " raise ImportError('Use APEX for multi-precision via apex.amp')\n", + " \n", + "def reduce_loss_dict(loss_dict):\n", + " \"\"\"\n", + " Reduce the loss dictionary from all processes so that process with rank\n", + " 0 has the averaged results. 
Returns a dict with the same fields as\n", + " loss_dict, after reduction.\n", + " \"\"\"\n", + " world_size = get_world_size()\n", + " if world_size < 2:\n", + " return loss_dict\n", + " with torch.no_grad():\n", + " loss_names = []\n", + " all_losses = []\n", + " for k in sorted(loss_dict.keys()):\n", + " loss_names.append(k)\n", + " all_losses.append(loss_dict[k])\n", + " all_losses = torch.stack(all_losses, dim=0)\n", + " dist.reduce(all_losses, dst=0)\n", + " if dist.get_rank() == 0:\n", + " # only main process gets accumulated, so only divide by\n", + " # world_size in this case\n", + " all_losses /= world_size\n", + " reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}\n", + " return reduced_losses\n", + "\n", + "\n", + "def do_train(\n", + " model,\n", + " data_loader,\n", + " optimizer,\n", + " scheduler,\n", + " checkpointer,\n", + " device,\n", + " checkpoint_period,\n", + " arguments,\n", + " to_prune\n", + "):\n", + " logger = logging.getLogger(\"maskrcnn_benchmark.trainer\")\n", + " logger.error(\"Start training\")\n", + " meters = MetricLogger(delimiter=\" \")\n", + " max_iter = len(data_loader)\n", + " start_iter = arguments[\"iteration\"]\n", + " model.train()\n", + " start_training_time = time.time()\n", + " end = time.time()\n", + " \n", + " \n", + " for iteration, (images, targets, _) in enumerate(data_loader, start_iter):\n", + " \n", + " if any(len(target) < 1 for target in targets):\n", + " logger.error(f\"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}\" )\n", + " continue\n", + " \n", + " data_time = time.time() - end\n", + " iteration = iteration + 1\n", + " arguments[\"iteration\"] = iteration\n", + " \n", + " if to_prune:\n", + " prune_meta['current_step'] = iteration\n", + "\n", + " scheduler.step()\n", + "\n", + " images = images.to(device)\n", + " targets = [target.to(device) for target in targets]\n", + " \n", + " loss_dict = model(images, targets)\n", + " \n", + " losses = sum(loss for loss in loss_dict.values())\n", + " \n", + " # reduce losses over all GPUs for logging purposes\n", + " loss_dict_reduced = reduce_loss_dict(loss_dict)\n", + " losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n", + " meters.update(loss=losses_reduced, **loss_dict_reduced)\n", + "\n", + " optimizer.zero_grad()\n", + " # Note: If mixed precision is not used, this ends up doing nothing\n", + " # Otherwise apply loss scaling for mixed-precision recipe\n", + " with amp.scale_loss(losses, optimizer) as scaled_losses:\n", + " scaled_losses.backward()\n", + " optimizer.step()\n", + "\n", + " batch_time = time.time() - end\n", + " end = time.time()\n", + " meters.update(time=batch_time, data=data_time)\n", + "\n", + " eta_seconds = meters.time.global_avg * (max_iter - iteration)\n", + " eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n", + " \n", + " \n", + " # Pruning code\n", + " if to_prune:\n", + " if (prune_meta['current_step'] - prune_meta['starting_step']) % prune_meta['span'] == 0 and prune_meta['current_step'] > prune_meta['starting_step'] and prune_meta['ending_step'] > prune_meta['current_step']:\n", + " prune(model, prune_meta)\n", + "\n", + " if iteration % 20 == 0 or iteration == max_iter:\n", + " model.applyMasks()\n", + " logger.info(\n", + " meters.delimiter.join(\n", + " [\n", + " \"eta: {eta}\",\n", + " \"iter: {iter}\",\n", + " \"{meters}\",\n", + " \"sparsity: {sparsity}\",\n", + " \"lr: {lr:.6f}\",\n", + " \"max mem: {memory:.0f}\",\n", + " \n", + " 
]\n", + " ).format(\n", + " eta=eta_string,\n", + " iter=iteration,\n", + " meters=str(meters),\n", + " sparsity=model.pruningStats(),\n", + " lr=optimizer.param_groups[0][\"lr\"],\n", + " memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n", + " )\n", + " )\n", + " if iteration % checkpoint_period == 0:\n", + " checkpointer.save(\"model_{:07d}\".format(iteration), **arguments)\n", + " if iteration == max_iter:\n", + " checkpointer.save(\"model_final\", **arguments)\n", + "\n", + " total_training_time = time.time() - start_training_time\n", + " total_time_str = str(datetime.timedelta(seconds=total_training_time))\n", + " logger.info(\n", + " \"Total training time: {} ({:.4f} s / it)\".format(\n", + " total_time_str, total_training_time / (max_iter)\n", + " ))\n", + "\n", + "def train(cfg, local_rank, distributed, dataset, to_prune):\n", + " model = build_detection_network(cfg, to_prune)\n", + "\n", + " device = torch.device('cuda')\n", + " model.to(device)\n", + " \n", + " optimizer = make_optimizer(cfg, model)\n", + " scheduler = make_lr_scheduler(cfg, optimizer) \n", + "\n", + " # Initialize mixed-precision training\n", + " use_mixed_precision = cfg.DTYPE == \"float16\"\n", + " amp_opt_level = 'O1' if use_mixed_precision else 'O0'\n", + " model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)\n", + "\n", + " if distributed:\n", + " model = torch.nn.parallel.DistributedDataParallel(\n", + " model, device_ids=[local_rank], output_device=local_rank,\n", + " # this should be removed if we update BatchNorm stats\n", + " broadcast_buffers=False,\n", + " )\n", + "\n", + " arguments = {}\n", + " arguments[\"iteration\"] = 0\n", + "\n", + " output_dir = cfg.OUTPUT_DIR\n", + " save_to_disk = get_rank() == 0\n", + " checkpointer = DetectronCheckpointer(\n", + " cfg, model, optimizer, scheduler, output_dir, save_to_disk\n", + " )\n", + " extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n", + " arguments.update(extra_checkpoint_data)\n", + "\n", + "\n", + " data_loader = build_data_loader(cfg, dataset)\n", + "\n", + " checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n", + "\n", + " do_train(\n", + " model,\n", + " data_loader,\n", + " optimizer,\n", + " scheduler,\n", + " checkpointer,\n", + " device,\n", + " checkpoint_period,\n", + " arguments,\n", + " to_prune\n", + " )\n", + "\n", + " return model" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pVeJNhzy2DZs", + "colab_type": "text" + }, + "source": [ + "## Unpruned Model Driver\n", + "\n", + "Here we fire off training of a regular unpruned model by calling the above function. before that we set some important config for our training. We make our dataset and update our config. Then we fire off training !" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "XtgfPl7F2CEP", + "colab_type": "code", + "colab": {} + }, + "source": [ + "config_file = \"shapes_config.yaml\"\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "cfg.merge_from_list(['OUTPUT_DIR', 'segDirNotPruned']) # The output folder where all our model checkpoints will be saved during training.\n", + "cfg.merge_from_list(['SOLVER.IMS_PER_BATCH', 25]) # Number of images to take insiade a single batch. This number depends on the size of your GPU\n", + "cfg.merge_from_list(['SOLVER.BASE_LR', 0.0001]) # The Learning Rate when training starts. 
Please check Detectron scaling rules to determine your learning for your GPU setup. \n", + "cfg.merge_from_list(['SOLVER.MAX_ITER', 1000]) # The number of training iterations that will be executed during training. One iteration is given as one forward and backward pass of a mini batch of the network\n", + "cfg.merge_from_list(['SOLVER.STEPS', \"(700, 800)\"]) # These two numberes represent after how many iterations is the learning rate divided by 10. \n", + "cfg.merge_from_list(['TEST.IMS_PER_BATCH', 1]) # Batch size during testing/evaluation\n", + "cfg.merge_from_list(['MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN', 2000]) # This determines how many region proposals to take in for processing into the stage after the RPN. The rule is 1000*batch_size = 4*1000 \n", + "cfg.merge_from_list(['SOLVER.CHECKPOINT_PERIOD', 100]) # After how many iterations does one want to save the model.\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TRAIN', \"(192, )\"])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TRAIN', 192])\n", + "# Make the Output dir if one doesnt exist.\n", + "output_dir = cfg.OUTPUT_DIR\n", + "if output_dir:\n", + " mkdir(output_dir)\n", + "\n", + "# Start training.\n", + "model = train(cfg, local_rank=1, distributed=False, dataset=ShapeDataset(2000), to_prune=False)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uKfqWq3jKBcb", + "colab_type": "text" + }, + "source": [ + "# Prune Model Driver\n", + "\n", + "Here we fire off training of a pruned model by calling the above function." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3MMMJlQHaXbV", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!rm -rf segDirPruned" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "KI4Xye__asS0", + "colab_type": "code", + "colab": {} + }, + "source": [ + "config_file = \"shapes_config.yaml\"\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "cfg.merge_from_list(['OUTPUT_DIR', 'segDirPruned']) # The output folder where all our model checkpoints will be saved during training.\n", + "cfg.merge_from_list(['SOLVER.IMS_PER_BATCH', 25]) # Number of images to take insiade a single batch. This number depends on the size of your GPU\n", + "cfg.merge_from_list(['SOLVER.BASE_LR', 0.0001]) # The Learning Rate when training starts. Please check Detectron scaling rules to determine your learning for your GPU setup. \n", + "cfg.merge_from_list(['SOLVER.MAX_ITER', 1000]) # The number of training iterations that will be executed during training. One iteration is given as one forward and backward pass of a mini batch of the network\n", + "cfg.merge_from_list(['SOLVER.STEPS', \"(800, 900)\"]) # These two numberes represent after how many iterations is the learning rate divided by 10. \n", + "cfg.merge_from_list(['TEST.IMS_PER_BATCH', 1]) # Batch size during testing/evaluation\n", + "cfg.merge_from_list(['MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN', 2000]) # This determines how many region proposals to take in for processing into the stage after the RPN. 
The rule is 1000*batch_size = 4*1000 \n", + "cfg.merge_from_list(['SOLVER.CHECKPOINT_PERIOD', 100]) # After how many iterations does one want to save the model.\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TRAIN', \"(192, )\"])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TRAIN', 192])\n", + "# Make the Output dir if one doesnt exist.\n", + "output_dir = cfg.OUTPUT_DIR\n", + "if output_dir:\n", + " mkdir(output_dir)\n", + " \n", + "# Prune Config\n", + "# 70 percent sparsity \n", + "prune_meta = {\n", + " \"starting_step\" : 0,\n", + " \"current_step\": 0,\n", + " \"ending_step\": 700, # final 200 steps train with frozen masks \n", + " \"final_sparsity\": 70,\n", + " \"initial_sparsity\": 0,\n", + " \"span\": 40\n", + "}\n", + "\n", + "# Start training.\n", + "pruned_model = train(cfg, local_rank=1, distributed=False, dataset=ShapeDataset(2000), to_prune=True)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zp8nWrizaPBN", + "colab_type": "text" + }, + "source": [ + "# Evaluate" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "9cyRGdaNaRQI", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def do_inference(\n", + " model,\n", + " data_loader,\n", + " dataset_name,\n", + " iou_types=(\"bbox\",),\n", + " box_only=False,\n", + " device=\"cuda\",\n", + " expected_results=(),\n", + " expected_results_sigma_tol=4,\n", + " output_folder=None,):\n", + " \n", + " # convert to a torch.device for efficiency\n", + " device = torch.device(device)\n", + " num_devices = get_world_size()\n", + " logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n", + " dataset = data_loader.dataset\n", + " logger.info(\"Start evaluation on {} dataset({} images).\".format(dataset_name, len(dataset)))\n", + " total_timer = Timer()\n", + " inference_timer = Timer()\n", + " total_timer.tic()\n", + " predictions = compute_on_dataset(model, data_loader, device, inference_timer)\n", + " \n", + " # wait for all processes to complete before measuring the time\n", + " synchronize()\n", + " total_time = total_timer.toc()\n", + " total_time_str = get_time_str(total_time)\n", + " logger.info(\n", + " \"Total run time: {} ({} s / img per device, on {} devices)\".format(\n", + " total_time_str, total_time * num_devices / len(dataset), num_devices\n", + " )\n", + " )\n", + " \n", + " total_infer_time = get_time_str(inference_timer.total_time)\n", + " logger.info(\n", + " \"Model inference time: {} ({} s / img per device, on {} devices)\".format(\n", + " total_infer_time,\n", + " inference_timer.total_time * num_devices / len(dataset),\n", + " num_devices,\n", + " )\n", + " )\n", + " \n", + " predictions = _accumulate_predictions_from_multiple_gpus(predictions)\n", + " if not is_main_process():\n", + " return\n", + "\n", + " if output_folder:\n", + " torch.save(predictions, os.path.join(output_folder, \"predictions.pth\"))\n", + "\n", + " extra_args = dict(\n", + " box_only=box_only,\n", + " iou_types=iou_types,\n", + " expected_results=expected_results,\n", + " expected_results_sigma_tol=expected_results_sigma_tol,\n", + " )\n", + "\n", + " return coco_evaluation(dataset=dataset,\n", + " predictions=predictions,\n", + " output_folder=output_folder,\n", + " **extra_args)\n", + "\n", + "def run_test(cfg, model, distributed, dataset):\n", + " if distributed:\n", + " model = model.module\n", + " torch.cuda.empty_cache() # TODO check if it helps\n", + " iou_types = (\"bbox\",)\n", + " \n", + " data_loaders_val = build_data_loader(cfg, dataset, 
is_train=False)\n", + " mkdir(\"shapeVal\")\n", + " model.evaluate = True\n", + " \n", + " for data_loader in data_loaders_val:\n", + " do_inference(\n", + " model,\n", + " data_loader, # For test we need this as zero\n", + " dataset_name=\"shape-val\",\n", + " iou_types=iou_types,\n", + " box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n", + " device=cfg.MODEL.DEVICE,\n", + " expected_results=cfg.TEST.EXPECTED_RESULTS,\n", + " expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n", + " output_folder=\"shapeVal\",\n", + " )\n", + " synchronize()\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P4dHs-N1W9Cg", + "colab_type": "text" + }, + "source": [ + "## Evaluate Pruned and Unpruned model\n", + "\n", + "Here we check the differences between a pruned and unpruned model. By checking the sparsity rate and COCO mAP evaluation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JHGkrOKhWn4V", + "colab_type": "text" + }, + "source": [ + "### Verify sparsity \n", + "\n", + "Here we check the rate of sparsity of our convolutional kernels. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "DtAXrFoxWdAh", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 52 + }, + "outputId": "0369b24b-46ef-4204-a3b6-50b9477556c9" + }, + "source": [ + "print(\"Sparsity of convolution weights of Pruned Model is \", pruned_model.pruningStats())\n", + "\n", + "print(\"Sparsity of convolution weights of Unpruned Model is \", model.pruningStats())" + ], + "execution_count": 39, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Sparsity of convolution weights of Pruned Model is 70.02898550724638\n", + "Sparsity of convolution weights of Unpruned Model is 0.0\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Y_OwJDO2W1Bg", + "colab_type": "text" + }, + "source": [ + "### Evaluate Pruned Model" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "awvrvnEiHfNs", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 674 + }, + "outputId": "b3689112-a9f9-4741-a275-15dd3e0bb706" + }, + "source": [ + "run_test(cfg, model=pruned_model, distributed=False, dataset=ShapeDataset(50))" + ], + "execution_count": 40, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n", + "2019-07-12 05:21:57,947 maskrcnn_benchmark.inference INFO: Start evaluation on shape-val dataset(50 images).\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "100%|██████████| 50/50 [00:04<00:00, 10.73it/s]" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "2019-07-12 05:22:03,019 maskrcnn_benchmark.inference INFO: Total run time: 0:00:05.069178 (0.10138356208801269 s / img per device, on 1 devices)\n", + "2019-07-12 05:22:03,021 maskrcnn_benchmark.inference INFO: Model inference time: 0:00:04.547834 (0.09095668315887451 s / img per device, on 1 devices)\n", + "2019-07-12 05:22:03,040 maskrcnn_benchmark.inference INFO: Preparing results for COCO format\n", + "2019-07-12 05:22:03,045 maskrcnn_benchmark.inference INFO: Preparing bbox results\n", + "2019-07-12 05:22:03,060 maskrcnn_benchmark.inference INFO: Evaluating predictions\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "\n" + ], + "name": "stderr" + }, + { + 
"output_type": "stream", + "text": [ + "Loading and preparing results...\n", + "DONE (t=0.01s)\n", + "creating index...\n", + "index created!\n", + "Running per image evaluation...\n", + "Evaluate annotation type *bbox*\n", + "DONE (t=0.16s).\n", + "Accumulating evaluation results...\n", + "DONE (t=0.03s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.639\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.880\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.778\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.644\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.562\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.699\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.702\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.702\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000\n", + "2019-07-12 05:22:03,322 maskrcnn_benchmark.inference INFO: \n", + "Task: bbox\n", + "AP, AP50, AP75, APs, APm, APl\n", + "0.6387, 0.8799, 0.7780, -1.0000, 0.6443, -1.0000\n", + "\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "T0zqqGPJXi5n", + "colab_type": "text" + }, + "source": [ + "### Evaluate Unpruned Model" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gIuhGoZDXmtz", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 674 + }, + "outputId": "b0310be5-cfae-42b7-9586-50b98176236a" + }, + "source": [ + "run_test(cfg, model=model, distributed=False, dataset=ShapeDataset(50))" + ], + "execution_count": 41, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n", + "2019-07-12 05:22:03,382 maskrcnn_benchmark.inference INFO: Start evaluation on shape-val dataset(50 images).\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "100%|██████████| 50/50 [00:04<00:00, 11.19it/s]" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "2019-07-12 05:22:08,099 maskrcnn_benchmark.inference INFO: Total run time: 0:00:04.715749 (0.09431497573852539 s / img per device, on 1 devices)\n", + "2019-07-12 05:22:08,101 maskrcnn_benchmark.inference INFO: Model inference time: 0:00:04.222170 (0.08444340705871582 s / img per device, on 1 devices)\n", + "2019-07-12 05:22:08,118 maskrcnn_benchmark.inference INFO: Preparing results for COCO format\n", + "2019-07-12 05:22:08,121 maskrcnn_benchmark.inference INFO: Preparing bbox results\n", + "2019-07-12 05:22:08,141 maskrcnn_benchmark.inference INFO: Evaluating predictions\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "Loading and preparing results...\n", + "DONE (t=0.01s)\n", + "creating index...\n", + "index created!\n", + "Running per image evaluation...\n", + "Evaluate annotation type *bbox*\n", + "DONE (t=0.12s).\n", + "Accumulating evaluation results...\n", + "DONE (t=0.03s).\n", + " Average Precision (AP) @[ 
IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.686\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.943\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.767\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.694\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.577\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.746\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.749\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.749\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000\n", + "2019-07-12 05:22:08,332 maskrcnn_benchmark.inference INFO: \n", + "Task: bbox\n", + "AP, AP50, AP75, APs, APm, APl\n", + "0.6858, 0.9433, 0.7668, -1.0000, 0.6935, -1.0000\n", + "\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ccHt8YMdKq6K", + "colab_type": "text" + }, + "source": [ + "# Visualise\n", + "\n", + "An important part of validating your model is visualising the results. This is done below" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WOXgrPWIa-ND", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 87 + }, + "outputId": "aa5ff55c-01d5-4128-efcc-013f342da713" + }, + "source": [ + "# Load Dataset\n", + "dataset = ShapeDataset(50)" + ], + "execution_count": 52, + "outputs": [ + { + "output_type": "stream", + "text": [ + "loading annotations into memory...\n", + "Done (t=0.00s)\n", + "creating index...\n", + "index created!\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-vim-GMFa_1X", + "colab_type": "text" + }, + "source": [ + "### Load unpruned model for vis" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kb9VchvVzRpu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Load Trained Model\n", + "config_file = \"shapes_config.yaml\"\n", + "\n", + "cfg.merge_from_file(config_file)\n", + "# manual override some options\n", + "# cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n", + "\n", + "# manual override some options\n", + "cfg.merge_from_list(['OUTPUT_DIR', 'segDirNotPruned']) # The output folder where all our model checkpoints will be saved during training.\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TRAIN', \"(192, )\"])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TRAIN', 192])\n", + "\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TEST', 192])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TEST', 192])\n", + "\n", + "\n", + "unpruned_demo = COCODemo(\n", + " cfg, \n", + " min_image_size=192,\n", + " confidence_threshold=0.7)\n", + "\n", + "# Add these for printing class names over your predictions.\n", + "COCODemo.CATEGORIES = [\n", + " \"__background\",\n", + " \"square\",\n", + " \"circle\",\n", + " \"triangle\"\n", + "]\n", + "\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-uCq7_NPbFT9", + "colab_type": "text" + }, + "source": [ + "### Load pruned model for vis" + ] + }, + { + 
"cell_type": "code", + "metadata": { + "id": "goyZr5o4bFpC", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Load Trained Model\n", + "config_file = \"shapes_config.yaml\"\n", + "\n", + "cfg.merge_from_file(config_file)\n", + "# manual override some options\n", + "# cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n", + "\n", + "# manual override some options\n", + "cfg.merge_from_list(['OUTPUT_DIR', 'segDirPruned']) # The output folder where all our model checkpoints will be saved during training.\n", + "\n", + "# update the config options with the config file\n", + "cfg.merge_from_file(config_file)\n", + "\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TRAIN', \"(192, )\"])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TRAIN', 192])\n", + "\n", + "cfg.merge_from_list(['INPUT.MIN_SIZE_TEST', 192])\n", + "cfg.merge_from_list(['INPUT.MAX_SIZE_TEST', 192])\n", + "\n", + "\n", + "pruned_demo = COCODemo(\n", + " cfg, \n", + " min_image_size=192,\n", + " confidence_threshold=0.7)\n", + "\n", + "# Add these for printing class names over your predictions.\n", + "COCODemo.CATEGORIES = [\n", + " \"__background\",\n", + " \"square\",\n", + " \"circle\",\n", + " \"triangle\"\n", + "]\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c8b6wHAXjyE5", + "colab_type": "text" + }, + "source": [ + "## Visualise" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RSPq97dtWFrA", + "colab_type": "text" + }, + "source": [ + "### Input Image" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Ir-cYCvKSbNI", + "colab_type": "code", + "outputId": "83911692-1805-464a-a163-48d575e2340e", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 485 + } + }, + "source": [ + "# Visualise Input Image\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i+5)\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(img)\n", + "plt.show()" + ], + "execution_count": 56, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3W2wXWV99/Hvj8TwJMiDNMWEW7BS\ne2OnVSZD6WitI31Aag29ax28nRqV20w7arW2Y7G+wM7UGa2tVjstThRK7KCIqAPTaiulWqcvoAZE\nHlUiiiQNRFGEUR4M/O8XZ6XdxoTk7LX3Xtc+5/uZOXPWvvbaZ//POuef37muvfZKqgpJktSOg4Yu\nQJIk/SjDWZKkxhjOkiQ1xnCWJKkxhrMkSY0xnCVJaszUwjnJmUm+kmRrkvOm9TySpstelmYv03if\nc5IVwFeBXwW2AV8AXlZVt078ySRNjb0sDWNaM+fTgK1VdUdVPQJcCqyf0nNJmh57WRrAyil93TXA\nXSO3twG/sK+dc8yRxQk/MaVSpDl149e+XVXHDVzFonoZYNVBB9ehKw6falHSPHnw0e/zyGMPZzGP\nmVY471eSjcBGANYcx8p/ftdQpUhN2vWU/3Pn0DUcqNF+PuSgwzj96F8duCKpHdd896pFP2Zay9rb\ngRNGbq/txv5bVW2qqnVVtY5jj5xSGZJ62m8vw4/286qDDp5ZcdJSNa1w/gJwcpKTkqwCzgGunNJz\nSZoee1kawFSWtatqV5LXAf8CrAAuqqpbpvFckqbHXpaGMbXXnKvqU8CnpvX1Jc2GvSzNnlcIkySp\nMYazJEmNMZwlSWqM4SxJUmMMZ0mSGmM4S5LUGMNZkqTGGM6SJDXGcJYkqTGGsyRJjTGcJUlqjOEs\nSVJjDGdJkhpjOEuS1BjDWZKkxhjOkiQ1xnCWJKkxhrMkLcLhF71w6BK0DBjOkiQ1ZuXQBUhSCxYz\nIz7Qfb//6k+PW46WOcNZ0rI17SXq0a9vUGsxXNaWJKkxY8+ck5wAfAhYDRSwqarem+QY4KPAicA3\ngJdW1Xf7lyppWpZTPw91QpezaC1Gn2XtXcAfVdX1SY4ArktyFfBK4OqqekeS84DzgD/pX6qkKVrS\n/dzaGdYGtfZn7GXtqtpRVdd32w8AtwFrgPXA5m63zcDZfYuUNF32s9SWibzmnORE4NnAtcDqqtrR\n3XU3C8tkkubEUuvn1mbNe2q9Pg2j99naSZ4IfBx4Y1Xdn+S/76uqSlL7eNxGYCMAa47rW4akCZhE\nPx9y0GGzKHW/5in0dtfqErd26zVzTvIEFhr5kqr6RDd8T5Lju/uPB3bu7bFVtamq1lXVOo49sk8Z\nkiZgUv286qCDZ1OwtISNHc5Z+JP6QuC2qnr3yF1XAhu67Q3AFeOXJ2kWllI/H37RC+dq1jxqXuvW\n5PVZ1n4O8LvATUlu6Mb+FHgHcFmSc4E7gZf2K1HSDNjPUkPGDueq+g8g+7j7jHG/rqTZWyr9vBRm\nnr7+LPDynZKWgKUQynsypJc3L98pSVJjDGdJkhozd8va/3vdfw5dwpJw25bThi5BmoiluKQ96vCL\nXujS9jLkzFmSpMYYzpIkNWbulrUlCZb+cvYoz9xefpw5S5LUGMNZkqTGGM6S5s5yWtIetVy/7+XI\ncJYkqTGGsyRJjTGcJUlqjOEsSVJjDGdJkhpjOEuS1BjDWZKkxhjOkiQ1xnCWJKkx/scXkuaGV8jy\nP8FYLgxnSXNjdyAt55A2lJcHl7UlSWpM73BOsiLJF5P8Y3f7pCTXJtma5KNJVvUvU9Is2M9SGyYx\nc34DcNvI7XcC76mqpwPfBc6dwHNImg37WWpAr3BOshb4DeCD3e0ALwAu73bZDJzd5zkkzYb9LLWj\n78z5r4E3A491t48F7quqXd3tbcCavT0wycYkW5Js4d77e5YhaQIm0s+PPPbw9CuVlrixwznJi4Cd\nVXXdOI+vqk1Vta6q1nHskeOWIWkCJtnPqw46eMLVSctPn7dSPQd4cZKzgEOAI4H3AkclWdn9tb0W\n2N6/TElTZj9LDRl75lxVb6mqtVV1InAO8G9V9XLgs8BLut02AFf0rlLSVNnPUlum8T7nPwHelGQr\nC69ZXTiF55A0G/azNICJXCGsqj4HfK7bvgM4bRJfV9LszUM/f//Vn16WVwnz6mDLh1cIkySpMYaz\nJEmN8T++kDSXltN/guFy9vLjzFmSpMYYzpIkNcZwljTXlvqS71L//rR3hrMkSY0xnCVJaoxna0ua\ne0vxzG2Xs5c3Z86SloylEGjff/Wnl8T3oX4MZ0mSGmM4S1pS5nnmOa91a/J8zVnSkjRPr0MbytqT\nM2dJkhpjOEta0lqflbZen4ZhOEuS1Bhfc5a05I3OTlt4DdrZsvbHcJa0rAwV1AayFsNlbUmSGuPM\nWdKyNe1ZtLNljctwliQOPEgPv+iFhq6mzmVtSZIa0yuckxyV5PIkX05yW5JfTHJMkquS3N59PnpS\nxUqaHvv5wDhr1iz0nTm/F/jnqvoZ4OeB24DzgKur6mTg6u62pPbZz1Ijxg7nJE8CngdcCFBVj1TV\nfcB6YHO322bg7L5FSpou+1lqS5+Z80nAt4C/T/LFJB9Mcjiwuqp2dPvcDaze24OTbEyyJckW7r2/\nRxmSJmBi/fzIYw/PqGRp6eoTziuBU4ELqurZwPfZY8mrqgqovT24qjZV1bqqWsexR/YoQ9IETKyf\nVx108NSLXc7O/5tbOP9vbhm6DE1Zn3DeBmyrqmu725ez0Nz3JDkeoPu8s1+JkmbAfpYaMnY4V9Xd\nwF1JntENnQHcClwJbOjGNgBX9KpQ0tTZz/NhdMbs7Hlp63sRktcDlyRZBdwBvIqFwL8sybnAncBL\nez6HpNmwn6VG9ArnqroBWLeXu87o83UlzZ79LLXDy3dKUuP2tYS9e/zPXv/MWZajGfDynZIkNcZw\nliSpMYazJDXsQM7K9sztpcdwliSpMZ4QJkkNcja8vDlz1tT8znvvHroEadnwsp5Li+EsSVJjXNbW\nxI3OmHdvf+wNPzlUOdJc6Tv7Pf9vbvF9z0uAM2dJkhpjOEuS1BjDWRPzO++9e58ngXlymLR/kzqh\ny5PD5p+vOUvSwAxS7cmZsyRJjTGcNREHsmz9eMvekibP5e355bK2ejFspX4MT+2NM2dJkhpjOGvm\nnG1Ls+XsfP4YzpIkNcbXnDW2PjNgL+up5W7Ws9ndz+elPeeDM2dJkhpjOEuS1Jhe4ZzkD5PckuTm\nJB9JckiSk5Jcm2Rrko8mWTWpYtWGSb5f2ZPD2mE/z86QJ2h5cth8GPs15yRrgD8ATqmqB5NcBpwD\nnAW8p6ouTfJ+4FzggolUC9y25bRJfSmNwTBdmobq5+XGYNSB6rusvRI4NMlK4DBgB/AC4PLu/s3A\n2T2fQ9Js2M9SI8YO56raDvwl8E0Wmvh7wHXAfVW1q9tt
G7Bmb49PsjHJliRbuPf+ccvQEuBlPYc3\nyX5+5LGHZ1GyevCynu3rs6x9NLAeOAm4D/gYcOaBPr6qNgGbAPLzT69x69BsGJ5L2yT7+UlPOMZ+\n3gvDUIvRZ1n7V4CvV9W3quqHwCeA5wBHdctiAGuB7T1rlDR99rPUkD7h/E3g9CSHJQlwBnAr8Fng\nJd0+G4Ar+pWo5cLZ+aDs52XI2Xy7xl7Wrqprk1wOXA/sAr7IwrLWPwGXJvnzbuzCSRSq4cwyNL1y\n2DDs5+kxADWOXpfvrKrzgfP3GL4D8P1O0pyxn6V2eIUwSZqSeZg1e+Z2mwxn7dOQb3Hy7VXSbBnQ\nbTGcJUlqjP9lpPbKWas0Pmeh6suZsyRJjTGc1TRn8NLseHJYO1zW1o9oMQx977PmhcGmSXHmLElS\nYwxnSZqApTRrXkrfy7wynAXMx/uKW69PkibFcJYkqTGeECZnpFIPS3UJePf39Wevf+bAlSxPzpw1\nV+Zh+V1aSnx71TAMZ0mSGuOy9jLmDFTqxxmlpsWZs+aSf1hoSMtxqXe5fb9DM5wlSWqM4bxMLYWZ\npyeHSVqqfM15mTHMpH6W8/Kub6+aHWfOkiQ1xnDW3HM1QNJSYzhLktSY/YZzkouS7Exy88jYMUmu\nSnJ79/nobjxJ3pdka5Ibk5w6zeK1OM4wZT+Pbzm+fWpfPA7TdyAz54uBM/cYOw+4uqpOBq7ubgO8\nEDi5+9gIXDCZMqXH55nbB+xi7GdNgH+sTNd+w7mqPg98Z4/h9cDmbnszcPbI+IdqwTXAUUmOn1Sx\nkvqxn6X5MO5bqVZX1Y5u+25gdbe9BrhrZL9t3dgO9pBkIwt/jcOa48YsQwfCGaX2Y6L9fMhBh02v\n0oE4Q9Ss9T4hrKoKqDEet6mq1lXVOo49sm8Z2oflFszL7fudtEn086qDDp5CZcNw6Xb/PD7TMW44\n37N7eav7vLMb3w6cMLLf2m5MUrvsZ6kx44bzlcCGbnsDcMXI+Cu6szxPB743slwmzYQnhy2a/Sw1\nZr+vOSf5CPB84MlJtgHnA+8ALktyLnAn8NJu908BZwFbgR8Ar5pCzToAhpP2xn4+MC7VLo6X9Zy8\n/YZzVb1sH3edsZd9C3ht36IkTYf9LM0HrxCmJcvlbUnzynBeggwkaXwuaY/PYzc5hrMkSY3x/3Ne\nQpwx793vvPduPvaGnxy6DM0JT2pSC5w5S5LUGMNZkqTGuKy9BLicvX+7j5HL25LmgTNnSZIaYzhr\nWXGVQdI8MJznnGEjSUuP4SxJUmM8IWxOOWMenyeHSWqdM2dJkhpjOEuS1BjDeQ65pD0ZHkdJrTKc\ntaz530pKapHhLElSYzxbe444w5Ok5cGZs4R/+Ehqi+EsSVJjDOc54cxOkpYPw1mSpMbsN5yTXJRk\nZ5KbR8beleTLSW5M8skkR43c95YkW5N8JcmvT6vw5cK3+szOcjjW9rM0Hw5k5nwxcOYeY1cBP1tV\nPwd8FXgLQJJTgHOAZ3aP+bskKyZWraS+LsZ+lpq333Cuqs8D39lj7DNVtau7eQ2wttteD1xaVQ9X\n1deBrcBpE6xXUg/2szQfJvGa86uBT3fba4C7Ru7b1o39mCQbk2xJsoV7759AGUvPUl9ibdVyWN5+\nHL37+ZHHHp5yidLS1yuck7wV2AVcstjHVtWmqlpXVes49sg+ZUhTsdwCelL9vOqggydfnLTMjH2F\nsCSvBF4EnFFV1Q1vB04Y2W1tNyapYfaz1JaxwjnJmcCbgV+uqh+M3HUl8OEk7waeApwM/GfvKpeZ\n5TZj07DsZ6k9+w3nJB8Bng88Ock24HwWzuY8GLgqCcA1VfV7VXVLksuAW1lYHnttVT06reKXIoO5\nLbt/Hh97w08OXMlk2M/SfNhvOFfVy/YyfOHj7P924O19ipI0HfazNB+8QpgkSY0xnBuxzN++0zx/\nNpJmyXCWJKkxhrMkSY0Z+33OmgyXS+fHUjtzW1K7nDlLktSY/M/FgAYsIvkW8H3g20PXsg9Pps3a\nWq0L2q2t1brgx2t7alUdN1Qx40ryAPCVoevYh3n6+bei1bpgfmpbdC83Ec4ASbZU1bqh69ibVmtr\ntS5ot7ZW64K2a1uMlr8Pa1u8VuuCpV2by9qSJDXGcJYkqTEthfOmoQt4HK3W1mpd0G5trdYFbde2\nGC1/H9a2eK3WBUu4tmZec5YkSQtamjlLkiQaCOckZyb5SpKtSc4buJYTknw2ya1Jbknyhm78bUm2\nJ7mh+zhroPq+keSmroYt3dgxSa5Kcnv3+egZ1/SMkeNyQ5L7k7xxqGOW5KIkO5PcPDK212OUBe/r\nfvduTHLqALW9K8mXu+f/ZJKjuvETkzw4cvzeP83aJqWVfraXx67Lfh6/rsn2clUN9gGsAL4GPA1Y\nBXwJOGXAeo4HTu22jwC+CpwCvA344yGPVVfTN4An7zH2F8B53fZ5wDsH/nneDTx1qGMGPA84Fbh5\nf8cIOAv4NBDgdODaAWr7NWBlt/3OkdpOHN1vHj5a6md7eWI/T/v5wOuaaC8PPXM+DdhaVXdU1SPA\npcD6oYqpqh1VdX23/QBwG7BmqHoO0Hpgc7e9GTh7wFrOAL5WVXcOVUBVfR74zh7D+zpG64EP1YJr\ngKOSHD/L2qrqM1W1q7t5DbB2Ws8/A830s708EfbzIuqadC8PHc5rgLtGbm+jkQZKciLwbODabuh1\n3XLFRUMsN3UK+EyS65Js7MZWV9WObvtuYPUwpQFwDvCRkdstHDPY9zFq7ffv1Sz85b/bSUm+mOTf\nk/zSUEUtQmvHE7CXe7Cfx9e7l4cO5yYleSLwceCNVXU/cAHwU8CzgB3AXw1U2nOr6lTghcBrkzxv\n9M5aWEMZ5PT7JKuAFwMf64ZaOWY/Yshj9HiSvBXYBVzSDe0A/ldVPRt4E/DhJEcOVd+8spfHYz+P\nb1K9PHQ4bwdOGLm9thsbTJInsNDMl1TVJwCq6p6qerSqHgM+wMLy3cxV1fbu807gk10d9+xeuuk+\n7xyiNhb+kbm+qu7pamzimHX2dYya+P1L8krgRcDLu39sqKqHq+rebvs6Fl7L/elZ17ZITRzP3ezl\nXuznMUyyl4cO5y8AJyc5qftL7RzgyqGKSRLgQuC2qnr3yPjo6xa/Bdy852NnUNvhSY7Yvc3CyQc3\ns3C8NnS7bQCumHVtnZcxsgTWwjEbsa9jdCXwiu4sz9OB740sl81EkjOBNwMvrqofjIwfl2RFt/00\n4GTgjlnWNoZm+tle7s1+XqSJ9/K0zmY70A8WzrD7Kgt/Tbx14Fqey8ISyY3ADd3HWcA/ADd141cC\nxw9Q29NYOPv1S8Atu48VcCxwNXA78K/AMQPUdjhwL/CkkbFBjhkL/6DsAH7IwmtO5+7rGLFwVuff\ndr97NwHrBqhtKwuvk+3+fXt/t+9vdz/nG4Drgd+c9c91zO+xiX62l3vVZz+PV9dEe9krhEmS1Jih\nl7UlSdIeDGd
JkhpjOEuS1BjDWZKkxhjOkiQ1xnCWJKkxhrMkSY0xnCVJaozhLElSYwxnSZIaYzhL\nktQYw1mSpMYYzpIkNcZwliSpMYazJEmNMZwlSWqM4SxJUmMMZ0mSGmM4S5LUGMNZkqTGGM6SJDVm\nauGc5MwkX0myNcl503oeSdNlL0uzl6qa/BdNVgBfBX4V2AZ8AXhZVd068SeTNDX2sjSMlVP6uqcB\nW6vqDoAklwLrgb029MpDj60nHHnClEqR5tNDO7/07ao6buAyFtXLACsPSa06IjMqT2rfIw8Uux6q\nRTXFtMJ5DXDXyO1twC+M7pBkI7AR4AlHrOXp//eqKZUizaeb//on7hy6Bg6gl2GPfn5iePpvHzKb\n6qQ5sPXjDy36MYOdEFZVm6pqXVWtW3HosUOVIWkCRvt5pbks9TatcN4OjK5Tr+3GJM0Xe1kawLTC\n+QvAyUlOSrIKOAe4ckrPJWl67GVpAFN5zbmqdiV5HfAvwArgoqq6ZRrPJWl67GVpGNM6IYyq+hTw\nqWl9fUmzYS9Ls+cVwiRJaozhLElSYwxnSZIaYzhLktQYw1mSpMYYzpIkNcZwliSpMYazJEmNMZwl\nSWqM4SxJUmMMZ0mSGmM4S5LUGMNZkqTGGM6SJDXGcJYkqTGGsyRJjTGcJUlqjOEsSVJjDGdJkhpj\nOEuS1BjDWZKkxhjOkiQ1ZuxwTnJCks8muTXJLUne0I0fk+SqJLd3n4+eXLmSpsF+ltrSZ+a8C/ij\nqjoFOB14bZJTgPOAq6vqZODq7rakttnPUkPGDueq2lFV13fbDwC3AWuA9cDmbrfNwNl9i5Q0Xfaz\n1JaJvOac5ETg2cC1wOqq2tHddTeweh+P2ZhkS5Itjz547yTKkDQBfft510MzKVNa0nqHc5InAh8H\n3lhV94/eV1UF1N4eV1WbqmpdVa1bceixfcuQNAGT6OeVh8ygUGmJW9nnwUmewEIjX1JVn+iG70ly\nfFXtSHI8sLNvkZKmbzn28wn3fWPoEppx11EnDl2CRvQ5WzvAhcBtVfXukbuuBDZ02xuAK8YvT9Is\n2M9SW/rMnJ8D/C5wU5IburE/Bd4BXJbkXOBO4KX9SpQ0A/az1JCxw7mq/gPIPu4+Y9yvK2n27Gep\nLV4hTJKkxhjOkiQ1xnCWJKkxhrMkSY0xnCVJaozhLElSYwxnSZIaYzhLktQYw1mSpMYYzpIkNcZw\nliSpMYazJEmNMZwlSWqM4SxJUmMMZ0mSGmM4S5LUGMNZkqTGGM6SJDXGcJYkqTGGsyRJjTGcJUlq\nTO9wTrIiyReT/GN3+6Qk1ybZmuSjSVb1L1PSLNjPUhsmMXN+A3DbyO13Au+pqqcD3wXOncBzSJoN\n+1lqQK9wTrIW+A3gg93tAC8ALu922Qyc3ec5JM2G/Sy1o+/M+a+BNwOPdbePBe6rql3d7W3Amp7P\nIWk27GepEWOHc5IXATur6roxH78xyZYkWx598N5xy5A0AZPs510PTbg4aRla2eOxzwFenOQs4BDg\nSOC9wFFJVnZ/ba8Ftu/twVW1CdgEcOjqZ1WPOiT1N7F+Puy4g+xnqaexZ85V9ZaqWltVJwLnAP9W\nVS8HPgu8pNttA3BF7yolTZX9LLVlGu9z/hPgTUm2svCa1YVTeA5Js2E/SwPos6z936rqc8Dnuu07\ngNMm8XUlzZ79LA3PK4RJktQYw1mSpMYYzpIkNcZwliSpMYazJEmNMZwlSWqM4SxJUmMMZ0mSGmM4\nS5LUGMNZkqTGGM6SJDXGcJYkqTGGsyRJjTGcJUlqjOEsSVJjDGdJkhpjOEuS1BjDWZKkxhjOkiQ1\nxnCWJKkxhrMkSY0xnCVJakyvcE5yVJLLk3w5yW1JfjHJMUmuSnJ79/noSRUraXrsZ6kdfWfO7wX+\nuap+Bvh54DbgPODqqjoZuLq7Lal99rPUiLHDOcmTgOcBFwJU1SNVdR+wHtjc7bYZOLtvkZKmy36W\n2tJn5nwS8C3g75N8MckHkxwOrK6qHd0+dwOr+xYpaersZ6khfcJ5JXAqcEFVPRv4PnsseVVVAbW3\nByfZmGRLki2PPnhvjzIkTcDE+nnXQ1OvVVry+oTzNmBbVV3b3b6chea+J8nxAN3nnXt7cFVtqqp1\nVbVuxaHH9ihD0gRMrJ9XHjKTeqUlbexwrqq7gbuSPKMbOgO4FbgS2NCNbQCu6FWhpKmzn6W2rOz5\n+NcDlyRZBdwBvIqFwL8sybnAncBLez6HpNmwn6VG9ArnqroBWLeXu87o83UlzZ79LLXDK4RJktQY\nw1mSpMb0fc1ZkubWXUedOHQJ0l45c5YkqTGGsyRJjTGcJUlqjOEsSVJjDGdJkhpjOEuS1BjDWcvC\nB444lw8cce7QZUjSATGcJUlqjBch0dwaZya8mMe85oELF/31JWkSDGfNjVkvS+/5fIa1pFlxWVuS\npMY4c1bTWjqJa7QWZ9GSpslwVlNaCuPHY1BLmiaXtSVJaowzZw1uXmbL++IsWtKkOXOWJKkxzpwX\nYeUDK4YuYW7tOuLRvY7P+6x5T7u/H2fQkvownDWIpRbKezKkJfXhsrYkSY3pNXNO8ofA/wMKuAl4\nFXA8cClwLHAd8LtV9UjPOrUELPXZ8t7M08li9rPUjrFnzknWAH8ArKuqnwVWAOcA7wTeU1VPB74L\nLL9/kfVjlmMw76nlY2A/S23pu6y9Ejg0yUrgMGAH8ALg8u7+zcDZPZ9D0mzYz1Ijxg7nqtoO/CXw\nTRaa+HssLHvdV1W7ut22AWv29vgkG5NsSbLl0QfvHbcMSRMwyX7e9dAsKpaWtrFfc05yNLAeOAm4\nD/gYcOaBPr6qNgGbAA5d/awatw617YKnvHLoEprS6lnck+znw447yH6WeuqzrP0rwNer6ltV9UPg\nE8BzgKO6ZTGAtcD2njVKmj77WWpIn3D+JnB6ksOSBDgDuBX4LPCSbp8NwBX9StS8cta8bw2eHGY/\nSw0Ze1m7qq5NcjlwPbAL+CILy1r/BFya5M+7sbbW7zR1hvKBaWmJ236W2tLrfc5VdT5w/h7DdwCn\n9fm6kmbPfpba4RXCJElqjNfW1sS4nD2eDxxxbhNL25La4cxZkqTGGM6SJDXGcJYkqTGGsybC15v7\n+cAR57b43mdJAzGcJUlqjOEsSVJjfCuVenE5e7J8W5UkcOYsSVJzDGdJkhpjOEuS1Bhfc9bYfL15\nOna/peoXB65D0nCcOUuS1BjDWZKkxhjOkiQ1xnCWJKkxhrMkSY0xnCVJaozhLElSYwxnSZIaYzhL\nktSY/YZzkouS7Exy88jYMUmuSnJ79/nobjxJ3pdka5Ibk5w6zeIlLY79LM2HA5k5XwycucfYecDV\nVXUycHV3G+CFwMndx0bggsmUqZZc8JRXeunO+XUx9rPUvP2Gc1V9HvjOHsPrgc3d9mbg7JHxD9WC\na4Cjkhw/qWIl9WM/S/Nh3NecV1fVjm77bmB1t70GuGtkv23d2I9JsjHJ
liRbHn3w3jHL0BB+/78u\n5vf/6+Khy9DkTLSfdz00vUKl5aL3CWFVVUCN8bhNVbWuqtatOPTYvmVImoBJ9PPKQ6ZQmLTMjBvO\n9+xe3uo+7+zGtwMnjOy3thuT1C77WWrMuOF8JbCh294AXDEy/oruLM/Tge+NLJdJapP9LDVm5f52\nSPIR4PnAk5NsA84H3gFcluRc4E7gpd3unwLOArYCPwBeNYWaJY3Jfpbmw37Duapeto+7ztjLvgW8\ntm9RkqbDfpbmg1cIkySpMYazJEmNMZwlSWqM4SxJUmMMZ0mSGmM4a2xewnM6XvPAhbzmgQuHLkPS\ngAxnSZIaYzhLktQYw1mSpMYYzurF/z5ysnytWRIYzpIkNcdwliSpMYazJsKl7X58+5SkUYazJEmN\nMZwlSWqM4ayJ8czt8bicLWlPhrMkSY0xnCVJaszKoQvQ0rN7afuCp7xy0Dpa53K2pH1x5ixJUmMM\nZ02NJ4ftm7NmSY/HcJYkqTH7DeckFyXZmeTmkbF3JflykhuTfDLJUSP3vSXJ1iRfSfLr0ypc88G3\nV/2ooa8EZj9L8+FAZs4XA2fuMXYV8LNV9XPAV4G3ACQ5BTgHeGb3mL9LsmJi1Urq62LsZ6l5+w3n\nqvo88J09xj5TVbu6m9cAa7vt9cClVfVwVX0d2AqcNsF6JfVgP0vzYRKvOb8a+HS3vQa4a+S+bd3Y\nj0myMcmWJFseffDeCZShlnlp2u3LAAAFqElEQVQC1Nwcg979vOuhKVcoLQO93uec5K3ALuCSxT62\nqjYBmwAOXf2s6lOH5sNoOH3giHMHrGR25iSQgcn182HHHWQ/Sz2NHc5JXgm8CDijqnY343bghJHd\n1nZjkhpmP0ttGSuck5wJvBn45ar6wchdVwIfTvJu4CnAycB/9q5SS87uGeVSnUHP2YzZfpYas99w\nTvIR4PnAk5NsA85n4WzOg4GrkgBcU1W/V1W3JLkMuJWF5bHXVtWj0ype8+81D1y4pAK69VC2n6X5\nsN9wrqqX7WV4n/8CVdXbgbf3KUrSdNjP0nzwP77Q4Ob9RLHWZ8uS5o/hrKbsGXSthrWBLGmavLa2\nJEmNceasprW05O1sWdKsGM6LsOsIT1Qd0qyXvA1jSUNxWVuSpMbkfy4GNGARybeA7wPfHrqWfXgy\nbdbWal3Qbm2t1gU/XttTq+q4oYoZV5IHgK8MXcc+zNPPvxWt1gXzU9uie7mJcAZIsqWq1g1dx960\nWlurdUG7tbVaF7Rd22K0/H1Y2+K1Whcs7dpc1pYkqTGGsyRJjWkpnDcNXcDjaLW2VuuCdmtrtS5o\nu7bFaPn7sLbFa7UuWMK1NfOasyRJWtDSzFmSJGE4S5LUnMHDOcmZSb6SZGuS8wau5YQkn01ya5Jb\nkryhG39bku1Jbug+zhqovm8kuamrYUs3dkySq5Lc3n0+esY1PWPkuNyQ5P4kbxzqmCW5KMnOJDeP\njO31GGXB+7rfvRuTnDpAbe9K8uXu+T+Z5Khu/MQkD44cv/dPs7ZJaaWf7eWx67Kfx69rsr1cVYN9\nACuArwFPA1YBXwJOGbCe44FTu+0jgK8CpwBvA/54yGPV1fQN4Ml7jP0FcF63fR7wzoF/nncDTx3q\nmAHPA04Fbt7fMQLOAj4NBDgduHaA2n4NWNltv3OkthNH95uHj5b62V6e2M/Tfj7wuibay0PPnE8D\ntlbVHVX1CHApsH6oYqpqR1Vd320/ANwGrBmqngO0HtjcbW8Gzh6wljOAr1XVnUMVUFWfB76zx/C+\njtF64EO14BrgqCTHz7K2qvpMVe3qbl4DrJ3W889AM/1sL0+E/byIuibdy0OH8xrgrpHb22ikgZKc\nCDwbuLYbel23XHHREMtNnQI+k+S6JBu7sdVVtaPbvhtYPUxpAJwDfGTkdgvHDPZ9jFr7/Xs1C3/5\n73ZSki8m+fckvzRUUYvQ2vEE7OUe7Ofx9e7locO5SUmeCHwceGNV3Q9cAPwU8CxgB/BXA5X23Ko6\nFXgh8Nokzxu9sxbWUAZ5b1ySVcCLgY91Q60csx8x5DF6PEneCuwCLumGdgD/q6qeDbwJ+HCSI4eq\nb17Zy+Oxn8c3qV4eOpy3AyeM3F7bjQ0myRNYaOZLquoTAFV1T1U9WlWPAR9gYflu5qpqe/d5J/DJ\nro57di/ddJ93DlEbC//IXF9V93Q1NnHMOvs6Rk38/iV5JfAi4OXdPzZU1cNVdW+3fR0Lr+X+9Kxr\nW6Qmjudu9nIv9vMYJtnLQ4fzF4CTk5zU/aV2DnDlUMUkCXAhcFtVvXtkfPR1i98Cbt7zsTOo7fAk\nR+zeZuHkg5tZOF4but02AFfMurbOyxhZAmvhmI3Y1zG6EnhFd5bn6cD3RpbLZiLJmcCbgRdX1Q9G\nxo9LsqLbfhpwMnDHLGsbQzP9bC/3Zj8v0sR7eVpnsx3oBwtn2H2Vhb8m3jpwLc9lYYnkRuCG7uMs\n4B+Am7rxK4HjB6jtaSyc/fol4Jbdxwo4FrgauB34V+CYAWo7HLgXeNLI2CDHjIV/UHYAP2ThNadz\n93WMWDir82+7372bgHUD1LaVhdfJdv++vb/b97e7n/MNwPXAb8765zrm99hEP9vLveqzn8era6K9\n7OU7JUlqzNDL2pIkaQ+GsyRJjTGcJUlqjOEsSVJjDGdJkhpjOEuS1BjDWZKkxvx/fqpoAT8CXPcA\nAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zWKmRev3WKK4", + "colab_type": "text" + }, + "source": [ + "### Visualise Unpruned Results" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "vwwTWZ6xba0q", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 485 + }, + "outputId": "c9ffbb8a-9951-4af8-c63e-b2d215107f1d" + }, + "source": [ + "# Visualise Results\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i+5)\n", + " image = np.array(img)[:, :, [2, 1, 0]]\n", + " result = unpruned_demo.run_on_opencv_image(image, objDet=\"True\")\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(result)\n", + "plt.show()" + ], + "execution_count": 57, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3W3wZGV54P/vNTAwoIw8OA44wzq4\nkli4icpOEf5l1qTCuhJ0BYMaTCqiIU5MGVfj7gquLxhfpCLrrkrcXQmKK24ZH5GCikYlrNHaF7AC\nITz6MCEQh51hiKBYQZCB6//idxp7mn4+p/vc3f39VP3q1336PFx9Zu7f1dd97nN3ZCaSJKkc69oO\nQJIkHcjkLElSYUzOkiQVxuQsSVJhTM6SJBXG5CxJUmFmlpwj4vSI+E5E7IqIC2Z1HEmzZVuW5i9m\ncZ9zRBwEfBd4GbAb+Bbw+sy8o/GDSZoZ27LUjllVzqcAuzLzrsz8KfAZ4MwZHUvS7NiWpRYcPKP9\nbgG+3/V8N/BLg1beeHTks46fUSTSgvq7W/jHzNzUchgTtWWAQ9cdkk876LCZBiUtkn96/Cc8+sRP\nY5JtZpWcR4qIHcAOgE1b4P1faS0UqUi/8ez997Qdw7i62/Ph6zbwsqNObTkiqRzXPHjdxNvMqlv7\nXqC7Ft5aLXtSZl6amdszc/vGY2YUhaS6RrZlOLA9H7rukLkFJy2rWSXnbwEnRsQJEXEIcA5w9YyO\nJWl2bMtSC2bSl5yZ+yPiD4GvAgcBH8/M22dxLEmzY1uW2jGzC72Z+WXgy7Pav6T5WLa2/Nl9XwXg\nN5/18pYjkQZzFJaklfTZfV+de4JexA8GTcfc2d8go44zbPtFOq+jOH2nJEmFsXKWtFKWqbqapVEV\n7rT7HHT+Rx1vVAU/i3jbZOUsSTpAJ4nO84NM53jTJtnOtsuSpE3OkiQVxm5tSQtvkkFCg7pH+y3v\n3u8k3al1Ks5J9jerAWZN769OnIs4iK4JJmdJC23Qdcw63ZvjJoTeY9ftUu33Xrq7aocl6VVLXsvO\nbm1Jkgpj5SxpIU1b3Y6z33Hvte1dr/N80q7YYesPGyRltby8TM6SltK0iauJZD/ph4F+j7Xa7NaW\nJKkwVs6S1DK7p9XLylmSpMKYnCVJc1H39rZVuiZvcpa01Jr+gz7ONJGTJJI6+ystYQ0bVT7OQLt+\ng+3GnUa0jSlHZ8nkLElSYRwQJmlhfOG+R558fFD+CgCP7/vG0G0Oyl+B+4bva5zl/QyqFDux9Tvu\nsGNMsr/XbN5wwHalVIzT3rM9Su895MsuMrPtGHjeCyPf/xU/JyyjVx/32NDXr9yzvvF9j9rnqJjG\n3c+s/caz99+YmdtbDWIKR69/Rr7sqFNnsu9OUutOTE3udxZmFWvT+9XsXPPgdTzw2I9ikm3s1pYk\nqTCWq5qZVx/32MDqc9zqtc6+R1W+o7YfdgwtvllWy4OOY7WrcZmc1YpO0ps0AY6beOvojk3LYV6J\neBQTtcZlt7YkSYWZunKOiOOBTwKbgQQuzcyLI+Jo4LPANuBu4HWZ+WD9UIebdnDQsG37bT9u5TZO\nRThJzL3769522HF6j1G3K3gc86hum4hj0L9RKfHPU2ntuUmlVM29HNilYep0a+8H/n1m3hQRRwA3\nRsQ1wBuBazPzfRFxAXABcH79UPsb5w/wqNcHJeBZ6RfTJMecJPH3HqPu+VoG3d3W/c77Mr/3IYpo\nz02aNClf9qVjGjv2ea/4wdjr2tWtfqbu1s7MPZl5U/X4x8CdwBbgTODyarXLgbPqBilptmzPUlka\nGRAWEduAFwPXA5szc0/10l7WusmKM6xCvHLP+plVz4Oq3nEGIdXtUh/2vhalWpx2INmo/XVbhd6D\nYRaxPdfVZNXcu79Jq2irZ0EDyTking5cAbwjMx+K+Nl91pmZEdF3lpOI2AHsANi0pW4U/ZPalXvW\nj3X9dh6a+IM/yTXsVRhpPOqDxqDXmxw3sGyaaM+Hr2s3uYzbnd10Qh7nOOMkaq9FC2qO1o6I9aw1\n5E9l5herxfdFxHHV68cB+/ptm5mXZub2zNy+cT5tRNIQTbXnQ9cdMp+ApSVWZ7R2AJcBd2bmB7pe\nuho4F3hf9fuqWhGOMO5I5VWqgFbhvY7z7z5tr8kq3udcSnuua1TVPK9qeZzjj6qiraBXW51u7ZcA\nvwPcGhE3V8v+E2uN+HMRcR5wD/C6eiFKmgPbs1SQqZNzZv4fYNBE3qdNu99pnf1nT71wfcXv/2wA\nVPfrT+zsv363J3b23++g5f227z3mNNuNWj4qtit+/96h28zSKl6zXVSltedJlV4x99OJaZIBY1od\nCz1D2Nl/tmXgvaqd1/tZt3MbT+y8myd23t14TIP2u27ntiePOyv9jtF7Dgadr2HncVKDBuL1O96w\n+63HGbneb/mo4zf5XqW6Rn1wKHUSFc3WQidnSZKW0UJ+8UX/bt+7p9rXpNut27lt6HajXp/mmNMY\nFd8Vv99/uybv8W3ifvGmv9mqO6ZZfte
05mcRu7R7jeridnDY6onMvrctztXzXhj5/q+M9zlh1HXX\npnSSWyeZLZs2r0VrPL/x7P03Zub2tuOY1NHrn5EvO+rUmey7O0ktQ1LuZ9xr0CbqxXHNg9fxwGM/\nGjSmoy+7tSVJKsxCdmtLWm3DquZFrZilbkuXnM//6MYnH1/05ocOeD6RnU/d3yK56M0PtR2CJGlK\ndmtLklSYpU7Oi1r1SprOMnRpX/alY5bifaiehenWHjVKu+lE/O4tv9jo/uatcz4GdW+f/WdbHLEt\nSYVa6spZkqRFtDCVs6TV5ghtrRIrZ0kq0Khrz865vdxMzpIkFcbkLElSYUzOkiQVxuQsSVJhTM6S\nJBXG5CxJUmFMzpIkFcbkLElSYUzOkiQVpnZyjoiDIuJvIuIvqucnRMT1EbErIj4bEYfUD1PSPNie\npTI0UTm/Hbiz6/lFwAcz83nAg8B5DRxD0nzYnqUC1ErOEbEVeAXwsep5AL8GfKFa5XLgrDrHkDQf\ntmepHHW/lepDwLuAI6rnxwA/zMz91fPdQN8vYo6IHcAOgE3Dv6pZ0nw00p4PX7dhxmGuhvNe8YOh\nr79ms+d5mU1dOUfEK4F9mXnjNNtn5qWZuT0zt2/0296kVjXZng9d52Vpqa46lfNLgFdFxBnABmAj\ncDFwZEQcXH3a3grcWz9MSTNme5YKMnXlnJnvzsytmbkNOAf435n528DXgddUq50LXFU7SkkztQjt\neVg37nmv+MHIbmBpkcziPufzgXdGxC7WrlldNoNjSJoP27PUgroDwgDIzL8G/rp6fBdwShP7lTR/\ntud22QMgcIYwSZKKY3KWJKkwJmdJS8MuYS0Lk7MkFcKJR9RhcpYkqTAmZ0kLx3uetewauZVKkkrT\nSdCXfans+YHH+SBhd/bqsXKWJKkwJmdJC+k1mzeMVVGW2sXd6X5/PL4xdD2r5tVkt7akhfaazRv4\nwn2PDF2nO0E30c39xld+EYBP/MVvTLRddxzLlJQ/u++rAPzms17eciTDdeIcpE78w/a9ffv2ifdn\n5SxJUmGsnCUtvE6VOaqChvYGivV2rx+Uv9J3vUWqmBfJZ/d9dWRlPM46k243qlofxOQsaWmM08Xd\n0XRX96hjjMPEvHx+81kv564HvzfxdnZrS5JUGCtnSUult/qcpKt7mH4DuDoDw+DAburOuuf1WdZv\n3XG6W59iX/8BTP26WHu3H6frdlh37LQDpwbtc1iXcOmDzGbFylmSpMJYOUtaapNchx6mX2U8aFBX\nt0HrPlnh7xu+fW/1OMkAo363OI2z/TjV9yQG3Wo1aJ91jlVXKbeFmZwlLb3uru4mEvW4Ho9vHJCU\nJxnwNShJdPY37D7pUQlmVLLst91vPuvlEyfNcRNd74eBthLjNO9xVuzWliSpMFbOkhbOPKvfOnq7\nsgfF3b38Kd3g901+3GGV56DqcJaDr0qpRmdlVMU9zQxhJmdJC2NW9wHPKtmPFe++nvX2jbntiGvV\nJWn7+u246nxAGbad9zlLkrQEalXOEXEk8DHgXwAJ/C7wHeCzwDbgbuB1mflgrSjVmCd23g3Aup3b\nWo1D5Vnl9jxxRT6qul2gqlZlqls5Xwx8JTOfD7wQuBO4ALg2M08Erq2eq6Y/ufeW2vvoJOZl8Orj\nHms7hLG8+rjHBv4UyPZcgN981sufvIY5z2u1bRyzYx63VA17f53lU00GM2R557XnvvDEyYKlRnKO\niGcALwUuA8jMn2bmD4Ezgcur1S4Hzpr2GJLmw/YslaVOt/YJwP3A/4yIFwI3Am8HNmfmnmqdvcDm\nfhtHxA5gB8CmLTWi0Njsyp6fTmV85Z71tdaZo8ba8+Hrlu/LG674g75v+4CK6eAL3zD2dpOsN6wq\nG7Rd3eOOqljH3X+nEp30e5RnXb1Pe792k/scJTJzug0jtgPXAS/JzOsj4mLgIeBtmXlk13oPZuZR\nw/b1vBdGvv8rwz8nnP1nwzP4+R/dOG7oC+lP7r2Fd2/5xYm3u+jNDw187Yrfv7dOSK169XGPlZLU\n+moiOf/Gs/ffmJmT34MxhSbb89Hrn5EvO+rU2QY8Z1f8wWbO/sgU9zTNwO0fvhCAF7ztvXM/dr9J\nRUo6N6W65sHreOCxH8Uk29S55rwb2J2Z11fPvwCcDNwXEccBVL8dGiGVz/YsFWTqbu3M3BsR34+I\nn8/M7wCnAXdUP+cC76t+X9VIpCuodxBY9/PuKrqzvN+yP+HA7uxxR2sPGrDUW+X1q/76bTtOlTvu\nMcc16f4K62aeK9vzYrjiDzbDHZcA8G0r1qVWdxKStwGfiohDgLuAN7FWjX8uIs4D7gFeV/MYK6s3\n2Y7brd297rBu7X7GTbZNblvnmOPub9R+2xw9XVAXve25UKOuLTedpLuvn4573VjNqpWcM/NmoN81\nsdPq7FfS/NmepXI4fecS6de9Pa5h1eaVe9aPrCxHVX+DXh+0vLNs0op2nCq033tto3Kd9j1qtYwz\nMrrpQVndc0WX8hWKq8bpOyVJKoyV8xKZpmKGZq55Dtp+UHU4y8FXVqJaBuPeS9y7flMVtJVyu0zO\nWjqFDK4aaZVHh2uwSZNyv+0dxb347NaWJKkwJmdJWiDPP+ktPP+kt7QdhmbM5KwnR2M3fa1278Wv\nHXrMcUaBz8Kg99rkORj1/jrX+e3SltSPyVmSpMI4IKxA7/+9Fz1l2Z9w4PSd3d+A8ydDtoNvDjzO\nay95/ZOP91dz6I+qHLu36d6udzkAhwLV8v3v7b/O/vdOfsxh+4M3sP+9w/f3s3PX/f4/+ZTjff4t\nnx66n3FNc6/2monmydeKef5Jb+Hb1VSevWY1c5jmZ+pvpWqS30p1oE6S/Y8fu7n2vtr4Vqp+3dnH\nvv3zU+2rjaktm07O05rnt1I1yW+lqnecYQZdax6UpDtmGbujw0eb97dSSZKkGbBbW43prZjfdujP\n/nt9+OLXTl09S8tu2oq5+/Vh1bPV7eIxOS+pSb+NqkndSbl72aDU3H1N1vmmtWrqJmYtJ7u1JUkq\njJXzEmqjat578Wv7VszdBg206q6WndJSkkzOS6WtpAz9u7IHee0lr+87EtqErFXSdHd2Z31vr1oO\ndmtLklSYhamcO/fkDrrfuc0BUM1bmzjkojfP7ghN3eM8ScXcrZR7iaVl48jt5WDlLElSYRamclZ5\n9l782rUpOiVNZNj1Zm+dEixg5bxM03QuulFd2h9+dP+cIpEWwxV/sHkuiXnU10qOikPtW7jkLEnS\nsqvVrR0RfwT8HpDArcCbgOOAzwDHADcCv5OZP60Z5wE61fNyDQJr1sgehhoDwp6cprOBLu29TutZ\njLbas2ZnnNurHBxWpqmTc0RsAf4dcFJm/iQiPgecA5wBfDAzPxMRlwDnAR9pJFrglBvufPLxFV3L\nR31r1So4YAT2DL9xapLu7M7jQdu87dCDn/xaSUdut6et9rxKvM6sSdTt1j4YOCwiDgYOB/YAvwZ8
\noXr9cuCsmseQNB+2Z6kQU1fOmXlvRPwX4B+AnwBfY63b64eZ2SmddgN9S9qI2AHsANjUQNE7q+8m\nbkPJ9wAPq5qHDQD78KP7x5res8T3vAqabM+Hr9sw+4ClJVenW/so4EzgBOCHwOeB08fdPjMvBS4F\neN4LI6eNQ/PRxHXmUV3cUPYHk2XWZHs+ev0zbM9dSvjWqWETkzitZ5nqdGv/a+DvM/P+zHwM+CLw\nEuDIqlsMYCuwPCWttLxsz1JB6iTnfwBOjYjDIyKA04A7gK8Dr6nWORe4ql6IKsHbDj146i7tOutq\nbmzPUkGmTs6ZeT1rA0VuYu22i3WsdWudD7wzInaxdvvFZQ3EqRZ1upr7+fCj+022S8D2PBujRmjP\nc5T2OBOTqBy17nPOzAuBC3sW3wWcUme/kubP9iyVw7m1NdCwihnqdU+PGhzmwDAtshIGgQ07toPD\nyuf0nSraqA8IkrSMTM6SJBXG5Ky+5jUIbJz9vPaS11tBS1opJmdJkgpjclbrxq3ErZ5VunG+r7mE\nL7nwtqrymZx1gL0Xv/ZnU3XOmfdLa5GVPEJ7kFEJ2iTdHpOzJEmF8T5nHWCS72qeBe9/1qJZxIpZ\n5TM5C5jthCOSytX58ODEJGWxW1uSpMKYnNXaADBp0dmlrVkxOav168yS2jfqg4Qjt+fL5CxJUmFM\nziuszXuaRxk1MYkTkkhaZo7WXmGL0J394Uf3e1uVirOs15oduV0OK2dJkgpjcl5Ri3Rfs13cKski\nzJ2txWdyXjElX2ceZVSCNklrlsb5Uotl4cjt9pmcJUkqjAPCVswiDAKT1L5xB4dpNqycJUkqzMjk\nHBEfj4h9EXFb17KjI+KaiPhe9fuoanlExJ9GxK6IuCUiTp5l8JrMsGuyowZdaTnYnqXFME7l/Ang\n9J5lFwDXZuaJwLXVc4BfB06sfnYAH2kmTNUxarDUIiVlR27X9glsz1LxRibnzPwm8EDP4jOBy6vH\nlwNndS3/ZK65DjgyIo5rKlhJ9diepcUw7YCwzZm5p3q8F+iMDNgCfL9rvd3Vsj30iIgdrH0aZ9OW\nKaPQSMtSMWumGm3Ph6/bMLtIC7RMt1D1M2pgmGaj9oCwzEwgp9ju0szcnpnbNx5TNwqtGu95no0m\n2vOh6w6ZQWTtuP3DF3L7hy9sO4wiDPsQMuoecE1u2uR8X6d7q/q9r1p+L3B813pbq2WSymV7lgoz\nbXK+Gji3enwucFXX8jdUozxPBX7U1V0mNWqcwWFW0GOxPUuFGXnNOSI+Dfwq8MyI2A1cCLwP+FxE\nnAfcA7yuWv3LwBnALuBh4E0ziFljWKS5s+sa9s1VsHYu/OaqNbbn8diVrbaNTM6ZOeiv/Gl91k3g\nrXWDkjQbtmdpMTh9p5ZCpyfA737WvDmKWbPg9J1LyJnApOnZpT09z11zTM6SJBXGbu0lskqDwKRZ\necHb3vuUZd/2Ht6hzv7IfdWjp547TcfKWZKkwlg5LwErZmm2flYZ+j3GvbrPjZpj5bzgTMwHGvV+\nnZREdZmM1pz9kfs8FzNkcpYkqTB2a0vShKwYNWtWzlo6zrktadGZnCVJKozd2gvKgWCjjTOlp9N5\nSiqRlfMCMjFPxi5uSYvG5CxJUmHs1l4gVsyStBqsnCVJKozJWZKkwpicF4Tf0VzPOPc+S1IpvOZc\nOK8zS9LqsXKWJKkwVs5aKR9+dP/QSUkkqQRWzpIkFWZkco6Ij0fEvoi4rWvZ+yPi2xFxS0RcGRFH\ndr327ojYFRHfiYiXzyrwVWAlp6bZnqXFME7l/Ang9J5l1wD/IjN/Efgu8G6AiDgJOAd4QbXN/4iI\ngxqLVk9yhPb0VvzcfQLbs1S8kck5M78JPNCz7GuZ2fnrdh2wtXp8JvCZzHw0M/8e2AWc0mC8kmqw\nPUuLoYlrzr8L/GX1eAvw/a7XdlfLniIidkTEDRFxw0M/aCCKJTFOV/YKV32avdrt+dEnfjrjEKXl\nVys5R8R7gP3ApybdNjMvzcztmbl94zF1olgdK94d27hR53LVrvk31Z4PXXdI88FJK2bqW6ki4o3A\nK4HTMjOrxfcCx3ettrVaJqlgtmepLFNVzhFxOvAu4FWZ+XDXS1cD50TEoRFxAnAi8H/rh7k6Pv+W\nTw987W2HHjzwHl1pWrZnqTwj/9JHxKeBXwWeGRG7gQtZG815KHBNRABcl5lvyczbI+JzwB2sdY+9\nNTMfn1Xwy2jUHNqar70XvxaAY9/++ZYjaYbtWVoMI5NzZvbLFpcNWf+PgT+uE5Sk2bA9S4vBGcK0\nsrxEIKlU/nUqRKf7lEMHr2Myac/ei1+7NF3bkspn5SxJUmFMzpIkFcZ+0pY92Z1dGfaVhpqv3tHx\nyzZyW1K5rJwlSSpM/GwyoBaDiLgf+CfgH9uOZYBnUmZspcYF5cZWalzw1Niek5mb2gpmWhHxY+A7\nbccxwCL9+5ei1LhgcWKbuC0XkZwBIuKGzNzedhz9lBpbqXFBubGVGheUHdskSn4fxja5UuOC5Y7N\nbm1JkgpjcpYkqTAlJedL2w5giFJjKzUuKDe2UuOCsmObRMnvw9gmV2pcsMSxFXPNWZIkrSmpcpYk\nSRSQnCPi9Ij4TkTsiogLWo7l+Ij4ekTcERG3R8Tbq+U7I+LeiLi5+jmjpfjujohbqxhuqJYdHRHX\nRMT3qt9HzTmmn+86LzdHxEMR8Y62zllEfDwi9kXEbV3L+p6jWPOn1f+9WyLi5BZie39EfLs6/pUR\ncWS1fFtE/KTr/F0yy9iaUkp7ti1PHZftefq4mm3LmdnaD3AQ8HfAc4FDgL8FTmoxnuOAk6vHRwDf\nBU4CdgL/oc1zVcV0N/DMnmX/GbigenwBcFHL/557gee0dc6AlwInA7eNOkfAGcBfAgGcClzfQmz/\nBji4enxRV2zbutdbhJ+S2rNtubF/T9vz+HE12pbbrpxPAXZl5l2Z+VPgM8CZbQWTmXsy86bq8Y+B\nO4EtbcUzpjOBy6vHlwNntRjLacDfZeY9bQWQmd8EHuhZPOgcnQl8MtdcBxwZEcfNM7bM/FpmduYJ\nvQ7YOqvjz0Ex7dm23Ajb8wRxNd2W207OW4Dvdz3fTSENKCK2AS8Grq8W/WHVXfHxNrqbKgl8LSJu\njIgd1bLNmbmnerwX2NxOaACcA3y663kJ5wwGn6PS/v/9Lmuf/DtOiIi/iYhvRMS/aiuoCZR2PgHb\ncg225+nVbsttJ+ciRcTTgSuAd2TmQ8BHgH8OvAjYA/zXlkL75cw8Gfh14K0R8dLuF3OtD6WV4fcR\ncQjwKqDzrRClnLMDtHmOhomI9wD7gU9Vi/YA/ywzXwy8E/jziNjYVnyLyrY8Hdvz9Jpqy20n53uB\n47ueb62WtSYi1rPWmD+VmV8EyMz7MvPxzHwC+Chr3Xdzl5n3Vr/3AVdWcdzX6bqpfu9rIzbW/sjc\nlJn3VTEWcc4qg85REf//IuKNwCuB367+2JCZj2bmD6r
HN7J2Lffn5h3bhIo4nx225Vpsz1Nosi23\nnZy/BZwYESdUn9TOAa5uK5iICOAy4M7M/EDX8u7rFq8Gbuvddg6xPS0ijug8Zm3wwW2sna9zq9XO\nBa6ad2yV19PVBVbCOesy6BxdDbyhGuV5KvCjru6yuYiI04F3Aa/KzIe7lm+KiIOqx88FTgTummds\nUyimPduWa7M9T6jxtjyr0Wzj/rA2wu67rH2aeE/Lsfwya10ktwA3Vz9nAP8LuLVafjVwXAuxPZe1\n0a9/C9zeOVfAMcC1wPeAvwKObiG2pwE/AJ7RtayVc8baH5Q9wGOsXXM6b9A5Ym1U53+v/u/dCmxv\nIbZdrF0n6/x/u6Ra9+zq3/lm4Cbg387733XK91hEe7Yt14rP9jxdXI22ZWcIkySpMG13a0uSpB4m\nZ0mSCmNyliSpMCZnSZIKY3KWJKkwJmdJkgpjcpYkqTAmZ0mSCmNyliSpMCZnSZIKY3KWJKkwJmdJ\nkgpjcpYkqTAmZ0mSCmNyliSpMCZnSZIKY3KWJKkwJmdJkgpjcpYkqTAmZ0mSCmNyliSpMDNLzhFx\nekR8JyJ2RcQFszqOpNmyLUvzF5nZ/E4jDgK+C7wM2A18C3h9Zt7R+MEkzYxtWWrHwTPa7ynArsy8\nCyAiPgOcCfRt0MccdnAev3H9jEKRFtPf7nvkHzNzU8thTNSWAWLDwRlHHDKn8EbL9Y/3Xf4vn/0L\nTz6+8f/dOvb+erfrPB+1j0HH+5fP/oWpth203Tj7G3WMm+4Z/tnr5OecNHKdaZ38nJPGjqN7/UnW\n7Tbt+5jkHOSPf0o+sj8m2f+skvMW4Ptdz3cDv9S9QkTsAHYAbD1iPdf81vNmFIq0mJ71odvuaTsG\nxmjLcGB7jqevZ8PZ5bTnR459qO/yG3be8OTjdTu3jb2/3u06z0ftY9Dxbth5w1TbDtpunP2NOsbh\nb/nF4etecsPIdaZ1wyXjx9G9/iTrdpv2fUxyDh65YtfE+59Vt/ZrgNMz8/eq578D/FJm/mG/9V+0\n+bA0OUsHetaHbrsxM7e3GcOkbRlg3abDs7TkvGHvxpnt/+FLbgGm/yNfks4HmVmer1X0yBW7eOL+\nh4uonO8Fju96vrVaJmmx2JYZP8FPkty6K/rO+oOq/En1O36/46lcsxqt/S3gxIg4ISIOAc4Brp7R\nsSTNjm1ZasFMKufM3B8Rfwh8FTgI+Hhm3j6LY0manVVpy+NUrKPW6X590gq4qYp53P01fTw1b1bd\n2mTml4Evz2r/kuZjmdqySUmLwhnCJEkqzMwqZ0kqxSwr5mluWyrdrEe4azQrZ0nSUzxy7ENeBmiR\nyVmSpMLYrS1JBRvUvdxb1TZ9r7TaZXKWpIL1Xv8dlHybTspec26X3dqSJBXGylnS0uvu8h01tWWJ\n5h3fvKvmzvzk/dSds3zQvkftd1hM4+6jDpOzpJUxKOls2Lux+AS9jEZ9acg4CbLuvocl2GGvPXzJ\nLTP90hO7tSVJKoyVs6SVV3rVPM6AsGV0+Ft+carq9OFLbpn5V3h2xzYLVs6SJBXGylnSyhhU6Ywz\nBecTO+8ee9vOusP2O846kxy33/66tx10nH77r3OddlzzqG7rxjHs/c46fpOzpKXX749sd/fwEzvv\nHplIByW9YXoHmg0bkNYbV+d2EYR1AAAWC0lEQVQY3TF3f7jo3nfvfvtty7FPPd7Dl9zS932NSjyl\nJNZZ6u627vehbtbv325tSZIKY+UsaSWNc/vUoK7nzvPeCrq3gh3n253GqnorvYOQ+u37iZ139922\ne93OPsZ9X93HXwTTDiQbtr9edmtLUkN6uyc7f1zHuX47jn7JflDX86D46v7Bn2RyjXG75xfZqElI\nBr0+zr+F9zlLkrRCrJwlLb1xZqCatEqe1Dhd3LM+fke/9zpsgNkimrbqHbcanvV9ziZnSSuru2tz\nHglpmZJfE2bZLbzo7NaWJKkwJmdJS6/7SwrGsW7nNtbt3MYTO++eyaCp3lgeOfYhHjn2oSe7Smc6\nLeQY72vYIKmmYhunWu4MyprmizGGxTpqv6P2PQ9Td2tHxPHAJ4HNQAKXZubFEXE08FlgG3A38LrM\nfLB+qJJmZRnbc7+R08P+4A67rWrSBD3qlqROguw9bvfjUclhULyTzL09dPaxYw9c1nsrVpO3KM3j\n26emiWnUtrPsjq9TOe8H/n1mngScCrw1Ik4CLgCuzcwTgWur55LKZnuWChKZ2cyOIq4C/lv186uZ\nuScijgP+OjN/fti2L9p8WF7zW89rJA5pWTzrQ7fdmJnb2zh2nfa8btPhueHs9tvzONNmjtpulFHT\nfral834neS+DRms7iK2+R67YxRP3PxyTbNPIaO2I2Aa8GLge2JyZe6qX9rLWTdZvmx3ADoCtR6xv\nIgxJDajbnuPpi9mel+mrGJfpvayq2gPCIuLpwBXAOzLzgP8RuVaW9y3NM/PSzNyemduPOeygumFI\nakAT7ZkN3qEp1VUrOUfEetYa8qcy84vV4vuq7i+q3/vqhShpHlaxPd/9w+O5+4fHW2mqOFMn54gI\n4DLgzsz8QNdLVwPnVo/PBa6aPjxJ82B7lspSp//pJcDvALdGxM3Vsv8EvA/4XEScB9wDvK5eiJLm\nYCXb87HPv73tEIph70FZpk7Omfl/gEGjz06bdr+S5m8V23MTyWjdzm3s/fYLgAMTfWfZoOW9HwoG\nLe+8VvdDRBP70Hw5Q5gkSYVxWKWkldR9X+8kuqvQ7gq5+3Hv+uMsm2b5IP3i02IxOUvSGMZJxKUo\nPT6NZre2JEmFMTlLWkmTdGkvQyW699svWIr3sSrs1pakAUxmaouVsyRJhTE5S1pJfsuSSmZylrSS\nnBFLJTM5S5JUGAeESVopVsxaBFbOklbKhr0bvd6s4pmcJUkqjN3aktTD+5vVNitnSZIKY+UsaaU4\nIEyLwMpZkqTCmJwlSSqMyVnSSvFWKi0Ck7MkSYUxOUuSVJjayTkiDoqIv4mIv6ienxAR10fEroj4\nbEQcUj9MSfOwCu35kWMfcsS2itdE5fx24M6u5xcBH8zM5wEPAuc1cAxJ82F7lgpQKzlHxFbgFcDH\nqucB/BrwhWqVy4Gz6hxD0nzYnqVy1K2cPwS8C3iien4M8MPM3F893w1sqXkMSfNhe8apO1WGqZNz\nRLwS2JeZN065/Y6IuCEibvjBTx6fNgxJDWiyPfPI/tEbSBqqzvSdLwFeFRFnABuAjcDFwJERcXD1\naXsrcG+/jTPzUuBSgBdtPixrxCGpvsba87pNh9uepZqmrpwz892ZuTUztwHnAP87M38b+Drwmmq1\nc4GrakcpaaZsz1JZZnGf8/nAOyNiF2vXrC6bwTEkzYftWWpBI99KlZl/Dfx19fgu4JQm9itp/mzP\nUvucIUySpMKYnCVJKozJWZKkwpicJUkqjMlZkqTCmJwlSSqMyVmSVohzhy8Gk7MkSYUxOUvSCjn2\n+be3HYLGYH
KWJKkwJmdJkgpjcpYkqTAmZ0mSCmNyliSpMCZnSeriaGZ45NiH2g5h5ZmcJUkqjMlZ\nkqTCmJwlaYU4fediMDlLklQYk7Mk9Tj2+bcv7cCwZX1fy8bkLElSYUzOkiQVplZyjogjI+ILEfHt\niLgzIv6/iDg6Iq6JiO9Vv49qKlhJs2N7lspRt3K+GPhKZj4feCFwJ3ABcG1mnghcWz2XDrDpg7ey\n6YO3th2GDmR7lgoxdXKOiGcALwUuA8jMn2bmD4Ezgcur1S4HzqobpKTZsj1LZalTOZ8A3A/8z4j4\nm4j4WEQ8DdicmXuqdfYCm+sGqeXVRvVs1d6X7VkqyME1tz0ZeFtmXh8RF9PT5ZWZGRHZb+OI2AHs\nANh6xPoaYWgR3f9Hv9B2CMV68oPDh2Keh22sPcfTbc/LwPm121Wnct4N7M7M66vnX2Ctcd8XEccB\nVL/39ds4My/NzO2Zuf2Yww6qEYakBjTWntlQ5zO/JKiRnDNzL/D9iPj5atFpwB3A1cC51bJzgatq\nRShp5mzP/S3zZCQqW92PuG8DPhURhwB3AW9iLeF/LiLOA+4BXlfzGFoQw67j9nZjd9YdZ3n3fgd1\nh/c7dt2u8959jjp2E131mz54a5td/ivRnjfs3QhM1m27igl6mvOk5tRKzpl5M7C9z0un1dmvpPmz\nPUvl8OKQGjGo4qs7KnqcSrJ3nSaOCU+t3kdVyC1XvZrQhr0brQpVLKfvlCSpMFbOqmXc662TVpXj\n7HfQOp3nk14LHrb+/X/0CwMrcqvlxeV11f4650XtMTlrpqZNXE0k+2k+DPQ+1nIw+U7G89U+u7Ul\nSSqMlbNUsXt6uXR3WY/TTbsq1eI4A+Hs1m6fyVkqTEvTd660cRLzMiWsUdfax/1Ao9mxW1uSpMKY\nnDUXTQ+y6oyeHrbfSb59qs7+mvyWK7vW5+uRYx8auzu7s273z6T7kMYVmX2/ZGauXrT5sLzmt57X\ndhga07EfOPopyx6Pbwzd5qD8lb7rj7u8n0mPOe6xp93fsHWm8cS6b96Ymf1m7Crauk2H54azy2nP\nw7po55FUS+weXrVu/LY9csUunrj/4YmuU1k5S5JUGAeEaWp73/nAk4/vZ3h37N6e5531Ry3vV6V3\nTFulDtpu3P11v2848L33vp9pDHvPatY8JiHp7LuUStQu+MVgclZRFiExdcfYm6i1mOYxz3bbI6An\neX+lfJBYZXZrS5JUGCtnFWMRquZex37gaKvnJTGsWmyqqm6ri9uu7MVjclbrFjEpd+vEb5JeXm2O\n9m6CX4+5eOzWliSpMFbOatU4VfMrLrxoDpEM96X3nj9yHQeKrY5FrEInGZne9uA1WTlLklQcK+cJ\nHPRjT1e3Sc7H40fsP+D5uNeZS6ia4cA4JqmiraCXT52quYRq1OvPi8Fso7lblK7sQSZJ1I7mXj7T\nJre2E7MJebHYrS1JUmFqVc4R8UfA7wEJ3Aq8CTgO+AxwDHAj8DuZ+dOacWoJLHrF3M8rLrxorOoZ\nyu/iXoX23DvQqbuanKSynWbaz95151lJWzUvnqmTc0RsAf4dcFJm/iQiPgecA5wBfDAzPxMRlwDn\nAR9pJFotrGGJedEScq9O/Ivcxb3s7bk7OQ1KVNNMEDIo0U8aU7/9da83bSKfNim33QWv+t3aBwOH\nRcTBwOHAHuDXgC9Ur18OnFXzGJLmw/YsFWLqyjkz742I/wL8A/AT4GusdXv9MDM7Q3N3A1v6bR8R\nO4AdAFuPWD9tGCrcpo8O/wS+6FVzt3EGipXaxd1ke46nL3Z7nrZSbWIUdPex+1X73ceYx3Sjak+d\nbu2jgDOBE4AfAp8HTh93+8y8FLgU4EWbD8tp41C5hiXmZUrKy6DJ9rxu0+EL3Z7rdOkO6paexLD1\nexN2E8dTmep0a/9r4O8z8/7MfAz4IvAS4MiqWwxgK3BvzRglzZ7tWSpIneT8D8CpEXF4RARwGnAH\n8HXgNdU65wJX1QtRi2bTRzeufNX8igsvGvo+C/yyj6Vuzxv2bnyyyuw87q06+y1r+thNe+TYh6yU\nl9TUyTkzr2dtoMhNrN12sY61bq3zgXdGxC7Wbr+4rIE4pYU0KkGXkqRXpT0PSshNJ89O0uz+maWm\nj2PCb1+t+5wz80Lgwp7FdwGn1NmvpPmzPUvlcPpONWaVRmZPYtR90CXf/7xsmpiEZNCI6jbVud9a\nZXL6TjXCxDzaonRxL6PuLt9BXcD91pnk9TYMunbexH7VLpOzJEmFMTlLUpfee4nnXSUPG6DWvdwK\nebmZnCVJKowDwlSL15ql6fQbxNWvMu4dxNZ0xexgsjKZnDU1E/Pkxhm5rdXQLxGOM7f2OEaNJp/m\nG7g0X3ZrS5JUGJOzGjdq6krZqzBvk8wC1nY1OWwA2rjdzuMOYrMbu1x2a0sS7SflcY3qkjbhLgcr\nZ0mSCmPlLEksTsU5qsLfsHfjRO9lUd73qrFylrT0SplucxzjTkIy7T7GsSjnapmZnCVJKozd2pLU\nkGH3F/d2N087Ccm08Uz6TVreC90uk7MkTak74daZ3WtYAnQO7dVkt7YkSYWxcpakGmZd9dZRZ2BX\n27GvOpOzJE1pWUc1m5jbZ7e2JEmFsXLWRIZ9E5XzRatUk45Ultpm5SxJUmFGJueI+HhE7IuI27qW\nHR0R10TE96rfR1XLIyL+NCJ2RcQtEXHyLIOXNJlVbc+LNEOYBONVzp8ATu9ZdgFwbWaeCFxbPQf4\ndeDE6mcH8JFmwpSWT0tfrfkJVqw9d6ay7P1ZZYPOieemHCOTc2Z+E3igZ/GZwOXV48uBs7qWfzLX\nXAccGRHHNRWspHpsz9JimHZA2ObM3FM93gtsrh5vAb7ftd7uatkeekTEDtY+jbP1iPVThiGpAY22\n53j6YrTnSSrE3pm+FqWL3Cp4cdUerZ2ZGRE5xXaXApcCvGjzYRNvL6l5TbTndZsOX7r23JvkTHqa\ntWlHa9/X6d6qfu+rlt8LHN+13tZqmaRy2Z6lwkybnK8Gzq0enwtc1bX8DdUoz1OBH3V1l2kJ3P/m\nwd15X3rv+XzpvefPMRo1xPYsFWZkt3ZEfBr4VeCZEbEbuBB4H/C5iDgPuAd4XbX6l4EzgF3Aw8Cb\nZhCzpCnZnqXFMDI5Z+brB7x0Wp91E3hr3aAkzYbtWVoMzhAmSVJhTM6SJBXG5CxJUmFMzpIkFcbk\nLElSYUzOkiQVpvb0nVKvzkQkLXzj0kJxwhZJg1g5S5JUGJOzJEmFMTlrasPm2Qa7bSVpWiZnSZIK\nY3KWWmCvgqRhHK0tzdGopLz3nQ9w7AeOnlM0kkpl5SxJUmFMzpIkFcbkrFruf/NDQ0dtf+m953t9\nVZImZHKWJKkwJmc1wnueJak5jtaW5mTYB5S973xgjpFIKp2VsyRJhbFyVmM6XdubPrqx7+ur+m1V\n49zbLEndrJwlSSrMyOQcER+PiH0RcVvXsvdHxLcj4paIuDIijux67d0
RsSsivhMRL59V4CrXOLdX\nqR22Z2kxjNOt/QngvwGf7Fp2DfDuzNwfERcB7wbOj4iTgHOAFwDPBv4qIn4uMx9vNmwtgvvf/NDI\nLu5ey9LlPc4HkJa6sz+B7Vkq3sjKOTO/CTzQs+xrmbm/enodsLV6fCbwmcx8NDP/HtgFnNJgvJJq\nsD1Li6GJAWG/C3y2eryFtcbdsbta9hQRsQPYAbD1iPUNhKESjRok1mvRB42N22Vf8CCw2u05nm57\nluqqlZwj4j3AfuBTk26bmZcClwK8aPNhWScOlW/Sb1vqTnIlJ+pJr58XnJQba8/rNh1ue5Zqmjo5\nR8QbgVcCp2VmpzHeCxzftdrWapmkgtmepbJMlZwj4nTgXcCvZObDXS9dDfx5RHyAtQEkJwL/t3aU\nKtKmj24cOW1nt07VOOn3FZdWRU872rzUqtn2LJVnZHKOiE8Dvwo8MyJ2AxeyNprzUOCaiAC4LjPf\nkpm3R8TngDtY6x57qyM7l1vnWvKkSXrSBN3R1jXpOrd/lZSUbc/SYhiZnDPz9X0WXzZk/T8G/rhO\nUJJmw/YsLQan79REBo26nraLu9e4FfU490lP2h3e5OQoJVXLkhaPyVlFmfa6dMegBDuvWclMypKa\n4NzakiQVxspZRequQKetoufBSlnSLJicJ/D4EftHr7Tkho20HncWsGVS8gcHSYvLbm1JkgoTP5sM\nqMUgIu4H/gn4x7ZjGeCZlBlbqXFBubGVGhc8NbbnZOamtoKZVkT8GPhO23EMsEj//qUoNS5YnNgm\nbstFJGeAiLghM7e3HUc/pcZWalxQbmylxgVlxzaJkt+HsU2u1LhguWOzW1uSpMKYnCVJKkxJyfnS\ntgMYotTYSo0Lyo2t1Lig7NgmUfL7MLbJlRoXLHFsxVxzliRJa0qqnCVJEiZnSZKK03pyjojTI+I7\nEbErIi5oOZbjI+LrEXFHRNweEW+vlu+MiHsj4ubq54yW4rs7Im6tYrihWnZ0RFwTEd+rfh8155h+\nvuu83BwRD0XEO9o6ZxHx8YjYFxG3dS3re45izZ9W//duiYiTW4jt/RHx7er4V0bEkdXybRHxk67z\nd8ksY2tKKe3Ztjx1XLbn6eNqti1nZms/wEHA3wHPBQ4B/hY4qcV4jgNOrh4fAXwXOAnYCfyHNs9V\nFdPdwDN7lv1n4ILq8QXARS3/e+4FntPWOQNeCpwM3DbqHAFnAH8JBHAqcH0Lsf0b4ODq8UVdsW3r\nXm8Rfkpqz7blxv49bc/jx9VoW267cj4F2JWZd2XmT4HPAGe2FUxm7snMm6rHPwbuBLa0Fc+YzgQu\nrx5fDpzVYiynAX+Xmfe0FUBmfhPo/TaKQefoTOCTueY64MiIOG6esWXm1zKzM2n7dcDWWR1/Dopp\nz7blRtieJ4ir6bbcdnLeAny/6/luCmlAEbENeDFwfbXoD6vuio+30d1USeBrEXFjROyolm3OzD3V\n473A5nZCA+Ac4NNdz0s4ZzD4HJX2/+93Wfvk33FCRPxNRHwjIv5VW0FNoLTzCdiWa7A9T692W247\nORcpIp4OXAG8IzMfAj4C/HPgRcAe4L+2FNovZ+bJwK8Db42Il3a/mGt9KK3cGxcRhwCvAj5fLSrl\nnB2gzXM0TES8B9gPfKpatAf4Z5n5YuCdwJ9HxOp97VdNtuXp2J6n11Rbbjs53wsc3/V8a7WsNRGx\nnrXG/KnM/CJAZt6XmY9n5hPAR1nrvpu7zLy3+r0PuLKK475O1031e18bsbH2R+amzLyvirGIc1YZ\ndI6K+P8XEW8EXgn8dvXHhsx8NDN/UD2+kbVruT8379gmVMT57LAt12J7nkKTbbnt5Pwt4MSIOKH6\npHYOcHVbwUREAJcBd2bmB7qWd1+3eDVwW++2c4jtaRFxROcxa4MPbmPtfJ1brXYucNW8Y6u8nq4u\nsBLOWZdB5+hq4A3VKM9TgR91dZfNRUScDrwLeFVmPty1fFNEHFQ9fi5wInDXPGObQjHt2bZcm+15\nQo235VmNZhv3h7URdt9l7dPEe1qO5ZdZ6yK5Bbi5+jkD+F/ArdXyq4HjWojtuayNfv1b4PbOuQKO\nAa4Fvgf8FXB0C7E9DfgB8IyuZa2cM9b+oOwBHmPtmtN5g84Ra6M6/3v1f+9WYHsLse1i7TpZ5//b\nJdW6Z1f/zjcDNwH/dt7/rlO+xyLas225Vny25+niarQtO32nJEmFabtbW5Ik9TA5S5JUGJOzJEmF\nMTlLklQYk7MkSYUxOUuSVBiTsyRJhfn/AWVW8SJF7zXlAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8qYiiUqGWR1r", + "colab_type": "text" + }, + "source": [ + "### Visualise Pruned Model Results" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "PWwT_DbRT2ID", + "colab_type": "code", + "outputId": "c64bf2b1-f885-4378-f7e8-58fac64275e8", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 485 + } + }, + "source": [ + "# Visualise Results\n", + "rows = 2\n", + "cols = 2\n", + "fig = plt.figure(figsize=(8, 8))\n", + "for i in range(1, rows*cols+1):\n", + " img = dataset.load_image(i+5)\n", + " image = np.array(img)[:, :, [2, 1, 0]]\n", + " result = pruned_demo.run_on_opencv_image(image, objDet=\"True\")\n", + " \n", + " fig.add_subplot(rows, cols, i)\n", + " plt.imshow(result)\n", + "plt.show()" + ], + "execution_count": 58, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAecAAAHVCAYAAADLvzPyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3WvQZXV14P/vohtoUBDBtul0MwFH\nRgcTL1SX45QZTYXJSMARDF4wqYjK2NEyjsaZEYwvwKp/ShlnMMSZkbTiiFNGUZCCik6QYbzUvIDY\nGOSOtgRid3XTBC9YUZCG9X9x9oHTT5/73ufs3znP91PV1efZZ1/W2d2/Zz1r7d/eT2QmkiSpHAe1\nHYAkSdqfyVmSpMKYnCVJKozJWZKkwpicJUkqjMlZkqTCzCw5R8SpEXFPROyIiPNndRxJs+VYluYv\nZnGfc0SsAb4H/DawE/g28KbMvLPxg0maGcey1I5ZVc4vBXZk5r2Z+UvgC8AZMzqWpNlxLEstWDuj\n/W4Cftjz9U7gXwxa+cijI5993IwikRbUD27lHzJzfcthTDSWAQ496JB82prDZhqUtEj+8fFf8OgT\nv4xJtplVch4pIrYCWwHWb4KP/nVroUhF+t1f2Xd/2zGMq3c8H37QOn77mS9rOSKpHNf/+MaJt5lV\nW3sX0FsLb66WPSkzt2XmlszccuQxM4pCUl0jxzLsP54PPeiQuQUnLatZJedvAydGxAkRcQhwNnDt\njI4laXYcy1ILZtJLzsx9EfFHwHXAGuDTmXnHLI4laXYcy1I7ZnahNzO/Cnx1VvuXNB/LNpav2Hsd\nAG989qtajkQazFlYklalK/ZeN/cEvSg/GHTjHKRO/IP2PWqfo2IaZx+LxMd3SpJUGCtnSavKMlVX\nTRtV2Y9TvY7a/6h9j/r3WS3/fiZnSdJYuolx0ksCi9LOL4ltbUmSCmPlLGnhDWu3rqzWBlVx/Zb3\n7neSVm/TE6bqtoLHPW4Jle2oOKadULZoTM6SFtqgb+Z1ro+Om/RWHnsW12Sv2HvdWNeCly05DbJa\nzoFtbUmSCmPlLGkhTVPdjlPZjlOBDTr2yuOMW8kNW/+Nz37Vwrdyp51I1m8fwyzTxDOTs6SlNKsk\nME6CmaTN3ru8blu8rlk9fGSczzjqB6hxYmv7/DXJtrYkSYWxcpa0apRaXZXShp1mlnQT+x826W3W\nD0YplZWzJEmFMTlLkoD9K9iVul2HWVSqTd2nvUxsa0taaONMMJp0stHKr3uTxzizpyeZNTyq1d77\n/qQPSBlH777HTXDT3ms87J70pmbILwsrZ0mSCmPlrFpeu/Gxoe9fvfvgmex30n139zdtPCrDlQ88\nMva63UpqTb4SHhh/X1fsva6zTa8+24885pDtJv0co/bX1NOxmqhC+1W4vfuddN8rY5rkUa2LzOSs\nqb1242NDk904CXaUQft/7cbHxk64TcShsjwe3xx79u7rNqx76ou9T30Dfx3r+ibJAxJzn2MPWufx\n+ObQbXuP3RvvqFZw72foxjzLRDTtzOhx2/iT7nfYdsuUkHvZ1pYkqTBWzpqZq3cfPLN2cu++B+k9\nttXz6jGqkhrUUh6nYh613qh99CrpyWCzNs4EuUkm0S3TYzoHWZjkPOib67jf9CfZflS7dpJ1Jjnm\nyvd6tx/W3h1n/911m0iS87x+OyjmcT6L15dVmn4/GCxzghk3iU4yU3yZW9m9bGtLklSYqSvniDgO\n+CywAUhgW2ZeEhFHA1cAxwP3AW/IzB/XCXJYlTSqihtVkc7KypgnOeY4lemwz1XnfJWiG1/vxK9+\n76sZ8xzP8zbJzOg2XPnAI/tPWmtJ6RXpamhl96rT1t4H/IfM/E5EHAHcHBHXA28BbsjMj0TE+cD5\nwHn1Q53coCQ1y2uQg5Jfb7IZtf20LfVRx1jUhDZtW1sTKX48T+qA25pq6u5nnNna4xxzv/090Hnd\nL0nXeXDHOMa9fWrY8cZ9/vU08a62pNw1dVs7M3dn5neq1z8D7gI2AWcAl1erXQ6cWTdISbPleJbK\n0siEsIg4HngJcBOwITN3V2/todMmq21Qa3PcyVWz1tQxx71veNlnH0/b2ld98xjP0+htT6/JV/L4\n3uH3FDdVMfcz6H7maY/55P72DtjnkIePQDNVZRO/sWtldV33ASKTzmhfpuq6dnKOiKcDVwHvzcyH\nI+LJ9zIzIyIHbLcV2AqwftPwY4wzU3k1fYNe5s86bsL19qjZaGI8H37Q7K+fvm7DOt7Igd+IR11f\nHpQ8J0mq0ybgSY/dbXH3PrAEZvsQkmn3Oc5s7Fnsd5nVmq0dEQfTGcify8wvV4sfiIiN1fsb6fuz\nIGTmtszckplbjjymThSSmtDUeD70oEPmE7C0xKZOztH5kfoy4K7MvLjnrWuBc6rX5wDXTB/ecMtc\nQU7irL/YxFl/MaL9MANWrsujhPFcx5UPPFL8rOxJLeNn0vjqtLVfDvwBcFtE3FIt+xPgI8AXI+Jc\n4H7gDfVClDQHjmepIFMn58z8f0AMe
PuUaffbz6S3zfTeUtS7XW91+cSFBy7rfW9QJfrEhff13V/n\nNlB44sJONXnQhccP2H7wMQfF0+8Y/fbf3faJC+/r+/4TF97XSLehidvCprk/XbMzz/HctHGqy8u+\nUu61s3NPf2jo+6XcB635KvbxnSsT6ahEMCjh9W7XTYCjHHTh8fsl4ZXvdfbV//2njjX8/bqG7X/Q\nDwbw1PkYtM5Vf7hr7BiamJS1MoGvTPzD9m/iXt3GbfmWnJihE9+oBK3Vx8d3SpJUmGIrZ3iqihtW\nIQ1rAw+rIEdVtsO2Hfb+qO2mPd4sjtFbJU87oWza6nXev7TCKnv1Kb1i7tWNdVAF3e0S2N5ePYpO\nzv38zZZ//uTri97+cIuRLL5uQp6klS2VwFnMWna2tSVJKszCVc5gxbzSeZ88EvC8SLBY7exJ2TFY\nPRYuOddJQN0kBsCFfZYtuEGfxaQtLQZnbqvLtrYkSYVZmMp5ktnE41TDH9j0wjrhSJI0MwuTnEdZ\npva0pOksw/XmUbdVaXWwrS1JUmFMzpIkFcbkvAqc98kjh7b92/h1k5Lq8baq5WZyliSpMAsxIWxY\nZedEMEnLyIlhq5uVsyRJhVmIylmShlmGW6ikXgtROQ/7rUkXvf1hH08prXLnnv6Q7V8tlYVIzpIk\nrSYL0dZ2QpgkaTVZiOQsScN4zVnLxra2JEmFqV05R8QaYDuwKzNfHREnAF8AjgFuBv4gM39Z9ziS\nZm9Rx7OTwbRsmqic3wPc1fP1RcDHMvO5wI+Bcxs4hqT5cDxLBaiVnCNiM3A68Knq6wB+C7iyWuVy\n4Mw6x5A0H45nqRx1K+c/A94PPFF9fQzwk8zcV329E+g71ToitkbE9ojY/rAdKakEjYznR58orust\nLZypk3NEvBrYm5k3T7N9Zm7LzC2ZueVIJ1pKrWpyPB960CENRyetPnUmhL0ceE1EnAasA44ELgGO\nioi11U/bm4HBj/eSVArHs1SQqSvnzPxAZm7OzOOBs4H/m5m/D3wdeF212jnANbWjlDRTizCeX7dh\nXVuHLpLnY7nN4j7n84D3RcQOOtesLpvBMSTNh+NZakEjTwjLzG8A36he3wu8tIn9Spq/RR/Pi/60\nMO/ZFviEMEmSimNyliSpMCZnSZIKY3KWtHCGzVQ+9/SHFva67ai4naG9epicJUkqjMlZkqTCNHIr\nlSTNW7fFe+UDj/R9v7dFXOLtVW959ZcB+Mxf/a7tbB3AylmSpMJYOUtaaK/bsG5g9dzVrUybqKDf\n8uov85m/+t3a++ha9Kr5ir3X8cZnv6rtMEa6Yu91A9+rG/8s9m1ylrTwRrW4u849/aEiWtzjtLLV\njG7iHJYkx1lnmu2u2HsdV+y9ji1btky0X7CtLUlScaycJS2NSVrcMN+JYpNWyqW3szXaG5/9qqEt\n72FMzpKWyrgtbhicMFcm7d5rxCu/7r3+3F2+Jl/55LLH45ud9wYs713WG3/XoG/ub+RVfdfrbbH2\n23ac1u3AY055/XTS/U3bZl4mtrUlSSqMlbOkpdRbgY5TRfdaWVGfu6Li3a/aPf2pl29ZsZ8D1h2g\nXwt73Eq4n2m3rXPMcfc3ar91jldXSTPPrZwlSSqMlbOkpTfJdegmDLqe3M+ga8z9KrhxJhiNqv4G\nvT9oeXfZpBXtOFVov8/aRuU67WecJZOzpFVjnNncTRgnKffTRFt10PaDEtAsJ1+VlOxmYZZJ3ba2\nJEmFsXKWtKrUmSjW1HEB2Du3Q7emlMlVo9TtHozz9LFJmZxXsScuvA+Agy48vtU4pEnNM6k2ZVDM\nK5cv4mdT82xrS5JUmFqVc0QcBXwK+DUggbcB9wBXAMcD9wFvyMwf14pS+/nwrlv5wKYX1tpHt2pe\nBq/d+BhX7z647TBGeu3Gxwa+V0L8izCei3ik5d4hcVSt6rHi7LPuG3kVj+/tzPQe2CptuB3eO6lp\n3m3oQe3kJiepjfp8084qHyfO7r7v/fH3J467blv7EuCvM/N1EXEIcDjwJ8ANmfmRiDgfOB84r+Zx\n1DBb2fPTTcrDEvA468yB43mFq965oe/y3uuIay9489jbTXuMSbYb59izPOZZn3gAGO+2r0keTtKE\naW4Hm8V+xzF1WzsingG8ArgMIDN/mZk/Ac4ALq9Wuxw4s26QkmbL8SyVpU7lfALwIPA/I+JFwM3A\ne4ANmbm7WmcP0PdHrYjYCmwFWL+pRhSSmtDYeD78oAJazw3rVoMAb/zE/tXeWT2vu++dxWiD1l25\n/4Exjbm/fsftt86o416x97r9zkO//a2srKdpS8+qtT6LWGZ5GaBOcl4LnAy8OzNviohL6LS8npSZ\nGRHZb+PM3AZsA3jui/qvo6d8eNetA79eef155TXp7rofZv929riztQddK13Zgu3Xmu237Tit23GP\nOa5p9rco17Ib0th4PvrgZzieZ+iOj18AwAve/aGWI9Es1ZmtvRPYmZk3VV9fSWdwPxARGwGqv1fB\n3XzSwnM8SwWZunLOzD0R8cOIeF5m3gOcAtxZ/TkH+Ej19zWNRLrKrayEx52t3bvuRW9/eKJjjlsJ\nD9t+0m3rHnOcOEbtt+7x6mirWnc8L4ar3rkB7rwUgLvfuaFvm7mO3olNJT5vejWpO1v73cDnqpmd\n9wJvpVONfzEizgXuB95Q8xiaQreVPe0tV4OSxNW7Dx6ZvEbNPB6WLAcds3e/45p2lnQbyXHaz9gw\nx3OhRs2SbipJ9ybkWT5zW6PVSs6ZeQuwpc9bp9TZr6T5czxL5fDxnUuqTsUM9arHQdsOqg5nfY9v\ny9WoVMu49y032eK2Wm6fj++UJKkwVs5aeotwO1QhTwhTQSZ5yljv+k1PElM7rJwlqSBXvXPDxIl5\n5fZafCZnSZIKY3LWfq7effBYt0st+jG7XrvxsZnf7zzq83VvIbOlrXE8/6R38PyT3tF2GJqxhbjm\nfNUf7uKsv+j/AO7ugzXO++SR8wxpbj767158wLIPs//jO1f+VpwPD9gOvjXyeK+/9E0A7PvQ6OTU\nXbdr34f6L19p34f6rzPNMYftD95cvT94n0+du6e23/ehzw6NoQ5njqspzz/pHdxdPZBkJa8/Lz4r\nZ0mSCrMQlbPgP33qlidff+BT+9/D/J9WrPuBT73wgGUAF719/6/7/cKLL73j8wBcvfvzI2P60orO\nWnebjz/6+gPWPfY9X9pvvZXbjnPM12587Mn4Vm7Xb3/j7Lffdmsv6L43+hxMwra1Rhk1mct29uqx\nEMl5UEsblredLWn1mDYpd5fb3l4+trUlSSrMQiTnq/5w18D3Lnr7wxP/tiXNxp5LXs+eS/Zvab/7\n0LW8+9C1BywfpDt7uncW9bAZ1dKia6KVPWod731ePAvR1tZiePehg/87vfvQtXxp4LtPWfn87bZ+\nhaIktWkhKmdJklYTK2c1YtS9zb3rjDML2mpZmoyTw5aLyVm1jJOU+23T9G1K0qLxtikNY1tbkq
TC\nLE1yHjRru+nZ3M4Of8o0VXPvtnW2l6RltjTJWZKkZbEwyfmqP9w19H7nrt6qdtDr7teDKuDue/22\nGbS/1cSqV6pn2PXmur91ynuel8NSTggblnT7Les+AnTQ+6OO0/YjREfFMeoHiWG/9WsaH3903wHL\nBt0D7eQwrSbzmgTmzO3FtzCVsyRJq0Wtyjki/hj4d0ACtwFvBTYCXwCOAW4G/iAzf1kzzkYru5Xq\ntqjH2X5QdT6sau/4VvX+6P1N+jnGuUyg1WOe43k1GtXKnoVxKmir5zJNnZwjYhPw74GTMvMXEfFF\n4GzgNOBjmfmFiLgUOBf4RN1Au4lklkl6liZptdfZ36wNu9bcr53d+96w1jY0/ysaNb55j2dJw9Vt\na68FDouItcDhwG7gt4Arq/cvB86seQxJ8+F4lgoxdXLOzF3AfwH+ns4g/imdttdPMrNbQu0E+pa6\nEbE1IrZHxPaHHxr/uL2t2O4Mbtuz0znrLzaN3YloYob2xx/dN7S6dgZ4e5ocz48+Yde7NMPa5le9\nc4MzuAtUp639TOAM4ATgJ8CXgFPH3T4ztwHbAJ77osh+64xKHIva4i7NqPM4TtIclnT7rWuLuyxN\njuejD35G3/G8WvmYTk2jTlv7XwN/l5kPZuZjwJeBlwNHVW0xgM2AZa1UPsezVJA6s7X/HnhZRBwO\n/AI4BdgOfB14HZ0ZnucA10yz86Zb1ctcZU8zWa6p8ztJxdxvOyvoYsx0PEuazNTJOTNviogrge8A\n+4C/pdPW+grwhYj4/6pllzURaF1tX5fuJs6VcQxa3jUoSfXbrunPOKqdPW1iVnkWbTwvijZun5qW\nt1WVpdZ9zpl5AXDBisX3Ai+ts19J8+d4lsqxlI/vLNGgqnbaarftToCkxTLqgSTgYz1LYnJWX9M+\nbERS2TO0n3/SO4YmaJXBZ2tLklQYk7NaNaoK98EkWiSjHuhR99dBNmVUHD6UpH0mZ0mSCmNyVuvG\neaynFbSk1cQJYdqP9zZL0yl5EtggwyaHOXO7XVbOkiQVxuQsYHTreFTruQlODpOkDtvaKqqV7TO3\ntWgWsZ3da9TDSWxvt8PKWZKkwpicJWlKi1419xoVq/c+z5dt7VWspHa2pPbZ4i6HlbMkSYUxOatI\n4zyYRJKWlW3tVWpRWtoff3SfM7dVnGW61qwyWTlLklQYk7OKZ4tbJVmE3zpVlzO322dbe5VZlHa2\npHY5c7tdVs6SJBXGynkVGfXs7NI5OUxtchKY5snKWZKkwoxMzhHx6YjYGxG39yw7OiKuj4jvV38/\ns1oeEfHnEbEjIm6NiJNnGbzGM85vnNLq4HjWpEZNcnNy2GyMUzl/Bjh1xbLzgRsy80TghuprgN8B\nTqz+bAU+0UyYUoczt2v7DI7nia2GGdoqy8jknJnfAn60YvEZwOXV68uBM3uWfzY7bgSOioiNTQUr\nqR7Hs7QYpr3mvCEzd1ev9wDdHys3AT/sWW9ntewAEbE1IrZHxPaHH5oyCklNaHQ8P/rEL2cXqVoz\nqrVte7tZtSeEZWYCOcV22zJzS2ZuOfKYulFokD2XvL7tELRAmhjPhx50yAwiK9Nqa2fbwp+faZPz\nA932VvX33mr5LuC4nvU2V8sklcvxLBVm2uR8LXBO9foc4Jqe5W+uZnm+DPhpT7tMLRh0XzCMnlxV\nslGTwpwYNhHHcyEGPY3r7jsvHfheG2xxz97Ih5BExOeB3wSeFRE7gQuAjwBfjIhzgfuBN1SrfxU4\nDdgB/Bx46wxi1hhWw2M6u5/BB5OMz/E8nnGTyywS5rB9Nn283iR7952X2rIuyMjknJmDvsuf0mfd\nBN5VNyhJs+F4lhaDj+9cQqvtgSPDHusJnfNh9axJjPqlD8ti5edb9s+7SEzOS2Q1tLIHscWtptzx\n8QvaDkHy2dqSJJXGylmSerzg3R968vXdzjqemL/fuRlWzpIkFcbkLElSYWxrL4EnH9F5aLtxSMtm\n2Vu0vfdzn/WJB6Z6eMiyn6O2WDlLklQYK+clMOweX1juW6hW8p5naXwrq16r4HJYOS+41Xxv8yCj\nnhnus7cllc7kLElSYUzOC2rPJa/3dzWPME4FLUkl8przgvI68/iGXYf2sZ6SSmTlLElSYUzOC8hJ\nYJK03EzOkiQVxuQsSVJhnBC2QGxnT2+c3/fspDBJpbByliSpMCZnSZIKY1t7QQxradvOHt849zxL\nUtusnCVJKszI5BwRn46IvRFxe8+yj0bE3RFxa0RcHRFH9bz3gYjYERH3RMSrZhW4NK1Rj/VcZo5n\naTGMUzl/Bjh1xbLrgV/LzBcC3wM+ABARJwFnAy+otvkfEbGmsWh1gNWaZJow6twtaZv7MziepeKN\nTM6Z+S3gRyuWfS0zu9/ZbgQ2V6/PAL6QmY9m5t8BO4CXNhivpBocz9JiaGJC2NuAK6rXm+gM7q6d\n1bIDRMRWYCvA+r5ryElgakHt8Xz4QetmGZ+0KtSaEBYRHwT2AZ+bdNvM3JaZWzJzy5HH1IlCUhOa\nGs+HHnRI88FJq8zUlXNEvAV4NXBKZma1eBdwXM9qm6tlkgrmeJbKMlVyjohTgfcDr8zMn/e8dS3w\nlxFxMfArwInA39SOcpXZc8nrOy8ObTcOrQ6OZ6k8I5NzRHwe+E3gWRGxE7iAzmzOQ4HrIwLgxsx8\nR2beERFfBO6k0x57V2Y+Pqvgl9GTiVlF6P57HPueL7UcSTMcz9JiGJmcM7PfrKTLhqz/p8Cf1glK\n0mw4nqXF4OM7F9SgR1B29c7m7rfuoMdYDtpu0uUr3+t9v+2Z5qPOnSS1ze9ShWi6nT0qAQ16v6nl\n025Tqj2XvH5pWtuSyueztSVJKozJuWBtt39XO8+/pLYsXn9xyTg7e3Es28xtSeWycpYkqTDx1MOA\nWgwi4kHgH4F/aDuWAZ5FmbGVGheUG1upccGBsf1qZq5vK5hpRcTPgHvajmOARfr3L0WpccHixDbx\nWC4iOQNExPbM3NJ2HP2UGlupcUG5sZUaF5Qd2yRK/hzGNrlS44Lljs22tiRJhTE5S5JUmJKS87a2\nAxii1NhKjQvKja3UuKDs2CZR8ucwtsmVGhcscWzFXHOWJEkdJVXOkiSJApJzRJwaEfdExI6IOL/l\nWI6LiK9HxJ0RcUdEvKdafmFE7IqIW6o/p7UU330RcVsVw/Zq2dERcX1EfL/6+5lzjul5Peflloh4\nOCLe29Y5i4hPR8TeiLi9Z1nfcxQdf17937s1Ik5uIbaPRsTd1fGvjoijquXHR8Qves7fpbOMrSml\njGfH8tRxOZ6nj6vZsZyZrf0B1gA/AJ4DHAJ8FzipxXg2AidXr48AvgecBFwI/Mc2z1UV033As1Ys\n+8/A+dXr84GLWv733AP8alvnDHgFcDJw+6hzBJwG/G8ggJcBN7UQ278B1lavL+qJ7fje9RbhT0nj\n2bHc2L+n43n8uBody21Xzi8FdmTmvZn5S+ALwBltB
ZOZuzPzO9XrnwF3AZvaimdMZwCXV68vB85s\nMZZTgB9k5v1tBZCZ3wJ+tGLxoHN0BvDZ7LgROCoiNs4ztsz8WmZ2H+J9I7B5Vsefg2LGs2O5EY7n\nCeJqeiy3nZw3AT/s+XonhQygiDgeeAlwU7Xoj6p2xafbaDdVEvhaRNwcEVurZRsyc3f1eg+woZ3Q\nADgb+HzP1yWcMxh8jkr7//c2Oj/5d50QEX8bEd+MiH/VVlATKO18Ao7lGhzP06s9lttOzkWKiKcD\nVwHvzcyHgU8A/xR4MbAb+K8thfYbmXky8DvAuyLiFb1vZqeH0sr0+4g4BHgN0P2tEKWcs/20eY6G\niYgPAvuAz1WLdgP/JDNfArwP+MuIOLKt+BaVY3k6jufpNTWW207Ou4Djer7eXC1rTUQcTGcwfy4z\nvwyQmQ9k5uOZ+QTwSTrtu7nLzF3V33uBq6s4Hui2bqq/97YRG51vMt/JzAeqGIs4Z5VB56iI/38R\n8Rbg1cDvV99syMxHM/Oh6vXNdK7l/rN5xzahIs5nl2O5FsfzFJocy20n528DJ0bECdVPamcD17YV\nTEQEcBlwV2Ze3LO897rFa4HbV247h9ieFhFHdF/TmXxwO53zdU612jnANfOOrfImelpgJZyzHoPO\n0bXAm6tZni8DftrTLpuLiDgVeD/wmsz8ec/y9RGxpnr9HOBE4N55xjaFYsazY7k2x/OEGh/Ls5rN\nNu4fOjPsvkfnp4kPthzLb9BpkdwK3FL9OQ34X8Bt1fJrgY0txPYcOrNfvwvc0T1XwDHADcD3gf8D\nHN1CbE8DHgKe0bOslXNG5xvKbuAxOteczh10jujM6vzv1f+924AtLcS2g851su7/t0urdc+q/p1v\nAb4D/Nt5/7tO+RmLGM+O5VrxOZ6ni6vRsewTwiRJKkzbbW1JkrSCyVmSpMKYnCVJKozJWZKkwpic\nJUkqjMlZkqTCmJwlSSqMyVmSpMKYnCVJKozJWZKkwpicJUkqjMlZkqTCmJwlSSqMyVmSpMKYnCVJ\nKozJWZKkwpicJUkqjMlZkqTCmJwlSSqMyVmSpMKYnCVJKszMknNEnBoR90TEjog4f1bHkTRbjmVp\n/iIzm99pxBrge8BvAzuBbwNvysw7Gz+YpJlxLEvtWDuj/b4U2JGZ9wJExBeAM4C+A/qYw9bmcUce\nPKNQpMX03b2P/ENmrm85jInGMkCsW5txxCFzCk8qX/7sl+Qj+2KSbWaVnDcBP+z5eifwL3pXiIit\nwFaAzUcczPW/99wZhSItpmf/2e33tx0DY4xl2H88x9MPZt1Zjmep65Grdky8TWsTwjJzW2Zuycwt\nxxy2pq0wJDWgdzyzblY/80urx6yS8y7guJ6vN1fLJC0Wx7LUglkl528DJ0bECRFxCHA2cO2MjiVp\ndhzLUgtm0n/KzH0R8UfAdcAa4NOZeccsjiVpdhzLUjtmdnEoM78KfHVW+5c0H45laf58QpgkSYUx\nOUuSVBiTsyRJhTE5S5JUGJOzJEmFMTlLklQYk7MkSYXxIbiSpOL9/NJbh75/+Dte2Oj+JjnGsH1N\nGleXyVmSVKxu4psmMQ4zSdIcdIxZxQa2tSVJKo6VsyRpYXWr1p9feuvULeRBRlXG4+5jy/YtE29n\n5SxJUmGsnCWpx6DrhONUT5N0vbzXAAAUeElEQVRMDBq3KhunIpwk5pX769122HFWHmPUddYmqthZ\nVMNNGhXb4e944dTXnU3OksT4k3uGvT8oAc9Kv5gmOeYkiX/lMaY5H4ukifi7+3jk/h0Tb2tbW5Kk\nwlg5S1INwyrIOm3NcY477THHaT1P+7kWuVpuShOtfZOzJPXol3QOf8cLx7p+Ow9NfOOf5Br2rFvz\npal7fpv6P2FbW5Kkwlg5SxLjz1ReLW3b1fI5m9LkLHUwOUvSSMOusT5y7MMArNtz5AHL+q037vJR\n60y73crlvXGXqOmkN+pY0xxnFpc3bGtLklQYK2dJYroK7aALj+eJC+978nWTuvvtd8zu+00cs1/l\n39spmPS+7yYr3SZmns+y8p7lvqdOzhFxHPBZYAOQwLbMvCQijgauAI4H7gPekJk/rh+qpFlZreO5\nX9t3VDIY1CoelEwH6U2y07w/zTGHGdTqnvaWqaZavb3Pzq5jWDzTJNlJZrTP+9na+4D/kJknAS8D\n3hURJwHnAzdk5onADdXXksrmeJYKEpnZzI4irgH+W/XnNzNzd0RsBL6Rmc8btu2LNxyW1//ecxuJ\nQ1oWz/6z22/OzMl/5G5AnfF80PrDc91ZizGe+7V0h603jVm1vZu2bs+RY58PTeaRq3bwxIM/j0m2\naeSac0QcD7wEuAnYkJm7q7f20GmT9dtmK7AVYPMRBzcRhqQG1B3P8fTFHs+PHPvwk8mpTlJeFCbi\nMtWerR0RTweuAt6bmfv9T85OWd63NM/MbZm5JTO3HHPYmrphSGpAE+OZdc4zleqqNYoi4mA6A/lz\nmfnlavEDEbGxpw22t26QkmZvNY/n+35yHMc+/44nv55nxbzn7hdMtV1vvHXc95PjADj+qB82sj81\nY+rKOSICuAy4KzMv7nnrWuCc6vU5wDXThydpHhzPUlnqVM4vB/4AuC0ibqmW/QnwEeCLEXEucD/w\nhnohSpqDVT2em6pCJzFtxdy7fRNxt/HZNdrUyTkz/x8waPbZKdPuV9L8OZ5np98s7bqJeeV+TLDL\nx8d3SpJUGKdVSlqVJpn01VSlOyuTxDdOld17O5naYXKWtKosU1KeRlPXqjVbtrUlSSqMlbMkrbCM\nFXMvJ5KVz8pZknose2LWYjA5S5JUGJOzJEmFMTlLklQYk7MkSYUxOUuSVBiTsyRJhTE5S5JUGJOz\nJEmFMTlLklQYk7MkSYUxOUuSVBiTsyRJhTE5S5JUGJOzJEmFMTlLklSY2sk5ItZExN9GxF9VX58Q\nETdFxI6IuCIiDqkfpqR5cDxLZWiicn4PcFfP1xcBH8vM5wI/Bs5t4BiS5sPxLBWgVnKOiM3A6cCn\nqq8D+C3gymqVy4Ez6xxD0nw4nqVy1K2c/wx4P/BE9fUxwE8yc1/19U5gU81jSJoPx7NUiKmTc0S8\nGtibmTdPuf3WiNgeEdsf+sXj04YhqQFNjmce2Td6A0lDra2x7cuB10TEacA64EjgEuCoiFhb/bS9\nGdjVb+PM3AZsA3jxhsOyRhyS6mtsPB+0/nDHs1TT1JVzZn4gMzdn5vHA2cD/zczfB74OvK5a7Rzg\nmtpRSpopx7NUllnc53we8L6I2EHnmtVlMziGpPlwPEstqNPWflJmfgP4RvX6XuClTexX0vw5nqX2\n+YQwSZIKY3KWJKkwJmdJkgpjcpYkqTAmZ0mSCmNyliSpMCZnSZIKY3KWJKkwJmdJkgpjcpYkqTAm\nZ0mSCmNyliSpMCZnSZIKY3KWJKkwJmdJkgpjcpYkqTBr2w5AkjRfxz7/jrZD0AhWzpK0ipiYF4PJ\nWZKkwpic
JWkV2XP3C9oOQWMwOUuSVBiTsyRJhamVnCPiqIi4MiLujoi7IuJfRsTREXF9RHy/+vuZ\nTQWr5bD+Y7e1HYL6WC3jed2eI1m358iB7y/7hKll/3zLom7lfAnw15n5fOBFwF3A+cANmXkicEP1\ntbSf9R+7rZUkvQg/GLR1blgl4/mRYx/mkWMfHvi+12QZ+sOL5mPq5BwRzwBeAVwGkJm/zMyfAGcA\nl1erXQ6cWTdISbPleJbKUuchJCcADwL/MyJeBNwMvAfYkJm7q3X2ABvqhahl8+Af/3rbIRRr/cdu\na+v8OJ5XiT13v8DW9gKo09ZeC5wMfCIzXwL8IytaXpmZQPbbOCK2RsT2iNj+0C8erxGGpAY0Np55\nZN/Mg9X0TMyLoU5y3gnszMybqq+vpDO4H4iIjQDV33v7bZyZ2zJzS2ZuOeawNTXCkNSAxsYz63wq\nsFTX1KMoM/dExA8j4nmZeQ9wCnBn9ecc4CPV39c0EqmKNmgCU78W7aDWbb/lvfsd1O5deey6beF+\nn2XYsZtoQ3eP2VbLfzWN5+5kp0GTwnoryxInh3Xj643t2OffccDX/dYZZt2eI4dOlNN81f0R993A\n5yLiEOBe4K10qvEvRsS5wP3AG2oeQwUbllTqzDgeJ1n1W6fpY3aXDUvCbSfWBq2K8TxJAiq5Bbwy\ntn6xjhN/N4Eff9QPmwlMjaiVnDPzFmBLn7dOqbNfSfPneJbK4cUhTW2clu6kVeW46w869oN//OtT\nVbLD9tcb16D3tThGtbWlEvj4TkmSCmPlrJmZpqpsohKftGLu91rLzwlQHSVfV1/NTM4StqdXq36P\nqVxtCdtHdZbJtrYkSYUxOUsFsbXevu5vrSq1ouwXW7+vBy3rXV7qZ5Rtbc1Y0/cA986eHnXf8Zp8\n5QHvPQ4ce/HRAOx534/G2t+w95t6CMmoWeFqR8nJa1SCHrRs2HKVw8pZkqTCROdZ9u168YbD8vrf\ne27bYWgM3aqz6/H45tD1+1av8c2Jlvcz7LjD9jHs2MMM2mfvduPGPkp3nxFxc2b2eyhI0Q5af3iu\nO8vxLHU9ctUOnnjw5zHJNra1NbE97/vRk68fZHhLd0+fZQ/y6xMt7+r9wWDaRDhou1H76/3MvXo/\n/7DYx3XsxUc7c1ySbW1Jkkpj5awirWyft21lPIMqaUlqgslZRSktKQ/SG6eJWlLTbGtLklQYK2cV\no6mq+fQLLhprva986LxGjtd737QkNcHkrFZNmpDHTbxN7GvS5H3sxUeboCU1wra2JEmFsXJWK6Zp\nYTdZNU96vHGraFvckppg5SxJUmGsnCew5meeLpjuPDx+xL4nX49TNc+7Sh5lZTyjKmlvtZJUh9lG\nRSktKQ8yScvbVrekSdnWliSpMLWSc0T8cUTcERG3R8TnI2JdRJwQETdFxI6IuCIiDmkqWC2uYy8+\n+sk/gyxK1bzS6RdcNFbspT/9zPEslWPq5BwRm4B/D2zJzF8D1gBnAxcBH8vM5wI/Bs5tIlAtpvWf\nPJL1nxz+i93HTW6lW+TP4HiWylK3rb0WOCwi1gKHA7uB3wKurN6/HDiz5jEkzYfjWSrE1Mk5M3cB\n/wX4ezqD+KfAzcBPMrM7NXcnsKnf9hGxNSK2R8T2h37x+LRhqGCjKmZY7Gqzn1FdgFGt/bY0OZ55\nZF+/VSRNoE5b+5nAGcAJwK8ATwNOHXf7zNyWmVsyc8sxh62ZNgwtqGVpZQ8yTpIuSZPjmXXeBCLV\nVaet/a+Bv8vMBzPzMeDLwMuBo6q2GMBmYFfNGCXNnuNZKkid5Pz3wMsi4vCICOAU4E7g68DrqnXO\nAa6pF6K0uBaoxe14lgpS55rzTXQminwHuK3a1zbgPOB9EbEDOAa4rIE4tUBGzdBe5nZ2P4vQ4nY8\nS2WpdXEoMy8ALlix+F7gpXX2K2n+HM9SOZy5ocaMcz+zJGk0H9+pRpiYJak5JmdJkgpjcpYkqTAm\nZ0mSCuOEMNXitebxnX7BRQN/93MJt1NJKoeVs6ZmYpak2TA5S5JUGJOzJEmFMTlLc7Tsv41LUjNM\nzpIkFcbkLLVggX5blaQWeCuVJuYsbUmaLStnSZIKY3KWJKkwJmdJkgpjcpYkqTAmZ0mSCuNsbTXG\nWdqS1AwrZ0mSCmNyliSpMCZnSZIKMzI5R8SnI2JvRNzes+zoiLg+Ir5f/f3ManlExJ9HxI6IuDUi\nTp5l8NIia+OXYDiepcUwTuX8GeDUFcvOB27IzBOBG6qvAX4HOLH6sxX4RDNhSmrIZ3A8S8UbmZwz\n81vAj1YsPgO4vHp9OXBmz/LPZseNwFERsbGpYCXV43iWFsO015w3ZObu6vUeYEP1ehPww571dlbL\nDhARWyNie0Rsf+gXj08ZhqQGNDqeeWTf7CKVVona9zlnZkZETrHdNmAbwIs3HDbx9tKi+8qHzms7\nhAM0MZ4PWn+441mqadrK+YFue6v6e2+1fBdwXM96m6tlksrleJYKM21yvhY4p3p9DnBNz/I3V7M8\nXwb8tKddJqlMjmepMCPb2hHxeeA3gWdFxE7gAuAjwBcj4lzgfuAN1epfBU4DdgA/B946g5glTcnx\nLC2Gkck5M9804K1T+qybwLvqBiVpNhzP0mLwCWGSJBXG5CxJUmFMzpIkFcbkrMZ85UPnFXnvriQt\nGpOzJEmFqf2EMEmTG9Zh2PO+zqOvnz2vYCQVx8pZE3vw7Q/z4NsfHvi+7W1JqsfkLElSYUzOkiQV\nxmvO0hzZ7pc0DitnSZIKY3KW5mRY1Xz6BRdx+gUXzTEaSSUzOWtqw2Zsgy3cSVz2s7dz2c/e3nYY\nkgphcpYkqTAmZ82U9zxL0uRMzqpl1ANJukzQkjQ+k7MkSYUxOasR41bPVtCSNJoPIVFjugl6/SeP\nbDmSsoz6gaT7iy4kqcvKWZKkwpicNXe2tiVpOJOzJEmFGZmcI+LTEbE3Im7vWfbRiLg7Im6NiKsj\n4qie9z4QETsi4p6IeNWsAle5xrm9arVMDivtMzqepcUwTuX8GeDUFcuuB34tM18IfA/4AEBEnASc\nDbyg2uZ/RMSaxqLVQlnN9z+P88PHnvf9qI3JYJ/B8SwVb2RyzsxvAT9asexrmbmv+vJGYHP1+gzg\nC5n5aGb+HbADeGmD8UqqwfEsLYYmbqV6G3BF9XoTncHdtbNadoCI2ApsBdh8xMENhKESjXN7VW+F\nuei/mWncTkDBt0/VHs/xdMezVFet5BwRHwT2AZ+bdNvM3AZsA3jxhsOyThwq3573/YhjLz565Hr9\nkltpCXvaVnzBCRlobjwftP5wx7NU09TJOSLeArwaOCUzu4NxF3Bcz2qbq2WSCuZ4lsoyVXKOiFOB\n9wOvzMyf97x1LfCXEXEx8CvAicDf1I5SS6G3chyniu4qpe29rJPXHM9SeUYm54j4PPCbwLMiYidw\nAZ3ZnIcC10cEwI2Z+Y7MvCMivgjcSac99q7MfHxWwWtxdRP1JEka2
kvUdRJzSe1sx7O0GEYm58x8\nU5/Flw1Z/0+BP60TlKTZcDxLi8FffKFWjTtRrJ9JqtlJquwm29clVc2SFofJWa2b9lr0JOZ1vdhk\nLKkJPltbkqTCWDmrKP0qz1lV03VYIUuaJZPzBB4/Yt/olVaBYU/7Wi1K/IFB0vKwrS1JUmHiqYcB\ntRhExIPAPwL/0HYsAzyLMmMrNS4oN7ZS44IDY/vVzFzfVjDTioifAfe0HccAi/TvX4pS44LFiW3i\nsVxEcgaIiO2ZuaXtOPopNbZS44JyYys1Lig7tkmU/DmMbXKlxgXLHZttbUmSCmNyliSpMCUl521t\nBzBEqbGVGheUG1upcUHZsU2i5M9hbJMrNS5Y4tiKueYsSZI6SqqcJUkSJmdJkorTenKOiFMj4p6I\n2BER57ccy3ER8fWIuDMi7oiI91TLL4yIXRFxS/XntJbiuy8ibqti2F4tOzoiro+I71d/P3POMT2v\n57zcEhEPR8R72zpnEfHpiNgbEbf3LOt7jqLjz6v/e7dGxMktxPbRiLi7Ov7VEXFUtfz4iPhFz/m7\ndJaxNaWU8exYnjoux/P0cTU7ljOztT/AGuAHwHOAQ4DvAie1GM9G4OTq9RHA94CTgAuB/9jmuapi\nug941opl/xk4v3p9PnBRy/+ee4BfbeucAa8ATgZuH3WOgNOA/w0E8DLgphZi+zfA2ur1RT2xHd+7\n3iL8KWk8O5Yb+/d0PI8fV6Njue3K+aXAjsy8NzN/CXwBOKOtYDJzd2Z+p3r9M+AuYFNb8YzpDODy\n6vXlwJktxnIK8IPMvL+tADLzW8DK30ox6BydAXw2O24EjoqIjfOMLTO/lpndh7bfCGye1fHnoJjx\n7FhuhON5griaHsttJ+dNwA97vt5JIQMoIo4HXgLcVC36o6pd8ek22k2VBL4WETdHxNZq2YbM3F29\n3gNsaCc0AM4GPt/zdQnnDAafo9L+/72Nzk/+XSdExN9GxDcj4l+1FdQESjufgGO5Bsfz9GqP5baT\nc5Ei4unAVcB7M/Nh4BPAPwVeDOwG/mtLof1GZp4M/A7wroh4Re+b2emhtHJvXEQcArwG+FK1qJRz\ntp82z9EwEfFBYB/wuWrRbuCfZOZLgPcBfxkR/jqwCTmWp+N4nl5TY7nt5LwLOK7n683VstZExMF0\nBvPnMvPLAJn5QGY+nplPAJ+k076bu8zcVf29F7i6iuOBbuum+ntvG7HR+Sbzncx8oIqxiHNWGXSO\nivj/FxFvAV4N/H71zYbMfDQzH6pe30znWu4/m3dsEyrifHY5lmtxPE+hybHcdnL+NnBiRJxQ/aR2\nNnBtW8FERACXAXdl5sU9y3uvW7wWuH3ltnOI7WkRcUT3NZ3JB7fTOV/nVKudA1wz79gqb6KnBVbC\nOesx6BxdC7y5muX5MuCnPe2yuYiIU4H3A6/JzJ/3LF8fEWuq188BTgTunWdsUyhmPDuWa3M8T6jx\nsTyr2Wzj/qEzw+57dH6a+GDLsfwGnRbJrcAt1Z/TgP8F3FYtvxbY2EJsz6Ez+/W7wB3dcwUcA9wA\nfB/4P8DRLcT2NOAh4Bk9y1o5Z3S+oewGHqNzzencQeeIzqzO/17937sN2NJCbDvoXCfr/n+7tFr3\nrOrf+RbgO8C/nfe/65SfsYjx7FiuFZ/jebq4Gh3LPr5TkqTCtN3WliRJK5icJUkqjMlZkqTCmJwl\nSSqMyVmSpMKYnCVJKozJWZKkwvz/YBaf33QExS0AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MTct6_8Hbfpz", + "colab_type": "text" + }, + "source": [ + "### Fin" + ] + } + ] +} \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 4242117db..762441fe2 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -29,7 +29,7 @@ ENV PATH=$CONDA_PREFIX/bin:$PATH ENV CONDA_AUTO_UPDATE_CONDA=false RUN conda install -y ipython -RUN pip install ninja yacs cython matplotlib opencv-python tqdm +RUN pip install requests ninja yacs cython matplotlib opencv-python tqdm # Install PyTorch 1.0 Nightly ARG CUDA diff --git a/docker/docker-jupyter/Dockerfile b/docker/docker-jupyter/Dockerfile index 323727195..afc512679 100644 --- a/docker/docker-jupyter/Dockerfile +++ b/docker/docker-jupyter/Dockerfile @@ -28,11 +28,14 @@ ENV PATH=$CONDA_PREFIX/bin:$PATH ENV CONDA_AUTO_UPDATE_CONDA=false RUN conda install -y ipython -RUN pip install ninja yacs cython matplotlib jupyter +RUN pip install requests ninja yacs cython matplotlib jupyter tqdm -# Install PyTorch 1.0 Nightly and OpenCV -RUN conda install -y pytorch-nightly -c pytorch \ - && conda install -y opencv -c menpo \ +# Install PyTorch Nightly +ARG CUDA +RUN conda install -y pytorch-nightly cudatoolkit=${CUDA} -c pytorch + +# Install OpenCV +RUN conda install -y opencv -c menpo \ && conda clean -ya WORKDIR /root @@ -53,7 +56,14 @@ RUN git clone https://github.com/cocodataset/cocoapi.git \ && cd cocoapi/PythonAPI \ && python setup.py build_ext install +# install apex +RUN git clone https://github.com/NVIDIA/apex.git \ + && cd apex \ + && python setup.py install --cuda_ext --cpp_ext + # install PyTorch Detection +ARG FORCE_CUDA="1" +ENV FORCE_CUDA=${FORCE_CUDA} RUN git clone https://github.com/facebookresearch/maskrcnn-benchmark.git \ && cd maskrcnn-benchmark \ && python setup.py build develop diff --git a/docker/docker-jupyter/jupyter_notebook_config.py b/docker/docker-jupyter/jupyter_notebook_config.py index bd5494812..e8fbe7de4 100644 --- a/docker/docker-jupyter/jupyter_notebook_config.py +++ b/docker/docker-jupyter/jupyter_notebook_config.py @@ -1,7 +1,7 @@ import os from IPython.lib import passwd -#c = c # pylint:disable=undefined-variable +# c = c # pylint:disable=undefined-variable c = get_config() c.NotebookApp.ip = '0.0.0.0' c.NotebookApp.port = int(os.getenv('PORT', 8888)) @@ -9,10 +9,10 @@ # sets a password if PASSWORD is set in the environment if 'PASSWORD' in os.environ: - password = os.environ['PASSWORD'] - if password: - c.NotebookApp.password = passwd(password) - else: - c.NotebookApp.password = '' - c.NotebookApp.token = '' - del os.environ['PASSWORD'] + password = os.environ['PASSWORD'] + if password: + c.NotebookApp.password = passwd(password) + else: + c.NotebookApp.password = '' + c.NotebookApp.token = '' + del os.environ['PASSWORD'] diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index 37e362cd7..65fbdaddd 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -60,6 +60,7 @@ _C.INPUT.SATURATION = 0.0 _C.INPUT.HUE = 0.0 +_C.INPUT.VERTICAL_FLIP_PROB_TRAIN = 0.0 # ----------------------------------------------------------------------------- # Dataset @@ -98,8 +99,6 @@ # Add StopGrad at a specified stage so the bottom layers are frozen _C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2 -# GN for backbone -_C.MODEL.BACKBONE.USE_GN = False # ---------------------------------------------------------------------------- # @@ 
-427,6 +426,27 @@ # Number of detections per image _C.TEST.DETECTIONS_PER_IMG = 100 +# ---------------------------------------------------------------------------- # +# Test-time augmentations for bounding box detection +# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_1x.yaml for an example +# ---------------------------------------------------------------------------- # +_C.TEST.BBOX_AUG = CN() + +# Enable test-time augmentation for bounding box detection if True +_C.TEST.BBOX_AUG.ENABLED = False + +# Horizontal flip at the original scale (id transform) +_C.TEST.BBOX_AUG.H_FLIP = False + +# Each scale is the pixel size of an image's shortest side +_C.TEST.BBOX_AUG.SCALES = () + +# Max pixel size of the longer side +_C.TEST.BBOX_AUG.MAX_SIZE = 4000 + +# Horizontal flip at each scale +_C.TEST.BBOX_AUG.SCALE_H_FLIP = False + # ---------------------------------------------------------------------------- # # Misc options diff --git a/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp b/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp index d35aedf27..d531da623 100644 --- a/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp +++ b/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp @@ -91,7 +91,7 @@ void pre_calc_for_bilinear_interpolate( T hy = 1. - ly, hx = 1. - lx; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; - // save weights and indeces + // save weights and indices PreCalc pc; pc.pos1 = y_low * width + x_low; pc.pos2 = y_low * width + x_high; @@ -168,8 +168,8 @@ void ROIAlignForward_cpu_kernel( // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 - // we want to precalculate indeces and weights shared by all chanels, - // this is the key point of optimiation + // we want to precalculate indices and weights shared by all channels, + // this is the key point of optimization std::vector> pre_calc( roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); pre_calc_for_bilinear_interpolate( diff --git a/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu b/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu index 7d40767bb..456a5f235 100644 --- a/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu +++ b/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu @@ -117,7 +117,8 @@ at::Tensor SigmoidFocalLoss_forward_cuda( auto losses_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L)); + dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L)); + dim3 block(512); if (losses.numel() == 0) { @@ -161,7 +162,7 @@ at::Tensor SigmoidFocalLoss_backward_cuda( auto d_logits_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L)); + dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { diff --git a/maskrcnn_benchmark/data/build.py b/maskrcnn_benchmark/data/build.py index d2895fd7e..26239155d 100644 --- a/maskrcnn_benchmark/data/build.py +++ b/maskrcnn_benchmark/data/build.py @@ -6,11 +6,12 @@ import torch.utils.data from maskrcnn_benchmark.utils.comm import get_world_size from maskrcnn_benchmark.utils.imports import import_file +from maskrcnn_benchmark.utils.miscellaneous import save_labels from . import datasets as D from . 
import samplers -from .collate_batch import BatchCollator +from .collate_batch import BatchCollator, BBoxAugCollator from .transforms import build_transforms @@ -18,7 +19,7 @@ def build_dataset(dataset_list, transforms, dataset_catalog, is_train=True): """ Arguments: dataset_list (list[str]): Contains the names of the datasets, i.e., - coco_2014_trian, coco_2014_val, etc + coco_2014_train, coco_2014_val, etc transforms (callable): transforms to apply to each (image, target) sample dataset_catalog (DatasetCatalog): contains the information on how to construct a dataset. @@ -110,8 +111,8 @@ def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0): images_per_batch = cfg.SOLVER.IMS_PER_BATCH assert ( images_per_batch % num_gpus == 0 - ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number " - "of GPUs ({}) used.".format(images_per_batch, num_gpus) + ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format( + images_per_batch, num_gpus) images_per_gpu = images_per_batch // num_gpus shuffle = True num_iters = cfg.SOLVER.MAX_ITER @@ -119,8 +120,8 @@ def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0): images_per_batch = cfg.TEST.IMS_PER_BATCH assert ( images_per_batch % num_gpus == 0 - ), "TEST.IMS_PER_BATCH ({}) must be divisible by the number " - "of GPUs ({}) used.".format(images_per_batch, num_gpus) + ), "TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format( + images_per_batch, num_gpus) images_per_gpu = images_per_batch // num_gpus shuffle = False if not is_distributed else True num_iters = None @@ -150,16 +151,22 @@ def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0): DatasetCatalog = paths_catalog.DatasetCatalog dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST - transforms = build_transforms(cfg, is_train) + # If bbox aug is enabled in testing, simply set transforms to None and we will apply transforms later + transforms = None if not is_train and cfg.TEST.BBOX_AUG.ENABLED else build_transforms(cfg, is_train) datasets = build_dataset(dataset_list, transforms, DatasetCatalog, is_train) + if is_train: + # save category_id to label name mapping + save_labels(datasets, cfg.OUTPUT_DIR) + data_loaders = [] for dataset in datasets: sampler = make_data_sampler(dataset, shuffle, is_distributed) batch_sampler = make_batch_data_sampler( dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter ) - collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY) + collator = BBoxAugCollator() if not is_train and cfg.TEST.BBOX_AUG.ENABLED else \ + BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY) num_workers = cfg.DATALOADER.NUM_WORKERS data_loader = torch.utils.data.DataLoader( dataset, diff --git a/maskrcnn_benchmark/data/collate_batch.py b/maskrcnn_benchmark/data/collate_batch.py index a7f034167..56571f18c 100644 --- a/maskrcnn_benchmark/data/collate_batch.py +++ b/maskrcnn_benchmark/data/collate_batch.py @@ -18,3 +18,15 @@ def __call__(self, batch): targets = transposed_batch[1] img_ids = transposed_batch[2] return images, targets, img_ids + + +class BBoxAugCollator(object): + """ + From a list of samples from the dataset, + returns the images and targets. 
+ Images should be converted to batched images in `im_detect_bbox_aug` + """ + + def __call__(self, batch): + return list(zip(*batch)) + diff --git a/maskrcnn_benchmark/data/datasets/coco.py b/maskrcnn_benchmark/data/datasets/coco.py index d0e42b437..cc10f29d1 100644 --- a/maskrcnn_benchmark/data/datasets/coco.py +++ b/maskrcnn_benchmark/data/datasets/coco.py @@ -54,6 +54,8 @@ def __init__( ids.append(img_id) self.ids = ids + self.categories = {cat['id']: cat['name'] for cat in self.coco.cats.values()} + self.json_category_id_to_contiguous_id = { v: i + 1 for i, v in enumerate(self.coco.getCatIds()) } @@ -61,7 +63,7 @@ def __init__( v: k for k, v in self.json_category_id_to_contiguous_id.items() } self.id_to_img_map = {k: v for k, v in enumerate(self.ids)} - self.transforms = transforms + self._transforms = transforms def __getitem__(self, idx): img, anno = super(COCODataset, self).__getitem__(idx) @@ -79,9 +81,10 @@ def __getitem__(self, idx): classes = torch.tensor(classes) target.add_field("labels", classes) - masks = [obj["segmentation"] for obj in anno] - masks = SegmentationMask(masks, img.size, mode='poly') - target.add_field("masks", masks) + if anno and "segmentation" in anno[0]: + masks = [obj["segmentation"] for obj in anno] + masks = SegmentationMask(masks, img.size, mode='poly') + target.add_field("masks", masks) if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] @@ -90,8 +93,8 @@ def __getitem__(self, idx): target = target.clip_to_image(remove_empty=True) - if self.transforms is not None: - img, target = self.transforms(img, target) + if self._transforms is not None: + img, target = self._transforms(img, target) return img, target, idx diff --git a/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py b/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py index e4ca14baa..a8fdc280e 100644 --- a/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py +++ b/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py @@ -364,8 +364,14 @@ def update(self, coco_eval): res[metric] = s[idx] def __repr__(self): - # TODO make it pretty - return repr(self.results) + results = '\n' + for task, metrics in self.results.items(): + results += 'Task: {}\n'.format(task) + metric_names = metrics.keys() + metric_vals = ['{:.4f}'.format(v) for v in metrics.values()] + results += (', '.join(metric_names) + '\n') + results += (', '.join(metric_vals) + '\n') + return results def check_expected_results(results, expected_results, sigma_tol): diff --git a/maskrcnn_benchmark/data/datasets/voc.py b/maskrcnn_benchmark/data/datasets/voc.py index 459985bd1..ab4075ec5 100644 --- a/maskrcnn_benchmark/data/datasets/voc.py +++ b/maskrcnn_benchmark/data/datasets/voc.py @@ -57,6 +57,7 @@ def __init__(self, data_dir, split, use_difficult=False, transforms=None): cls = PascalVOCDataset.CLASSES self.class_to_ind = dict(zip(cls, range(len(cls)))) + self.categories = dict(zip(range(len(cls)), cls)) def __getitem__(self, index): img_id = self.ids[index] @@ -89,7 +90,7 @@ def _preprocess_annotation(self, target): gt_classes = [] difficult_boxes = [] TO_REMOVE = 1 - + for obj in target.iter("object"): difficult = int(obj.find("difficult").text) == 1 if not self.keep_difficult and difficult: @@ -99,9 +100,9 @@ def _preprocess_annotation(self, target): # Make pixel indexes 0-based # Refer to "https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211" box = [ - bb.find("xmin").text, - bb.find("ymin").text, - bb.find("xmax").text, + 
bb.find("xmin").text, + bb.find("ymin").text, + bb.find("xmax").text, bb.find("ymax").text, ] bndbox = tuple( diff --git a/maskrcnn_benchmark/data/transforms/build.py b/maskrcnn_benchmark/data/transforms/build.py index 88aa975b6..52385ea7d 100644 --- a/maskrcnn_benchmark/data/transforms/build.py +++ b/maskrcnn_benchmark/data/transforms/build.py @@ -6,7 +6,8 @@ def build_transforms(cfg, is_train=True): if is_train: min_size = cfg.INPUT.MIN_SIZE_TRAIN max_size = cfg.INPUT.MAX_SIZE_TRAIN - flip_prob = 0.5 # cfg.INPUT.FLIP_PROB_TRAIN + flip_horizontal_prob = 0.5 # cfg.INPUT.FLIP_PROB_TRAIN + flip_vertical_prob = cfg.INPUT.VERTICAL_FLIP_PROB_TRAIN brightness = cfg.INPUT.BRIGHTNESS contrast = cfg.INPUT.CONTRAST saturation = cfg.INPUT.SATURATION @@ -14,7 +15,8 @@ def build_transforms(cfg, is_train=True): else: min_size = cfg.INPUT.MIN_SIZE_TEST max_size = cfg.INPUT.MAX_SIZE_TEST - flip_prob = 0 + flip_horizontal_prob = 0.0 + flip_vertical_prob = 0.0 brightness = 0.0 contrast = 0.0 saturation = 0.0 @@ -35,7 +37,8 @@ def build_transforms(cfg, is_train=True): [ color_jitter, T.Resize(min_size, max_size), - T.RandomHorizontalFlip(flip_prob), + T.RandomHorizontalFlip(flip_horizontal_prob), + T.RandomVerticalFlip(flip_vertical_prob), T.ToTensor(), normalize_transform, ] diff --git a/maskrcnn_benchmark/data/transforms/transforms.py b/maskrcnn_benchmark/data/transforms/transforms.py index 1c322f8ba..2d37dc72f 100644 --- a/maskrcnn_benchmark/data/transforms/transforms.py +++ b/maskrcnn_benchmark/data/transforms/transforms.py @@ -54,9 +54,11 @@ def get_size(self, image_size): return (oh, ow) - def __call__(self, image, target): + def __call__(self, image, target=None): size = self.get_size(image.size) image = F.resize(image, size) + if target is None: + return image target = target.resize(image.size) return image, target @@ -71,6 +73,15 @@ def __call__(self, image, target): target = target.transpose(0) return image, target +class RandomVerticalFlip(object): + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, image, target): + if random.random() < self.prob: + image = F.vflip(image) + target = target.transpose(1) + return image, target class ColorJitter(object): def __init__(self, @@ -101,8 +112,10 @@ def __init__(self, mean, std, to_bgr255=True): self.std = std self.to_bgr255 = to_bgr255 - def __call__(self, image, target): + def __call__(self, image, target=None): if self.to_bgr255: image = image[[2, 1, 0]] * 255 image = F.normalize(image, mean=self.mean, std=self.std) + if target is None: + return image return image, target diff --git a/maskrcnn_benchmark/engine/bbox_aug.py b/maskrcnn_benchmark/engine/bbox_aug.py new file mode 100644 index 000000000..444416538 --- /dev/null +++ b/maskrcnn_benchmark/engine/bbox_aug.py @@ -0,0 +1,118 @@ +import torch +import torchvision.transforms as TT + +from maskrcnn_benchmark.config import cfg +from maskrcnn_benchmark.data import transforms as T +from maskrcnn_benchmark.structures.image_list import to_image_list +from maskrcnn_benchmark.structures.bounding_box import BoxList +from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import make_roi_box_post_processor + + +def im_detect_bbox_aug(model, images, device): + # Collect detections computed under different transformations + boxlists_ts = [] + for _ in range(len(images)): + boxlists_ts.append([]) + + def add_preds_t(boxlists_t): + for i, boxlist_t in enumerate(boxlists_t): + if len(boxlists_ts[i]) == 0: + # The first one is identity transform, no need to resize the boxlist + 
boxlists_ts[i].append(boxlist_t) + else: + # Resize the boxlist as the first one + boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size)) + + # Compute detections for the original image (identity transform) + boxlists_i = im_detect_bbox( + model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device + ) + add_preds_t(boxlists_i) + + # Perform detection on the horizontally flipped image + if cfg.TEST.BBOX_AUG.H_FLIP: + boxlists_hf = im_detect_bbox_hflip( + model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device + ) + add_preds_t(boxlists_hf) + + # Compute detections at different scales + for scale in cfg.TEST.BBOX_AUG.SCALES: + max_size = cfg.TEST.BBOX_AUG.MAX_SIZE + boxlists_scl = im_detect_bbox_scale( + model, images, scale, max_size, device + ) + add_preds_t(boxlists_scl) + + if cfg.TEST.BBOX_AUG.SCALE_H_FLIP: + boxlists_scl_hf = im_detect_bbox_scale( + model, images, scale, max_size, device, hflip=True + ) + add_preds_t(boxlists_scl_hf) + + # Merge boxlists detected by different bbox aug params + boxlists = [] + for i, boxlist_ts in enumerate(boxlists_ts): + bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts]) + scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts]) + boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode) + boxlist.add_field('scores', scores) + boxlists.append(boxlist) + + # Apply NMS and limit the final detections + results = [] + post_processor = make_roi_box_post_processor(cfg) + for boxlist in boxlists: + results.append(post_processor.filter_results(boxlist, cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES)) + + return results + + +def im_detect_bbox(model, images, target_scale, target_max_size, device): + """ + Performs bbox detection on the original image. + """ + transform = TT.Compose([ + T.Resize(target_scale, target_max_size), + TT.ToTensor(), + T.Normalize( + mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255 + ) + ]) + images = [transform(image) for image in images] + images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY) + return model(images.to(device)) + + +def im_detect_bbox_hflip(model, images, target_scale, target_max_size, device): + """ + Performs bbox detection on the horizontally flipped image. + Function signature is the same as for im_detect_bbox. + """ + transform = TT.Compose([ + T.Resize(target_scale, target_max_size), + TT.RandomHorizontalFlip(1.0), + TT.ToTensor(), + T.Normalize( + mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255 + ) + ]) + images = [transform(image) for image in images] + images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY) + boxlists = model(images.to(device)) + + # Invert the detections computed on the flipped image + boxlists_inv = [boxlist.transpose(0) for boxlist in boxlists] + return boxlists_inv + + +def im_detect_bbox_scale(model, images, target_scale, target_max_size, device, hflip=False): + """ + Computes bbox detections at the given scale. + Returns predictions in the scaled image space. 
+ """ + if hflip: + boxlists_scl = im_detect_bbox_hflip(model, images, target_scale, target_max_size, device) + else: + boxlists_scl = im_detect_bbox(model, images, target_scale, target_max_size, device) + return boxlists_scl diff --git a/maskrcnn_benchmark/engine/inference.py b/maskrcnn_benchmark/engine/inference.py index e125cb877..82d0abb6d 100644 --- a/maskrcnn_benchmark/engine/inference.py +++ b/maskrcnn_benchmark/engine/inference.py @@ -6,11 +6,13 @@ import torch from tqdm import tqdm +from maskrcnn_benchmark.config import cfg from maskrcnn_benchmark.data.datasets.evaluation import evaluate from ..utils.comm import is_main_process, get_world_size from ..utils.comm import all_gather from ..utils.comm import synchronize from ..utils.timer import Timer, get_time_str +from .bbox_aug import im_detect_bbox_aug def compute_on_dataset(model, data_loader, device, timer=None): @@ -19,13 +21,16 @@ def compute_on_dataset(model, data_loader, device, timer=None): cpu_device = torch.device("cpu") for _, batch in enumerate(tqdm(data_loader)): images, targets, image_ids = batch - images = images.to(device) with torch.no_grad(): if timer: timer.tic() - output = model(images) + if cfg.TEST.BBOX_AUG.ENABLED: + output = im_detect_bbox_aug(model, images, device) + else: + output = model(images.to(device)) if timer: - torch.cuda.synchronize() + if not cfg.MODEL.DEVICE == 'cpu': + torch.cuda.synchronize() timer.toc() output = [o.to(cpu_device) for o in output] results_dict.update( diff --git a/maskrcnn_benchmark/engine/trainer.py b/maskrcnn_benchmark/engine/trainer.py index 281d91339..560b63e1c 100644 --- a/maskrcnn_benchmark/engine/trainer.py +++ b/maskrcnn_benchmark/engine/trainer.py @@ -55,6 +55,10 @@ def do_train( start_training_time = time.time() end = time.time() for iteration, (images, targets, _) in enumerate(data_loader, start_iter): + + if any(len(target) < 1 for target in targets): + logger.error(f"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}" ) + continue data_time = time.time() - end iteration = iteration + 1 arguments["iteration"] = iteration diff --git a/maskrcnn_benchmark/layers/dcn/__init__.py b/maskrcnn_benchmark/layers/dcn/__init__.py index 22fe18ff3..bb5af25d4 100644 --- a/maskrcnn_benchmark/layers/dcn/__init__.py +++ b/maskrcnn_benchmark/layers/dcn/__init__.py @@ -1,3 +1,3 @@ -# +# # Copied From [mmdetection](https://github.com/open-mmlab/mmdetection/tree/master/mmdet/ops/dcn) -# \ No newline at end of file +# diff --git a/maskrcnn_benchmark/layers/dcn/deform_conv_func.py b/maskrcnn_benchmark/layers/dcn/deform_conv_func.py index a276a05fe..388bacf12 100644 --- a/maskrcnn_benchmark/layers/dcn/deform_conv_func.py +++ b/maskrcnn_benchmark/layers/dcn/deform_conv_func.py @@ -10,15 +10,15 @@ class DeformConvFunction(Function): @staticmethod def forward( - ctx, - input, - offset, + ctx, + input, + offset, weight, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, im2col_step=64 ): if input is not None and input.dim() != 4: @@ -47,21 +47,21 @@ def forward( assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize' _C.deform_conv_forward( - input, - weight, - offset, - output, - ctx.bufs_[0], + input, + weight, + offset, + output, + ctx.bufs_[0], ctx.bufs_[1], - weight.size(3), - weight.size(2), - ctx.stride[1], + weight.size(3), + weight.size(2), + ctx.stride[1], ctx.stride[0], - ctx.padding[1], - 
ctx.padding[0], + ctx.padding[1], + ctx.padding[0], ctx.dilation[1], - ctx.dilation[0], - ctx.groups, + ctx.dilation[0], + ctx.groups, ctx.deformable_groups, cur_im2col_step ) @@ -85,22 +85,22 @@ def backward(ctx, grad_output): grad_input = torch.zeros_like(input) grad_offset = torch.zeros_like(offset) _C.deform_conv_backward_input( - input, - offset, - grad_output, + input, + offset, + grad_output, grad_input, - grad_offset, - weight, - ctx.bufs_[0], + grad_offset, + weight, + ctx.bufs_[0], weight.size(3), - weight.size(2), - ctx.stride[1], + weight.size(2), + ctx.stride[1], ctx.stride[0], - ctx.padding[1], - ctx.padding[0], + ctx.padding[1], + ctx.padding[0], ctx.dilation[1], - ctx.dilation[0], - ctx.groups, + ctx.dilation[0], + ctx.groups, ctx.deformable_groups, cur_im2col_step ) @@ -108,22 +108,22 @@ def backward(ctx, grad_output): if ctx.needs_input_grad[2]: grad_weight = torch.zeros_like(weight) _C.deform_conv_backward_parameters( - input, - offset, + input, + offset, grad_output, - grad_weight, - ctx.bufs_[0], - ctx.bufs_[1], + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], weight.size(3), - weight.size(2), - ctx.stride[1], + weight.size(2), + ctx.stride[1], ctx.stride[0], - ctx.padding[1], - ctx.padding[0], + ctx.padding[1], + ctx.padding[0], ctx.dilation[1], - ctx.dilation[0], - ctx.groups, - ctx.deformable_groups, + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, 1, cur_im2col_step ) @@ -180,24 +180,24 @@ def forward( ModulatedDeformConvFunction._infer_shape(ctx, input, weight)) ctx._bufs = [input.new_empty(0), input.new_empty(0)] _C.modulated_deform_conv_forward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, output, - ctx._bufs[1], - weight.shape[2], - weight.shape[3], + ctx._bufs[1], + weight.shape[2], + weight.shape[3], + ctx.stride, ctx.stride, - ctx.stride, - ctx.padding, - ctx.padding, - ctx.dilation, + ctx.padding, + ctx.padding, ctx.dilation, - ctx.groups, - ctx.deformable_groups, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, ctx.with_bias ) return output @@ -214,29 +214,29 @@ def backward(ctx, grad_output): grad_weight = torch.zeros_like(weight) grad_bias = torch.zeros_like(bias) _C.modulated_deform_conv_backward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, ctx._bufs[1], - grad_input, - grad_weight, - grad_bias, - grad_offset, + grad_input, + grad_weight, + grad_bias, + grad_offset, grad_mask, - grad_output, - weight.shape[2], - weight.shape[3], + grad_output, + weight.shape[2], + weight.shape[3], + ctx.stride, ctx.stride, - ctx.stride, - ctx.padding, - ctx.padding, - ctx.dilation, + ctx.padding, + ctx.padding, + ctx.dilation, ctx.dilation, - ctx.groups, - ctx.deformable_groups, + ctx.groups, + ctx.deformable_groups, ctx.with_bias ) if not ctx.with_bias: diff --git a/maskrcnn_benchmark/layers/dcn/deform_pool_func.py b/maskrcnn_benchmark/layers/dcn/deform_pool_func.py index 2f7810b23..e083b002e 100644 --- a/maskrcnn_benchmark/layers/dcn/deform_pool_func.py +++ b/maskrcnn_benchmark/layers/dcn/deform_pool_func.py @@ -39,18 +39,18 @@ def forward( output = data.new_empty(n, out_channels, out_size, out_size) output_count = data.new_empty(n, out_channels, out_size, out_size) _C.deform_psroi_pooling_forward( - data, - rois, - offset, - output, - output_count, + data, + rois, + offset, + output, + output_count, ctx.no_trans, - ctx.spatial_scale, - ctx.out_channels, - ctx.group_size, + ctx.spatial_scale, + 
ctx.out_channels, + ctx.group_size, ctx.out_size, - ctx.part_size, - ctx.sample_per_part, + ctx.part_size, + ctx.sample_per_part, ctx.trans_std ) @@ -73,19 +73,19 @@ def backward(ctx, grad_output): grad_offset = torch.zeros_like(offset) _C.deform_psroi_pooling_backward( - grad_output, - data, - rois, - offset, - output_count, + grad_output, + data, + rois, + offset, + output_count, grad_input, - grad_offset, - ctx.no_trans, - ctx.spatial_scale, + grad_offset, + ctx.no_trans, + ctx.spatial_scale, ctx.out_channels, - ctx.group_size, - ctx.out_size, - ctx.part_size, + ctx.group_size, + ctx.out_size, + ctx.part_size, ctx.sample_per_part, ctx.trans_std ) diff --git a/maskrcnn_benchmark/layers/misc.py b/maskrcnn_benchmark/layers/misc.py index b64f23840..871132419 100644 --- a/maskrcnn_benchmark/layers/misc.py +++ b/maskrcnn_benchmark/layers/misc.py @@ -114,12 +114,12 @@ def _output_size(dim): class DFConv2d(nn.Module): """Deformable convolutional layer""" def __init__( - self, - in_channels, - out_channels, - with_modulated_dcn=True, - kernel_size=3, - stride=1, + self, + in_channels, + out_channels, + with_modulated_dcn=True, + kernel_size=3, + stride=1, groups=1, dilation=1, deformable_groups=1, @@ -156,7 +156,7 @@ def __init__( padding=padding, groups=1, dilation=dilation - ) + ) for l in [self.offset,]: nn.init.kaiming_uniform_(l.weight, a=1) torch.nn.init.constant_(l.bias, 0.) @@ -192,10 +192,10 @@ def forward(self, x): output_shape = [ (i + 2 * p - (di * (k - 1) + 1)) // d + 1 for i, p, di, k, d in zip( - x.shape[-2:], - self.padding, - self.dilation, - self.kernel_size, + x.shape[-2:], + self.padding, + self.dilation, + self.kernel_size, self.stride ) ] diff --git a/maskrcnn_benchmark/modeling/backbone/resnet.py b/maskrcnn_benchmark/modeling/backbone/resnet.py index fc02dc1e8..3fd2d41e7 100644 --- a/maskrcnn_benchmark/modeling/backbone/resnet.py +++ b/maskrcnn_benchmark/modeling/backbone/resnet.py @@ -288,11 +288,11 @@ def __init__( deformable_groups = dcn_config.get("deformable_groups", 1) with_modulated_dcn = dcn_config.get("with_modulated_dcn", False) self.conv2 = DFConv2d( - bottleneck_channels, - bottleneck_channels, - with_modulated_dcn=with_modulated_dcn, - kernel_size=3, - stride=stride_3x3, + bottleneck_channels, + bottleneck_channels, + with_modulated_dcn=with_modulated_dcn, + kernel_size=3, + stride=stride_3x3, groups=num_groups, dilation=dilation, deformable_groups=deformable_groups, @@ -332,8 +332,8 @@ def forward(self, x): out = self.bn2(out) out = F.relu_(out) - out0 = self.conv3(out) - out = self.bn3(out0) + out = self.conv3(out) + out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) diff --git a/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py b/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py index c0bd00444..902a60eb2 100644 --- a/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py +++ b/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py @@ -11,7 +11,7 @@ def __init__(self, batch_size_per_image, positive_fraction): """ Arguments: batch_size_per_image (int): number of elements to be selected per image - positive_fraction (float): percentace of positive elements per batch + positive_fraction (float): percentage of positive elements per batch """ self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction diff --git a/maskrcnn_benchmark/modeling/make_layers.py b/maskrcnn_benchmark/modeling/make_layers.py index 74e56b0e2..049aee6d1 100644 --- 
a/maskrcnn_benchmark/modeling/make_layers.py +++ b/maskrcnn_benchmark/modeling/make_layers.py @@ -34,29 +34,29 @@ def group_norm(out_channels, affine=True, divisor=1): num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5 return torch.nn.GroupNorm( - get_group_gn(out_channels, dim_per_gp, num_groups), - out_channels, - eps, + get_group_gn(out_channels, dim_per_gp, num_groups), + out_channels, + eps, affine ) def make_conv3x3( - in_channels, - out_channels, - dilation=1, - stride=1, + in_channels, + out_channels, + dilation=1, + stride=1, use_gn=False, use_relu=False, kaiming_init=True ): conv = Conv2d( - in_channels, - out_channels, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, bias=False if use_gn else True ) if kaiming_init: @@ -97,12 +97,12 @@ def make_conv( in_channels, out_channels, kernel_size, stride=1, dilation=1 ): conv = Conv2d( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=stride, - padding=dilation * (kernel_size - 1) // 2, - dilation=dilation, + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=dilation * (kernel_size - 1) // 2, + dilation=dilation, bias=False if use_gn else True ) # Caffe2 implementation uses XavierFill, which in fact diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py index 595a2e616..cc2f4fa85 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py @@ -22,7 +22,8 @@ def __init__( nms=0.5, detections_per_img=100, box_coder=None, - cls_agnostic_bbox_reg=False + cls_agnostic_bbox_reg=False, + bbox_aug_enabled=False ): """ Arguments: @@ -39,6 +40,7 @@ def __init__( box_coder = BoxCoder(weights=(10., 10., 5., 5.)) self.box_coder = box_coder self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg + self.bbox_aug_enabled = bbox_aug_enabled def forward(self, x, boxes): """ @@ -79,7 +81,8 @@ def forward(self, x, boxes): ): boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape) boxlist = boxlist.clip_to_image(remove_empty=False) - boxlist = self.filter_results(boxlist, num_classes) + if not self.bbox_aug_enabled: # If bbox aug is enabled, we will do it later + boxlist = self.filter_results(boxlist, num_classes) results.append(boxlist) return results @@ -156,12 +159,14 @@ def make_roi_box_post_processor(cfg): nms_thresh = cfg.MODEL.ROI_HEADS.NMS detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG + bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED postprocessor = PostProcessor( score_thresh, nms_thresh, detections_per_img, box_coder, - cls_agnostic_bbox_reg + cls_agnostic_bbox_reg, + bbox_aug_enabled ) return postprocessor diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py index 9f2771d02..a1fdd2308 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py @@ -19,10 +19,10 @@ class FastRCNNLossComputation(object): """ def __init__( - self, - proposal_matcher, - fg_bg_sampler, - box_coder, + self, + proposal_matcher, + fg_bg_sampler, + box_coder, cls_agnostic_bbox_reg=False ): """ @@ -184,9 +184,9 @@ def make_roi_box_loss_evaluator(cfg): cls_agnostic_bbox_reg = 
cfg.MODEL.CLS_AGNOSTIC_BBOX_REG loss_evaluator = FastRCNNLossComputation( - matcher, - fg_bg_sampler, - box_coder, + matcher, + fg_bg_sampler, + box_coder, cls_agnostic_bbox_reg ) diff --git a/maskrcnn_benchmark/modeling/rpn/rpn.py b/maskrcnn_benchmark/modeling/rpn/rpn.py index 07997651c..c279a232f 100644 --- a/maskrcnn_benchmark/modeling/rpn/rpn.py +++ b/maskrcnn_benchmark/modeling/rpn/rpn.py @@ -108,8 +108,8 @@ def forward(self, x): class RPNModule(torch.nn.Module): """ - Module for RPN computation. Takes feature maps from the backbone and RPN - proposals and losses. Works for both FPN and non-FPN. + Module for RPN computation. Takes feature maps from the backbone and outputs + RPN proposals and losses. Works for both FPN and non-FPN. """ def __init__(self, cfg, in_channels): diff --git a/maskrcnn_benchmark/structures/bounding_box.py b/maskrcnn_benchmark/structures/bounding_box.py index 4084024fa..25791d578 100644 --- a/maskrcnn_benchmark/structures/bounding_box.py +++ b/maskrcnn_benchmark/structures/bounding_box.py @@ -166,7 +166,7 @@ def transpose(self, method): def crop(self, box): """ - Cropss a rectangular region from this bounding box. The box is a + Crops a rectangular region from this bounding box. The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate. """ diff --git a/maskrcnn_benchmark/structures/boxlist_ops.py b/maskrcnn_benchmark/structures/boxlist_ops.py index dc51212f4..02dcaf121 100644 --- a/maskrcnn_benchmark/structures/boxlist_ops.py +++ b/maskrcnn_benchmark/structures/boxlist_ops.py @@ -67,7 +67,8 @@ def boxlist_iou(boxlist1, boxlist2): if boxlist1.size != boxlist2.size: raise RuntimeError( "boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2)) - + boxlist1 = boxlist1.convert("xyxy") + boxlist2 = boxlist2.convert("xyxy") N = len(boxlist1) M = len(boxlist2) diff --git a/maskrcnn_benchmark/structures/segmentation_mask.py b/maskrcnn_benchmark/structures/segmentation_mask.py index 060d512b3..89b80aff2 100644 --- a/maskrcnn_benchmark/structures/segmentation_mask.py +++ b/maskrcnn_benchmark/structures/segmentation_mask.py @@ -3,7 +3,7 @@ import torch import numpy as np from maskrcnn_benchmark.layers.misc import interpolate - +from maskrcnn_benchmark.utils import cv2_util import pycocotools.mask as mask_utils # transpose @@ -48,29 +48,53 @@ def __init__(self, masks, size): initializing source data intact. """ + assert isinstance(size, (list, tuple)) + assert len(size) == 2 + if isinstance(masks, torch.Tensor): # The raw data representation is passed as argument masks = masks.clone() elif isinstance(masks, (list, tuple)): - if isinstance(masks[0], torch.Tensor): + if len(masks) == 0: + masks = torch.empty([0, size[1], size[0]]) # num_instances = 0! 
+ elif isinstance(masks[0], torch.Tensor): masks = torch.stack(masks, dim=2).clone() elif isinstance(masks[0], dict) and "counts" in masks[0]: # RLE interpretation - assert all( - [(size[1], size[0]) == tuple(inst["size"]) for inst in masks] - ) # in RLE, height come first in "size" + rle_sizes = [tuple(inst["size"]) for inst in masks] + masks = mask_utils.decode(masks) # [h, w, n] masks = torch.tensor(masks).permute(2, 0, 1) # [n, h, w] + + assert rle_sizes.count(rle_sizes[0]) == len(rle_sizes), ( + "All the sizes must be the same size: %s" % rle_sizes + ) + + # in RLE, height come first in "size" + rle_height, rle_width = rle_sizes[0] + assert masks.shape[1] == rle_height + assert masks.shape[2] == rle_width + + width, height = size + if width != rle_width or height != rle_height: + masks = interpolate( + input=masks[None].float(), + size=(height, width), + mode="bilinear", + align_corners=False, + )[0].type_as(masks) else: RuntimeError( - "Type of `masks[0]` could not be interpreted: %s" % type(masks) + "Type of `masks[0]` could not be interpreted: %s" + % type(masks) ) elif isinstance(masks, BinaryMaskList): # just hard copy the BinaryMaskList instance's underlying data masks = masks.masks.clone() else: RuntimeError( - "Type of `masks` argument could not be interpreted:%s" % type(masks) + "Type of `masks` argument could not be interpreted:%s" + % type(masks) ) if len(masks.shape) == 2: @@ -122,7 +146,7 @@ def resize(self, size): assert height > 0 # Height comes first here! - resized_masks = torch.nn.functional.interpolate( + resized_masks = interpolate( input=self.masks[None].float(), size=(height, width), mode="bilinear", @@ -132,6 +156,9 @@ def resize(self, size): return BinaryMaskList(resized_masks, resized_size) def convert_to_polygon(self): + if self.masks.numel() == 0: + return PolygonList([], self.size) + contours = self._findContours() return PolygonList(contours, self.size) @@ -143,14 +170,16 @@ def _findContours(self): masks = self.masks.detach().numpy() for mask in masks: mask = cv2.UMat(mask) - contour, hierarchy = cv2.findContours( + contour, hierarchy = cv2_util.findContours( mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1 ) reshaped_contour = [] for entity in contour: assert len(entity.shape) == 3 - assert entity.shape[1] == 1, "Hierarchical contours are not allowed" + assert ( + entity.shape[1] == 1 + ), "Hierarchical contours are not allowed" reshaped_contour.append(entity.reshape(-1).tolist()) contours.append(reshaped_contour) return contours @@ -159,10 +188,9 @@ def __len__(self): return len(self.masks) def __getitem__(self, index): - # Probably it can cause some overhead - # but preserves consistency - masks = self.masks[index].clone() - return BinaryMaskList(masks, self.size) + if self.masks.numel() == 0: + raise RuntimeError("Indexing empty BinaryMaskList") + return BinaryMaskList(self.masks[index], self.size) def __iter__(self): return iter(self.masks) @@ -202,7 +230,8 @@ def __init__(self, polygons, size): else: RuntimeError( - "Type of argument `polygons` is not allowed:%s" % (type(polygons)) + "Type of argument `polygons` is not allowed:%s" + % (type(polygons)) ) """ This crashes the training way too many times... 
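A small illustration of the new `BinaryMaskList` constructor behaviour may help here; the values below are made up, and only the class and its `(width, height)` size argument come from `segmentation_mask.py`.

```python
# Sketch of the new BinaryMaskList constructor paths (illustrative values only).
import numpy as np
import pycocotools.mask as mask_utils
from maskrcnn_benchmark.structures.segmentation_mask import BinaryMaskList

# 1) An empty instance list no longer breaks: it becomes a [0, H, W] mask tensor.
empty = BinaryMaskList([], size=(640, 480))  # size is (width, height)
assert len(empty) == 0

# 2) RLE input: all RLE "size" fields must agree; the masks are decoded with
#    mask_utils.decode and bilinearly resized when the RLE size differs from
#    the requested image size.
rles = mask_utils.encode(np.asfortranarray(np.ones((480, 640, 2), dtype=np.uint8)))
masks = BinaryMaskList(rles, size=(1280, 960))  # decoded at 640x480, resized up
assert len(masks) == 2
```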
@@ -274,7 +303,9 @@ def resize(self, size): assert isinstance(size, (int, float)) size = size, size - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size)) + ratios = tuple( + float(s) / float(s_orig) for s, s_orig in zip(size, self.size) + ) if ratios[0] == ratios[1]: ratio = ratios[0] @@ -308,7 +339,7 @@ def __repr__(self): s = self.__class__.__name__ + "(" s += "num_groups={}, ".format(len(self.polygons)) s += "image_width={}, ".format(self.size[0]) - s += "image_height={}, ".format(self.size[1]) + s += "image_height={})".format(self.size[1]) return s @@ -345,7 +376,9 @@ def __init__(self, polygons, size): type(polygons[0][0]) ) else: - assert isinstance(polygons[0], PolygonInstance), str(type(polygons[0])) + assert isinstance(polygons[0], PolygonInstance), str( + type(polygons[0]) + ) elif isinstance(polygons, PolygonList): size = polygons.size @@ -353,7 +386,8 @@ def __init__(self, polygons, size): else: RuntimeError( - "Type of argument `polygons` is not allowed:%s" % (type(polygons)) + "Type of argument `polygons` is not allowed:%s" + % (type(polygons)) ) assert isinstance(size, (list, tuple)), str(type(size)) @@ -400,7 +434,9 @@ def to(self, *args, **kwargs): def convert_to_binarymask(self): if len(self) > 0: - masks = torch.stack([p.convert_to_binarymask() for p in self.polygons]) + masks = torch.stack( + [p.convert_to_binarymask() for p in self.polygons] + ) else: size = self.size masks = torch.empty([0, size[1], size[0]], dtype=torch.uint8) diff --git a/maskrcnn_benchmark/utils/checkpoint.py b/maskrcnn_benchmark/utils/checkpoint.py index dc403f5db..2af2565ed 100644 --- a/maskrcnn_benchmark/utils/checkpoint.py +++ b/maskrcnn_benchmark/utils/checkpoint.py @@ -49,8 +49,8 @@ def save(self, name, **kwargs): torch.save(data, save_file) self.tag_last_checkpoint(save_file) - def load(self, f=None): - if self.has_checkpoint(): + def load(self, f=None, use_latest=True): + if self.has_checkpoint() and use_latest: # override argument with existing checkpoint f = self.get_checkpoint_file() if not f: diff --git a/maskrcnn_benchmark/utils/comm.py b/maskrcnn_benchmark/utils/comm.py index 46d7c55ce..669f208ad 100644 --- a/maskrcnn_benchmark/utils/comm.py +++ b/maskrcnn_benchmark/utils/comm.py @@ -63,8 +63,8 @@ def all_gather(data): tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank - local_size = torch.IntTensor([tensor.numel()]).to("cuda") - size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)] + local_size = torch.LongTensor([tensor.numel()]).to("cuda") + size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) diff --git a/maskrcnn_benchmark/utils/miscellaneous.py b/maskrcnn_benchmark/utils/miscellaneous.py index db9a8b367..ce1c279bf 100644 --- a/maskrcnn_benchmark/utils/miscellaneous.py +++ b/maskrcnn_benchmark/utils/miscellaneous.py @@ -1,6 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
import errno +import json +import logging import os +from .comm import is_main_process def mkdir(path): @@ -9,3 +12,28 @@ def mkdir(path): except OSError as e: if e.errno != errno.EEXIST: raise + + +def save_labels(dataset_list, output_dir): + if is_main_process(): + logger = logging.getLogger(__name__) + + ids_to_labels = {} + for dataset in dataset_list: + if hasattr(dataset, 'categories'): + ids_to_labels.update(dataset.categories) + else: + logger.warning("Dataset [{}] has no categories attribute, labels.json file won't be created".format( + dataset.__class__.__name__)) + + if ids_to_labels: + labels_file = os.path.join(output_dir, 'labels.json') + logger.info("Saving labels mapping into {}".format(labels_file)) + with open(labels_file, 'w') as f: + json.dump(ids_to_labels, f, indent=2) + + +def save_config(cfg, path): + if is_main_process(): + with open(path, 'w') as f: + f.write(cfg.dump()) diff --git a/tests/test_segmentation_mask.py b/tests/test_segmentation_mask.py index d01ed9452..3f70ed551 100644 --- a/tests/test_segmentation_mask.py +++ b/tests/test_segmentation_mask.py @@ -20,13 +20,11 @@ def __init__(self, method_name='runTest'): self.P = SegmentationMask(poly, size, 'poly') self.M = SegmentationMask(poly, size, 'poly').convert('mask') - def L1(self, A, B): diff = A.get_mask_tensor() - B.get_mask_tensor() diff = torch.sum(torch.abs(diff.float())).item() return diff - def test_convert(self): M_hat = self.M.convert('poly').convert('mask') P_hat = self.P.convert('mask').convert('poly') @@ -37,13 +35,11 @@ def test_convert(self): self.assertTrue(diff_mask <= 8169.) self.assertTrue(diff_poly <= 8169.) - def test_crop(self): box = [400, 250, 500, 300] # xyxy diff = self.L1(self.M.crop(box), self.P.crop(box)) self.assertTrue(diff <= 1.) - def test_resize(self): new_size = 50, 25 M_hat = self.M.resize(new_size) @@ -55,7 +51,6 @@ def test_resize(self): self.assertTrue(self.M.size != M_hat.size) self.assertTrue(diff <= 255.) 
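For context on the new `save_labels` helper, a minimal usage sketch follows; the toy dataset and output directory are placeholders. Any dataset in the list that exposes a `categories` mapping contributes to the file, and datasets without one are skipped with a warning.

```python
# Sketch of save_labels() from utils/miscellaneous.py. ToyDataset and the
# output directory are placeholders for illustration only.
from maskrcnn_benchmark.utils.miscellaneous import mkdir, save_labels

class ToyDataset:
    categories = {1: "person", 2: "bicycle"}

mkdir("output")
save_labels([ToyDataset()], output_dir="output")
# Writes output/labels.json with the merged id -> name mapping, e.g.
# {"1": "person", "2": "bicycle"}
```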
- def test_transpose(self): FLIP_LEFT_RIGHT = 0 FLIP_TOP_BOTTOM = 1 diff --git a/tools/test_net.py b/tools/test_net.py index c666a4655..ee3bf4cab 100644 --- a/tools/test_net.py +++ b/tools/test_net.py @@ -33,6 +33,11 @@ def main(): help="path to config file", ) parser.add_argument("--local_rank", type=int, default=0) + parser.add_argument( + "--ckpt", + help="The path to the checkpoint for test, default is the latest checkpoint.", + default=None, + ) parser.add_argument( "opts", help="Modify config options using the command-line", @@ -73,7 +78,8 @@ def main(): output_dir = cfg.OUTPUT_DIR checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir) - _ = checkpointer.load(cfg.MODEL.WEIGHT) + ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt + _ = checkpointer.load(ckpt, use_latest=args.ckpt is None) iou_types = ("bbox",) if cfg.MODEL.MASK_ON: diff --git a/tools/train_net.py b/tools/train_net.py index 9f4761b3f..3468fbb4a 100644 --- a/tools/train_net.py +++ b/tools/train_net.py @@ -23,7 +23,7 @@ from maskrcnn_benchmark.utils.comm import synchronize, get_rank from maskrcnn_benchmark.utils.imports import import_file from maskrcnn_benchmark.utils.logger import setup_logger -from maskrcnn_benchmark.utils.miscellaneous import mkdir +from maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config # See if we can use apex.DistributedDataParallel instead of the torch default, # and enable mixed-precision via apex.amp @@ -176,6 +176,11 @@ def main(): logger.info(config_str) logger.info("Running with config:\n{}".format(cfg)) + output_config_path = os.path.join(cfg.OUTPUT_DIR, 'config.yml') + logger.info("Saving config into: {}".format(output_config_path)) + # save overloaded model config in the output directory + save_config(cfg, output_config_path) + model = train(cfg, args.local_rank, args.distributed) if not args.skip_test:
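Taken together, the new `--ckpt` flag and the `use_latest` argument let evaluation load a specific weights file instead of the latest checkpoint in the output directory. Below is a sketch of the equivalent direct call; the config and weights paths are placeholders.

```python
# Sketch of the checkpoint selection used by tools/test_net.py --ckpt.
# use_latest=False forces the given file to be loaded even when a
# "last_checkpoint" marker exists in save_dir.
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer

cfg.merge_from_file("path/to/config.yaml")
model = build_detection_model(cfg)

checkpointer = DetectronCheckpointer(cfg, model, save_dir=cfg.OUTPUT_DIR)
_ = checkpointer.load("path/to/model_final.pth", use_latest=False)

# Command-line equivalent added by this patch:
#   python tools/test_net.py --config-file path/to/config.yaml --ckpt path/to/model_final.pth
```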